Re-vendor, and use mtrmac/image-spec:id-based-loader to fix tests

Anyone running (vndr) currently ends up with failing tests in OCI schema
validation because gojsonschema has fixed its "$ref" interpretation, exposing
inconsistent URI usage inside image-spec/schema.

So, this runs (vndr), and uses mtrmac/image-spec:id-based-loader
( https://github.com/opencontainers/image-spec/pull/739 ) to make the tests pass
again.  As soon as that PR is merged we should revert to using the upstream
image-spec repo again.
Miloslav Trmač 2018-01-10 20:32:58 +01:00
parent 22c524b0e0
commit dc1567c8bc
170 changed files with 14354 additions and 1523 deletions
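For context on the failure described in the commit message: the OCI validation that breaks goes through gojsonschema, roughly as in the hedged Go sketch below. The schema path, manifest body, and layout are illustrative placeholders rather than the actual image-spec test harness; the point is only that stricter "$ref" resolution can make schema loading itself fail when the embedded schemas reference each other with inconsistent URIs.

package main

import (
	"fmt"
	"log"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// Illustrative manifest document; the real tests feed fixtures from image-spec.
	manifest := `{"schemaVersion": 2, "config": {}, "layers": []}`

	// Hypothetical local schema path; in image-spec/schema the schemas are embedded
	// and cross-reference each other via "$ref", which is what the stricter
	// gojsonschema resolution exposed.
	schemaLoader := gojsonschema.NewReferenceLoader("file:///schemas/image-manifest-schema.json")
	documentLoader := gojsonschema.NewStringLoader(manifest)

	result, err := gojsonschema.Validate(schemaLoader, documentLoader)
	if err != nil {
		// This is the failure mode the re-vendor works around: the schema (not the
		// document) fails to load because a "$ref" cannot be resolved.
		log.Fatalf("schema could not be loaded: %v", err)
	}
	for _, desc := range result.Errors() {
		fmt.Println("-", desc)
	}
}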

View File

@@ -25,7 +25,7 @@ github.com/docker/distribution master
github.com/docker/libtrust master
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
github.com/opencontainers/runc master
-github.com/opencontainers/image-spec v1.0.0
+github.com/opencontainers/image-spec 149252121d044fddff670adcdc67f33148e16226
# -- start OCI image validation requirements.
github.com/opencontainers/runtime-spec v1.0.0
github.com/opencontainers/image-tools 6d941547fa1df31900990b3fb47ec2468c9c6469

View File

@@ -8,7 +8,10 @@ import (
"io"
"io/ioutil"
"net/http"
+"net/url"
+"os"
"path/filepath"
+"strconv"
"strings"
"time"
@@ -25,10 +28,9 @@ import (
const (
dockerHostname = "docker.io"
+dockerV1Hostname = "index.docker.io"
dockerRegistry = "registry-1.docker.io"
-systemPerHostCertDirPath = "/etc/docker/certs.d"
resolvedPingV2URL = "%s://%s/v2/"
resolvedPingV1URL = "%s://%s/v1/_ping"
tagsPath = "/v2/%s/tags/list"
@@ -49,6 +51,7 @@ var (
ErrV1NotSupported = errors.New("can't talk to a V1 docker registry")
// ErrUnauthorizedForCredentials is returned when the status code returned is 401
ErrUnauthorizedForCredentials = errors.New("unable to retrieve auth token: invalid username/password")
+systemPerHostCertDirPaths = [2]string{"/etc/containers/certs.d", "/etc/docker/certs.d"}
)
// extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go:
@@ -67,6 +70,7 @@ type extensionSignatureList struct {
type bearerToken struct {
Token string `json:"token"`
+AccessToken string `json:"access_token"`
ExpiresIn int `json:"expires_in"`
IssuedAt time.Time `json:"issued_at"`
}
@@ -96,6 +100,24 @@ type authScope struct {
actions string
}
func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
token := new(bearerToken)
if err := json.Unmarshal(blob, &token); err != nil {
return nil, err
}
if token.Token == "" {
token.Token = token.AccessToken
}
if token.ExpiresIn < minimumTokenLifetimeSeconds {
token.ExpiresIn = minimumTokenLifetimeSeconds
logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
}
if token.IssuedAt.IsZero() {
token.IssuedAt = time.Now().UTC()
}
return token, nil
}
// this is cloned from docker/go-connections because upstream docker has changed
// it and make deps here fails otherwise.
// We'll drop this once we upgrade to docker 1.13.x deps.
@@ -109,19 +131,42 @@ func serverDefault() *tls.Config {
}
// dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort.
-func dockerCertDir(ctx *types.SystemContext, hostPort string) string {
+func dockerCertDir(ctx *types.SystemContext, hostPort string) (string, error) {
if ctx != nil && ctx.DockerCertPath != "" {
-return ctx.DockerCertPath
+return ctx.DockerCertPath, nil
}
-var hostCertDir string
if ctx != nil && ctx.DockerPerHostCertDirPath != "" {
-hostCertDir = ctx.DockerPerHostCertDirPath
+return filepath.Join(ctx.DockerPerHostCertDirPath, hostPort), nil
-} else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
+}
+var (
+hostCertDir string
+fullCertDirPath string
+)
+for _, systemPerHostCertDirPath := range systemPerHostCertDirPaths {
+if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
hostCertDir = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemPerHostCertDirPath)
} else {
hostCertDir = systemPerHostCertDirPath
}
-return filepath.Join(hostCertDir, hostPort)
+fullCertDirPath = filepath.Join(hostCertDir, hostPort)
+_, err := os.Stat(fullCertDirPath)
+if err == nil {
+break
+}
+if os.IsNotExist(err) {
+continue
+}
+if os.IsPermission(err) {
+logrus.Debugf("error accessing certs directory due to permissions: %v", err)
+continue
+}
+if err != nil {
+return "", err
+}
+}
+return fullCertDirPath, nil
}
// newDockerClientFromRef returns a new dockerClient instance for refHostname (a host a specified in the Docker image reference, not canonicalized to dockerRegistry)
@@ -155,7 +200,10 @@ func newDockerClientWithDetails(ctx *types.SystemContext, registry, username, pa
// dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because
// generally the UI hides the existence of the different dockerRegistry. But note that this behavior is
// undocumented and may change if docker/docker changes.
-certDir := dockerCertDir(ctx, hostName)
+certDir, err := dockerCertDir(ctx, hostName)
+if err != nil {
+return nil, err
+}
if err := tlsclientconfig.SetupCertificates(certDir, tr.TLSClientConfig); err != nil {
return nil, err
}
@@ -202,6 +250,100 @@ func CheckAuth(ctx context.Context, sCtx *types.SystemContext, username, passwor
}
}
// SearchResult holds the information of each matching image
// It matches the output returned by the v1 endpoint
type SearchResult struct {
Name string `json:"name"`
Description string `json:"description"`
// StarCount states the number of stars the image has
StarCount int `json:"star_count"`
IsTrusted bool `json:"is_trusted"`
// IsAutomated states whether the image is an automated build
IsAutomated bool `json:"is_automated"`
// IsOfficial states whether the image is an official build
IsOfficial bool `json:"is_official"`
}
// SearchRegistry queries a registry for images that contain "image" in their name
// The limit is the max number of results desired
// Note: The limit value doesn't work with all registries
// for example registry.access.redhat.com returns all the results without limiting it to the limit value
func SearchRegistry(ctx context.Context, sCtx *types.SystemContext, registry, image string, limit int) ([]SearchResult, error) {
type V2Results struct {
// Repositories holds the results returned by the /v2/_catalog endpoint
Repositories []string `json:"repositories"`
}
type V1Results struct {
// Results holds the results returned by the /v1/search endpoint
Results []SearchResult `json:"results"`
}
v2Res := &V2Results{}
v1Res := &V1Results{}
// The /v2/_catalog endpoint has been disabled for docker.io therefore the call made to that endpoint will fail
// So using the v1 hostname for docker.io for simplicity of implementation and the fact that it returns search results
if registry == dockerHostname {
registry = dockerV1Hostname
}
client, err := newDockerClientWithDetails(sCtx, registry, "", "", "", nil, "")
if err != nil {
return nil, errors.Wrapf(err, "error creating new docker client")
}
logrus.Debugf("trying to talk to v2 search endpoint\n")
resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil)
if err != nil {
logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
} else {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
logrus.Debugf("error getting search results from v2 endpoint %q, status code %q", registry, resp.StatusCode)
} else {
if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil {
return nil, err
}
searchRes := []SearchResult{}
for _, repo := range v2Res.Repositories {
if strings.Contains(repo, image) {
res := SearchResult{
Name: repo,
}
searchRes = append(searchRes, res)
}
}
return searchRes, nil
}
}
// set up the query values for the v1 endpoint
u := url.URL{
Path: "/v1/search",
}
q := u.Query()
q.Set("q", image)
q.Set("n", strconv.Itoa(limit))
u.RawQuery = q.Encode()
logrus.Debugf("trying to talk to v1 search endpoint\n")
resp, err = client.makeRequest(ctx, "GET", u.String(), nil, nil)
if err != nil {
logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
} else {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
logrus.Debugf("error getting search results from v1 endpoint %q, status code %q", registry, resp.StatusCode)
} else {
if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil {
return nil, err
}
return v1Res.Results, nil
}
}
return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
}
// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
// The host name and schema is taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader) (*http.Response, error) {
@@ -332,18 +474,8 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope
if err != nil {
return nil, err
}
-var token bearerToken
-if err := json.Unmarshal(tokenBlob, &token); err != nil {
-return nil, err
-}
-if token.ExpiresIn < minimumTokenLifetimeSeconds {
-token.ExpiresIn = minimumTokenLifetimeSeconds
-logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
-}
-if token.IssuedAt.IsZero() {
-token.IssuedAt = time.Now().UTC()
-}
-return &token, nil
+return newBearerTokenFromJSONBlob(tokenBlob)
}
// detectProperties detects various properties of the registry.
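A hedged usage sketch of the SearchRegistry helper added above; the import path, registry host, and search term are illustrative (the caller is assumed to live outside this package), and as noted in the function's comments some registries ignore the result limit.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containers/image/docker"
)

func main() {
	// nil *types.SystemContext uses defaults; 25 is an illustrative limit that
	// v2 _catalog results and some registries do not honor.
	results, err := docker.SearchRegistry(context.Background(), nil, "registry.access.redhat.com", "rhel", 25)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range results {
		// v2 catalog matches only carry Name; v1 search results also fill Description.
		fmt.Printf("%s\t%s\n", r.Name, r.Description)
	}
}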

View File

@@ -196,7 +196,7 @@ func (d *dockerImageDestination) HasBlob(info types.BlobInfo) (bool, int64, erro
return true, getBlobSize(res), nil
case http.StatusUnauthorized:
logrus.Debugf("... not authorized")
-return false, -1, errors.Errorf("not authorized to read from destination repository %s", reference.Path(d.ref.ref))
+return false, -1, client.HandleErrorResponse(res)
case http.StatusNotFound:
logrus.Debugf("... not present")
return false, -1, nil

View File

@@ -463,9 +463,9 @@ func (a *Driver) isParent(id, parent string) bool {
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
-func (a *Driver) Diff(id, parent string) (io.ReadCloser, error) {
+func (a *Driver) Diff(id, parent, mountLabel string) (io.ReadCloser, error) {
if !a.isParent(id, parent) {
-return a.naiveDiff.Diff(id, parent)
+return a.naiveDiff.Diff(id, parent, mountLabel)
}
// AUFS doesn't need the parent layer to produce a diff.
@@ -502,9 +502,9 @@ func (a *Driver) applyDiff(id string, diff io.Reader) error {
// DiffSize calculates the changes between the specified id
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
-func (a *Driver) DiffSize(id, parent string) (size int64, err error) {
+func (a *Driver) DiffSize(id, parent, mountLabel string) (size int64, err error) {
if !a.isParent(id, parent) {
-return a.naiveDiff.DiffSize(id, parent)
+return a.naiveDiff.DiffSize(id, parent, mountLabel)
}
// AUFS doesn't need the parent layer to calculate the diff size.
return directory.Size(path.Join(a.rootPath(), "diff", id))
@@ -513,9 +513,9 @@ func (a *Driver) DiffSize(id, parent string) (size int64, err error) {
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
-func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) {
+func (a *Driver) ApplyDiff(id, parent, mountLabel string, diff io.Reader) (size int64, err error) {
if !a.isParent(id, parent) {
-return a.naiveDiff.ApplyDiff(id, parent, diff)
+return a.naiveDiff.ApplyDiff(id, parent, mountLabel, diff)
}
// AUFS doesn't need the parent id to apply the diff if it is the direct parent.
@@ -523,14 +523,14 @@ func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err e
return
}
-return a.DiffSize(id, parent)
+return a.DiffSize(id, parent, mountLabel)
}
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
-func (a *Driver) Changes(id, parent string) ([]archive.Change, error) {
+func (a *Driver) Changes(id, parent, mountLabel string) ([]archive.Change, error) {
if !a.isParent(id, parent) {
-return a.naiveDiff.Changes(id, parent)
+return a.naiveDiff.Changes(id, parent, mountLabel)
}
// AUFS doesn't have snapshots, so we need to get changes from all parent

View File

@@ -92,19 +92,19 @@ type ProtoDriver interface {
type DiffDriver interface {
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
-Diff(id, parent string) (io.ReadCloser, error)
+Diff(id, parent, mountLabel string) (io.ReadCloser, error)
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
-Changes(id, parent string) ([]archive.Change, error)
+Changes(id, parent, mountLabel string) ([]archive.Change, error)
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
// The io.Reader must be an uncompressed stream.
-ApplyDiff(id, parent string, diff io.Reader) (size int64, err error)
+ApplyDiff(id, parent, mountLabel string, diff io.Reader) (size int64, err error)
// DiffSize calculates the changes between the specified id
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
-DiffSize(id, parent string) (size int64, err error)
+DiffSize(id, parent, mountLabel string) (size int64, err error)
}
// Driver is the interface for layered/snapshot file system drivers.
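The DiffDriver change above is what drives the per-driver edits in this commit: every diff-related method now also receives the layer's mount label, so implementations can pass it to Get() instead of mounting with an empty label. A minimal, self-contained sketch of the new shape (toy types, not the real containers/storage driver):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// Toy mirror of the updated method signatures; only the extra mountLabel matters here.
type DiffDriver interface {
	Diff(id, parent, mountLabel string) (io.ReadCloser, error)
	DiffSize(id, parent, mountLabel string) (int64, error)
}

type fakeDriver struct{}

func (fakeDriver) Diff(id, parent, mountLabel string) (io.ReadCloser, error) {
	// A real driver would mount the layer using mountLabel and stream a tar of the changes.
	return ioutil.NopCloser(strings.NewReader("tar stream for " + id)), nil
}

func (fakeDriver) DiffSize(id, parent, mountLabel string) (int64, error) {
	return 0, nil
}

func main() {
	var d DiffDriver = fakeDriver{}
	rc, err := d.Diff("layer-id", "", "system_u:object_r:container_file_t:s0")
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	fmt.Println("diff stream opened")
}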

View File

@@ -31,10 +31,10 @@ type NaiveDiffDriver struct {
// NewNaiveDiffDriver returns a fully functional driver that wraps the
// given ProtoDriver and adds the capability of the following methods which
// it may or may not support on its own:
-// Diff(id, parent string) (io.ReadCloser, error)
+// Diff(id, parent, mountLabel string) (io.ReadCloser, error)
-// Changes(id, parent string) ([]archive.Change, error)
+// Changes(id, parent, mountLabel string) ([]archive.Change, error)
-// ApplyDiff(id, parent string, diff io.Reader) (size int64, err error)
+// ApplyDiff(id, parent, mountLabel string, diff io.Reader) (size int64, err error)
-// DiffSize(id, parent string) (size int64, err error)
+// DiffSize(id, parent, mountLabel string) (size int64, err error)
func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver {
return &NaiveDiffDriver{ProtoDriver: driver,
uidMaps: uidMaps,
@@ -43,11 +43,11 @@ func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Dr
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
-func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) {
+func (gdw *NaiveDiffDriver) Diff(id, parent, mountLabel string) (arch io.ReadCloser, err error) {
startTime := time.Now()
driver := gdw.ProtoDriver
-layerFs, err := driver.Get(id, "")
+layerFs, err := driver.Get(id, mountLabel)
if err != nil {
return nil, err
}
@@ -70,7 +70,7 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err
}), nil
}
-parentFs, err := driver.Get(parent, "")
+parentFs, err := driver.Get(parent, mountLabel)
if err != nil {
return nil, err
}
@@ -101,10 +101,10 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
-func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) {
+func (gdw *NaiveDiffDriver) Changes(id, parent, mountLabel string) ([]archive.Change, error) {
driver := gdw.ProtoDriver
-layerFs, err := driver.Get(id, "")
+layerFs, err := driver.Get(id, mountLabel)
if err != nil {
return nil, err
}
@@ -113,7 +113,7 @@ func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error)
parentFs := ""
if parent != "" {
-parentFs, err = driver.Get(parent, "")
+parentFs, err = driver.Get(parent, mountLabel)
if err != nil {
return nil, err
}
@@ -126,11 +126,11 @@ func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error)
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
-func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) {
+func (gdw *NaiveDiffDriver) ApplyDiff(id, parent, mountLabel string, diff io.Reader) (size int64, err error) {
driver := gdw.ProtoDriver
// Mount the root filesystem so we can apply the diff/layer.
-layerFs, err := driver.Get(id, "")
+layerFs, err := driver.Get(id, mountLabel)
if err != nil {
return
}
@@ -151,15 +151,15 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size i
// DiffSize calculates the changes between the specified layer
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
-func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) {
+func (gdw *NaiveDiffDriver) DiffSize(id, parent, mountLabel string) (size int64, err error) {
driver := gdw.ProtoDriver
-changes, err := gdw.Changes(id, parent)
+changes, err := gdw.Changes(id, parent, mountLabel)
if err != nil {
return
}
-layerFs, err := driver.Get(id, "")
+layerFs, err := driver.Get(id, mountLabel)
if err != nil {
return
}

View File

@@ -699,9 +699,9 @@ func (d *Driver) isParent(id, parent string) bool {
}
// ApplyDiff applies the new layer into a root
-func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) {
+func (d *Driver) ApplyDiff(id, parent, mountLabel string, diff io.Reader) (size int64, err error) {
if !d.isParent(id, parent) {
-return d.naiveDiff.ApplyDiff(id, parent, diff)
+return d.naiveDiff.ApplyDiff(id, parent, mountLabel, diff)
}
applyDir := d.getDiffPath(id)
@@ -728,18 +728,18 @@ func (d *Driver) getDiffPath(id string) string {
// DiffSize calculates the changes between the specified id
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
-func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
+func (d *Driver) DiffSize(id, parent, mountLabel string) (size int64, err error) {
if useNaiveDiff(d.home) || !d.isParent(id, parent) {
-return d.naiveDiff.DiffSize(id, parent)
+return d.naiveDiff.DiffSize(id, parent, mountLabel)
}
return directory.Size(d.getDiffPath(id))
}
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
-func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
+func (d *Driver) Diff(id, parent, mountLabel string) (io.ReadCloser, error) {
if useNaiveDiff(d.home) || !d.isParent(id, parent) {
-return d.naiveDiff.Diff(id, parent)
+return d.naiveDiff.Diff(id, parent, mountLabel)
}
diffPath := d.getDiffPath(id)
@@ -754,9 +754,9 @@ func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
-func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
+func (d *Driver) Changes(id, parent, mountLabel string) ([]archive.Change, error) {
if useNaiveDiff(d.home) || !d.isParent(id, parent) {
-return d.naiveDiff.Changes(id, parent)
+return d.naiveDiff.Changes(id, parent, mountLabel)
}
// Overlay doesn't have snapshots, so we need to get changes from all parent
// layers.

View File

@@ -472,7 +472,7 @@ func (d *Driver) Cleanup() error {
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
// The layer should be mounted when calling this function
-func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) {
+func (d *Driver) Diff(id, parent, mountLabel string) (_ io.ReadCloser, err error) {
panicIfUsedByLcow()
rID, err := d.resolveID(id)
if err != nil {
@@ -509,7 +509,7 @@ func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) {
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
// The layer should not be mounted when calling this function.
-func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
+func (d *Driver) Changes(id, parent, mountLabel string) ([]archive.Change, error) {
panicIfUsedByLcow()
rID, err := d.resolveID(id)
if err != nil {
@@ -565,7 +565,7 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
// The layer should not be mounted when calling this function
-func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
+func (d *Driver) ApplyDiff(id, parent, mountLabel string, diff io.Reader) (int64, error) {
panicIfUsedByLcow()
var layerChain []string
if parent != "" {
@@ -600,14 +600,14 @@ func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
// DiffSize calculates the changes between the specified layer
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
-func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
+func (d *Driver) DiffSize(id, parent, mountLabel string) (size int64, err error) {
panicIfUsedByLcow()
rPId, err := d.resolveID(parent)
if err != nil {
return
}
-changes, err := d.Changes(id, rPId)
+changes, err := d.Changes(id, rPId, mountLabel)
if err != nil {
return
}

View File

@@ -778,11 +778,11 @@ func (r *layerStore) findParentAndLayer(from, to string) (fromID string, toID st
}
func (r *layerStore) Changes(from, to string) ([]archive.Change, error) {
-from, to, _, err := r.findParentAndLayer(from, to)
+from, to, toLayer, err := r.findParentAndLayer(from, to)
if err != nil {
return nil, ErrLayerUnknown
}
-return r.driver.Changes(to, from)
+return r.driver.Changes(to, from, toLayer.MountLabel)
}
type simpleGetCloser struct {
@@ -855,7 +855,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
}
if from != toLayer.Parent {
-diff, err := r.driver.Diff(to, from)
+diff, err := r.driver.Diff(to, from, toLayer.MountLabel)
if err != nil {
return nil, err
}
@@ -867,7 +867,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
if !os.IsNotExist(err) {
return nil, err
}
-diff, err := r.driver.Diff(to, from)
+diff, err := r.driver.Diff(to, from, toLayer.MountLabel)
if err != nil {
return nil, err
}
@@ -906,11 +906,12 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
}
func (r *layerStore) DiffSize(from, to string) (size int64, err error) {
-from, to, _, err = r.findParentAndLayer(from, to)
+var toLayer *Layer
+from, to, toLayer, err = r.findParentAndLayer(from, to)
if err != nil {
return -1, ErrLayerUnknown
}
-return r.driver.DiffSize(to, from)
+return r.driver.DiffSize(to, from, toLayer.MountLabel)
}
func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error) {
@@ -950,7 +951,7 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
if err != nil {
return -1, err
}
-size, err = r.driver.ApplyDiff(layer.ID, layer.Parent, payload)
+size, err = r.driver.ApplyDiff(layer.ID, layer.Parent, layer.MountLabel, payload)
if err != nil {
return -1, err
}

View File

@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: layers.go
+// source: layers.go. Hack to make this work on github.com
package storage

View File

@@ -2,14 +2,11 @@ package storage
import (
"fmt"
-"os"
"path/filepath"
"sync"
"time"
-"github.com/containers/storage/pkg/stringid"
"github.com/pkg/errors"
-"golang.org/x/sys/unix"
)
// A Locker represents a file lock where the file is used to cache an
@@ -33,16 +30,8 @@ type Locker interface {
IsReadWrite() bool
}
-type lockfile struct {
-mu sync.Mutex
-file string
-fd uintptr
-lw string
-locktype int16
-}
var (
-lockfiles map[string]*lockfile
+lockfiles map[string]Locker
lockfilesLock sync.Mutex
)
@@ -52,7 +41,7 @@ func GetLockfile(path string) (Locker, error) {
lockfilesLock.Lock()
defer lockfilesLock.Unlock()
if lockfiles == nil {
-lockfiles = make(map[string]*lockfile)
+lockfiles = make(map[string]Locker)
}
cleanPath := filepath.Clean(path)
if locker, ok := lockfiles[cleanPath]; ok {
@@ -61,12 +50,10 @@
}
return locker, nil
}
-fd, err := unix.Open(cleanPath, os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR)
+locker, err := getLockFile(path, false) // platform dependent locker
if err != nil {
-return nil, errors.Wrapf(err, "error opening %q", cleanPath)
+return nil, err
}
-unix.CloseOnExec(fd)
-locker := &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_WRLCK}
lockfiles[filepath.Clean(path)] = locker
return locker, nil
}
@@ -77,7 +64,7 @@ func GetROLockfile(path string) (Locker, error) {
lockfilesLock.Lock()
defer lockfilesLock.Unlock()
if lockfiles == nil {
-lockfiles = make(map[string]*lockfile)
+lockfiles = make(map[string]Locker)
}
cleanPath := filepath.Clean(path)
if locker, ok := lockfiles[cleanPath]; ok {
@@ -86,99 +73,10 @@
}
return locker, nil
}
-fd, err := unix.Open(cleanPath, os.O_RDONLY, 0)
+locker, err := getLockFile(path, true) // platform dependent locker
if err != nil {
-return nil, errors.Wrapf(err, "error opening %q", cleanPath)
+return nil, err
}
-unix.CloseOnExec(fd)
-locker := &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_RDLCK}
lockfiles[filepath.Clean(path)] = locker
return locker, nil
}
// Lock locks the lock file
func (l *lockfile) Lock() {
lk := unix.Flock_t{
Type: l.locktype,
Whence: int16(os.SEEK_SET),
Start: 0,
Len: 0,
Pid: int32(os.Getpid()),
}
l.mu.Lock()
for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
time.Sleep(10 * time.Millisecond)
}
}
// Unlock unlocks the lock file
func (l *lockfile) Unlock() {
lk := unix.Flock_t{
Type: unix.F_UNLCK,
Whence: int16(os.SEEK_SET),
Start: 0,
Len: 0,
Pid: int32(os.Getpid()),
}
for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
time.Sleep(10 * time.Millisecond)
}
l.mu.Unlock()
}
// Touch updates the lock file with the UID of the user
func (l *lockfile) Touch() error {
l.lw = stringid.GenerateRandomID()
id := []byte(l.lw)
_, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
if err != nil {
return err
}
n, err := unix.Write(int(l.fd), id)
if err != nil {
return err
}
if n != len(id) {
return unix.ENOSPC
}
err = unix.Fsync(int(l.fd))
if err != nil {
return err
}
return nil
}
// Modified indicates if the lock file has been updated since the last time it was loaded
func (l *lockfile) Modified() (bool, error) {
id := []byte(l.lw)
_, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
if err != nil {
return true, err
}
n, err := unix.Read(int(l.fd), id)
if err != nil {
return true, err
}
if n != len(id) {
return true, unix.ENOSPC
}
lw := l.lw
l.lw = string(id)
return l.lw != lw, nil
}
// TouchedSince indicates if the lock file has been touched since the specified time
func (l *lockfile) TouchedSince(when time.Time) bool {
st := unix.Stat_t{}
err := unix.Fstat(int(l.fd), &st)
if err != nil {
return true
}
touched := time.Unix(statTMtimeUnix(st))
return when.Before(touched)
}
// IsRWLock indicates if the lock file is a read-write lock
func (l *lockfile) IsReadWrite() bool {
return (l.locktype == unix.F_WRLCK)
}
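The refactor above keeps the exported locking API intact and only moves the fcntl-based implementation behind a platform-dependent getLockFile constructor (see the new lockfile_*.go files below). A hedged usage sketch of the unchanged public surface; the lock path is illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/containers/storage"
)

func main() {
	// Illustrative path; callers normally lock files under their storage root.
	locker, err := storage.GetLockfile("/tmp/example-storage.lock")
	if err != nil {
		log.Fatal(err)
	}
	locker.Lock()
	defer locker.Unlock()
	fmt.Println("holding the lock; read-write:", locker.IsReadWrite())
}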

View File

@ -0,0 +1,19 @@
// +build darwin freebsd
package storage
import (
"time"
"golang.org/x/sys/unix"
)
func (l *lockfile) TouchedSince(when time.Time) bool {
st := unix.Stat_t{}
err := unix.Fstat(int(l.fd), &st)
if err != nil {
return true
}
touched := time.Unix(st.Mtimespec.Unix())
return when.Before(touched)
}

20
vendor/github.com/containers/storage/lockfile_linux.go generated vendored Normal file
View File

@ -0,0 +1,20 @@
// +build linux solaris
package storage
import (
"time"
"golang.org/x/sys/unix"
)
// TouchedSince indicates if the lock file has been touched since the specified time
func (l *lockfile) TouchedSince(when time.Time) bool {
st := unix.Stat_t{}
err := unix.Fstat(int(l.fd), &st)
if err != nil {
return true
}
touched := time.Unix(st.Mtim.Unix())
return when.Before(touched)
}

115
vendor/github.com/containers/storage/lockfile_unix.go generated vendored Normal file
View File

@ -0,0 +1,115 @@
// +build linux solaris darwin freebsd
package storage
import (
"os"
"sync"
"time"
"github.com/containers/storage/pkg/stringid"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
func getLockFile(path string, ro bool) (Locker, error) {
var fd int
var err error
if ro {
fd, err = unix.Open(path, os.O_RDONLY, 0)
} else {
fd, err = unix.Open(path, os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR)
}
if err != nil {
return nil, errors.Wrapf(err, "error opening %q", path)
}
unix.CloseOnExec(fd)
if ro {
return &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_RDLCK}, nil
}
return &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_WRLCK}, nil
}
type lockfile struct {
mu sync.Mutex
file string
fd uintptr
lw string
locktype int16
}
// Lock locks the lock file
func (l *lockfile) Lock() {
lk := unix.Flock_t{
Type: l.locktype,
Whence: int16(os.SEEK_SET),
Start: 0,
Len: 0,
Pid: int32(os.Getpid()),
}
l.mu.Lock()
for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
time.Sleep(10 * time.Millisecond)
}
}
// Unlock unlocks the lock file
func (l *lockfile) Unlock() {
lk := unix.Flock_t{
Type: unix.F_UNLCK,
Whence: int16(os.SEEK_SET),
Start: 0,
Len: 0,
Pid: int32(os.Getpid()),
}
for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
time.Sleep(10 * time.Millisecond)
}
l.mu.Unlock()
}
// Touch updates the lock file with the UID of the user
func (l *lockfile) Touch() error {
l.lw = stringid.GenerateRandomID()
id := []byte(l.lw)
_, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
if err != nil {
return err
}
n, err := unix.Write(int(l.fd), id)
if err != nil {
return err
}
if n != len(id) {
return unix.ENOSPC
}
err = unix.Fsync(int(l.fd))
if err != nil {
return err
}
return nil
}
// Modified indicates if the lock file has been updated since the last time it was loaded
func (l *lockfile) Modified() (bool, error) {
id := []byte(l.lw)
_, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
if err != nil {
return true, err
}
n, err := unix.Read(int(l.fd), id)
if err != nil {
return true, err
}
if n != len(id) {
return true, unix.ENOSPC
}
lw := l.lw
l.lw = string(id)
return l.lw != lw, nil
}
// IsRWLock indicates if the lock file is a read-write lock
func (l *lockfile) IsReadWrite() bool {
return (l.locktype == unix.F_WRLCK)
}

View File

@ -0,0 +1,40 @@
// +build windows
package storage
import (
"os"
"sync"
"time"
)
func getLockFile(path string, ro bool) (Locker, error) {
return &lockfile{}, nil
}
type lockfile struct {
mu sync.Mutex
file string
}
func (l *lockfile) Lock() {
}
func (l *lockfile) Unlock() {
}
func (l *lockfile) Modified() (bool, error) {
return false, nil
}
func (l *lockfile) Touch() error {
return nil
}
func (l *lockfile) IsReadWrite() bool {
return false
}
func (l *lockfile) TouchedSince(when time.Time) bool {
stat, err := os.Stat(l.file)
if err != nil {
return true
}
return when.Before(stat.ModTime())
}

View File

@@ -77,10 +77,7 @@ func (idx *TruncIndex) addID(id string) error {
func (idx *TruncIndex) Add(id string) error {
idx.Lock()
defer idx.Unlock()
-if err := idx.addID(id); err != nil {
-return err
-}
-return nil
+return idx.addID(id)
}
// Delete removes an ID from the TruncIndex. If there are multiple IDs
@@ -128,8 +125,13 @@ func (idx *TruncIndex) Get(s string) (string, error) {
return "", ErrNotExist
}
-// Iterate iterates over all stored IDs, and passes each of them to the given handler.
+// Iterate iterates over all stored IDs and passes each of them to the given
+// handler. Take care that the handler method does not call any public
+// method on truncindex as the internal locking is not reentrant/recursive
+// and will result in deadlock.
func (idx *TruncIndex) Iterate(handler func(id string)) {
+idx.Lock()
+defer idx.Unlock()
idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error {
handler(string(prefix))
return nil

View File

@ -1,11 +0,0 @@
// +build linux solaris
package storage
import (
"golang.org/x/sys/unix"
)
func statTMtimeUnix(st unix.Stat_t) (int64, int64) {
return st.Mtim.Unix()
}

View File

@ -1,11 +0,0 @@
// +build !linux,!solaris
package storage
import (
"golang.org/x/sys/unix"
)
func statTMtimeUnix(st unix.Stat_t) (int64, int64) {
return st.Mtimespec.Unix()
}

View File

@@ -16,7 +16,9 @@
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
-// +build !js,!appengine,!safe,!disableunsafe
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces which make the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4
package spew
@ -34,80 +36,49 @@ const (
ptrSize = unsafe.Sizeof((*byte)(nil)) ptrSize = unsafe.Sizeof((*byte)(nil))
) )
var ( type flag uintptr
// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
// internal reflect.Value fields. These values are valid before golang
// commit ecccf07e7f9d which changed the format. The are also valid
// after commit 82f48826c6c7 which changed the format again to mirror
// the original format. Code in the init function updates these offsets
// as necessary.
offsetPtr = ptrSize
offsetScalar = uintptr(0)
offsetFlag = ptrSize * 2
// flagKindWidth and flagKindShift indicate various bits that the var (
// reflect package uses internally to track kind information. // flagRO indicates whether the value field of a reflect.Value
// // is read-only.
// flagRO indicates whether or not the value field of a reflect.Value is flagRO flag
// read-only.
// // flagAddr indicates whether the address of the reflect.Value's
// flagIndir indicates whether the value field of a reflect.Value is // value may be taken.
// the actual data or a pointer to the data. flagAddr flag
//
// These values are valid before golang commit 90a7c3c86944 which
// changed their positions. Code in the init function updates these
// flags as necessary.
flagKindWidth = uintptr(5)
flagKindShift = flagKindWidth - 1
flagRO = uintptr(1 << 0)
flagIndir = uintptr(1 << 1)
) )
func init() { // flagKindMask holds the bits that make up the kind
// Older versions of reflect.Value stored small integers directly in the // part of the flags field. In all the supported versions,
// ptr field (which is named val in the older versions). Versions // it is in the lower 5 bits.
// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named const flagKindMask = flag(0x1f)
// scalar for this purpose which unfortunately came before the flag
// field, so the offset of the flag field is different for those
// versions.
//
// This code constructs a new reflect.Value from a known small integer
// and checks if the size of the reflect.Value struct indicates it has
// the scalar field. When it does, the offsets are updated accordingly.
vv := reflect.ValueOf(0xf00)
if unsafe.Sizeof(vv) == (ptrSize * 4) {
offsetScalar = ptrSize * 2
offsetFlag = ptrSize * 3
}
// Commit 90a7c3c86944 changed the flag positions such that the low // Different versions of Go have used different
// order bits are the kind. This code extracts the kind from the flags // bit layouts for the flags type. This table
// field and ensures it's the correct type. When it's not, the flag // records the known combinations.
// order has been changed to the newer format, so the flags are updated var okFlags = []struct {
// accordingly. ro, addr flag
upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) }{{
upfv := *(*uintptr)(upf) // From Go 1.4 to 1.5
flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift) ro: 1 << 5,
if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) { addr: 1 << 7,
flagKindShift = 0 }, {
flagRO = 1 << 5 // Up to Go tip.
flagIndir = 1 << 6 ro: 1<<5 | 1<<6,
addr: 1 << 8,
}}
// Commit adf9b30e5594 modified the flags to separate the var flagValOffset = func() uintptr {
// flagRO flag into two bits which specifies whether or not the field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
// field is embedded. This causes flagIndir to move over a bit if !ok {
// and means that flagRO is the combination of either of the panic("reflect.Value has no flag field")
// original flagRO bit and the new bit.
//
// This code detects the change by extracting what used to be
// the indirect bit to ensure it's set. When it's not, the flag
// order has been changed to the newer format, so the flags are
// updated accordingly.
if upfv&flagIndir == 0 {
flagRO = 3 << 5
flagIndir = 1 << 7
}
} }
return field.Offset
}()
// flagField returns a pointer to the flag field of a reflect.Value.
func flagField(v *reflect.Value) *flag {
return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
} }
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses // unsafeReflectValue converts the passed reflect.Value into a one that bypasses
@ -119,34 +90,56 @@ func init() {
// This allows us to check for implementations of the Stringer and error // This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and // interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields. // inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { func unsafeReflectValue(v reflect.Value) reflect.Value {
indirects := 1 if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
vt := v.Type() return v
upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
if rvf&flagIndir != 0 {
vt = reflect.PtrTo(v.Type())
indirects++
} else if offsetScalar != 0 {
// The value is in the scalar field when it's not one of the
// reference types.
switch vt.Kind() {
case reflect.Uintptr:
case reflect.Chan:
case reflect.Func:
case reflect.Map:
case reflect.Ptr:
case reflect.UnsafePointer:
default:
upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
offsetScalar)
} }
} flagFieldPtr := flagField(&v)
*flagFieldPtr &^= flagRO
pv := reflect.NewAt(vt, upv) *flagFieldPtr |= flagAddr
rv = pv return v
for i := 0; i < indirects; i++ { }
rv = rv.Elem()
} // Sanity checks against future reflect package changes
return rv // to the type or semantics of the Value.flag field.
func init() {
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
if !ok {
panic("reflect.Value has no flag field")
}
if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
panic("reflect.Value flag field has changed kind")
}
type t0 int
var t struct {
A t0
// t0 will have flagEmbedRO set.
t0
// a will have flagStickyRO set
a t0
}
vA := reflect.ValueOf(t).FieldByName("A")
va := reflect.ValueOf(t).FieldByName("a")
vt0 := reflect.ValueOf(t).FieldByName("t0")
// Infer flagRO from the difference between the flags
// for the (otherwise identical) fields in t.
flagPublic := *flagField(&vA)
flagWithRO := *flagField(&va) | *flagField(&vt0)
flagRO = flagPublic ^ flagWithRO
// Infer flagAddr from the difference between a value
// taken from a pointer and not.
vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
flagNoPtr := *flagField(&vA)
flagPtr := *flagField(&vPtrA)
flagAddr = flagNoPtr ^ flagPtr
// Check that the inferred flags tally with one of the known versions.
for _, f := range okFlags {
if flagRO == f.ro && flagAddr == f.addr {
return
}
}
panic("reflect.Value read-only flag has changed semantics")
} }

View File

@@ -16,7 +16,7 @@
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
-// +build js appengine safe disableunsafe
+// +build js appengine safe disableunsafe !go1.4
package spew

View File

@@ -205,13 +205,18 @@ type tags struct {
func (t *tags) All(ctx context.Context) ([]string, error) {
var tags []string
-u, err := t.ub.BuildTagsURL(t.name)
+listURLStr, err := t.ub.BuildTagsURL(t.name)
+if err != nil {
+return tags, err
+}
+listURL, err := url.Parse(listURLStr)
if err != nil {
return tags, err
}
for {
-resp, err := t.client.Get(u)
+resp, err := t.client.Get(listURL.String())
if err != nil {
return tags, err
}
@@ -231,7 +236,13 @@ func (t *tags) All(ctx context.Context) ([]string, error) {
}
tags = append(tags, tagsResponse.Tags...)
if link := resp.Header.Get("Link"); link != "" {
-u = strings.Trim(strings.Split(link, ";")[0], "<>")
+linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>")
+linkURL, err := url.Parse(linkURLStr)
+if err != nil {
+return tags, err
+}
+listURL = listURL.ResolveReference(linkURL)
} else {
return tags, nil
}
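The pagination fix above matters because a registry may return the Link header as a relative URL; resolving it against the previous list URL, rather than issuing a GET on the raw string, keeps the scheme and host. A small illustration of the net/url behavior relied on (hostnames and tag values are made up):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	base, _ := url.Parse("https://registry.example.com/v2/library/busybox/tags/list?n=100")
	next, _ := url.Parse("/v2/library/busybox/tags/list?last=1.29&n=100")
	// ResolveReference keeps base's scheme and host when next is relative.
	fmt.Println(base.ResolveReference(next))
	// https://registry.example.com/v2/library/busybox/tags/list?last=1.29&n=100
}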

View File

@@ -1,7 +1,7 @@
github.com/Azure/azure-sdk-for-go 088007b3b08cc02b27f2eadfdcd870958460ce7e
github.com/Azure/go-autorest ec5f4903f77ed9927ac95b19ab8e44ada64c1356
github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4
-github.com/aws/aws-sdk-go c6fc52983ea2375810aa38ddb5370e9cdf611716
+github.com/aws/aws-sdk-go 5bcc0a238d880469f949fc7cd24e35f32ab80cbd
github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a
github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274
github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702

View File

@@ -51,7 +51,7 @@ Find more [FAQ on the OCI site](https://www.opencontainers.org/faq).
## Roadmap
-The [GitHub milestones](https://github.com/opencontainers/image-spec/milestones) lay out the path to the OCI v1.0.0 release in late 2016.
+The [GitHub milestones](https://github.com/opencontainers/image-spec/milestones) lay out the path to the future improvements.
# Contributing

View File

@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+// Code generated by "esc -private -pkg=schema -include=.*\.json$ ."; DO NOT EDIT.
package schema
import (
@@ -170,7 +172,7 @@ func _escFSByte(useLocal bool, name string) ([]byte, error) {
return nil, err
}
b, err := ioutil.ReadAll(f)
-f.Close()
+_ = f.Close()
return b, err
}
f, err := _escStatic.prepare(name)
@ -205,27 +207,27 @@ var _escData = map[string]*_escFile{
"/config-schema.json": { "/config-schema.json": {
local: "config-schema.json", local: "config-schema.json",
size: 2771, size: 2771,
modtime: 1498025574, modtime: 1515512099,
compressed: ` compressed: `
H4sIAAAJbogA/+RWQW/bPAy9+1cYbo9t/R2+U67dbgMyINh2KIZAsemEnSVqFD3MGPLfB8vJZtmym3XI H4sIAAAAAAAC/+RWQY/TPBC951dE2T22m+/wnXot3JCKVAGHFarcZNLOEnvMeIKIUP87itNCkjpp6apc
aScDFB/f4xMl60eSplkJrmC0gmSyVZqtLZhHMqLQAKePZCrcpxsLBVZYKJ9118FuXXEArTrIQcSu8vzZ OEUaz7z35nns+EcUx0kOLmO0gmSSRZysLJglGVFogOMlmQJ38dpChgVmymfNmrJHl+1Bq6ZkL2IXafri
kbnvow/E+7xkVcn9f//nfeymx2F5hrhVnpMFU5zZnIf12TlqtYe88Pw9UloLHZZ2z1BIH7NMFlgQXLZK yMzb6BPxLs1ZFTL/7/+0jT20dZifStwiTcmCyU5szpe12SlqtYM08/xtpdQWmlravkAmbcwyWWBBcMki
u3bSNCsYlED5KzCAOmE0fTkfr4i1km6lVAL3ghoyv3bsUzLVyIF4oVSYzcUBBQppGC7FkLs08+RFJHvg btqJ4yRjUAL5r0Cn1AmjaeF8vCDWSpqVXAnMBTUkfu3QpiSqkj3xBFQ/m7M9CmRSMVxbQ+7azKMXgeyO
iI9HXPHxDw44iMwwDlh9ztvvlhyU74nFjfG3DJU3ECr30I3ATV5ChQa7UXG5VnbjK697jfH65tucLMWs Iz4ecMXHPzjgXmSEscPqc95+t+Qgf08sblj/yFB4A6FwT80IPKQ5FGiwGRWXamXXHnnVagzjm29jshSz
2uxuuIQCeixjoZE0Pc6QCreW0MiYmwysu56eAoKQblHigswXpIZyR5IXVZimrsNKwzqfoxY86vKf7f0j qpNZdwkF9FDGRCNxfBghFa4toZEhNxlYNT099wj6dJMSJ2RekNqXO5A8qcJUZdlH6uJ8Dlqw1Pk/2/tH
1Y2GyThf2P9rp/7aXX0i/oJm/wZfdc7fqR3U17ZkE9n4a1qyEbIb3BtVX2xJMvyer18mkip6WV96/ZZY KisN7sb+b536e3f1ifgLmt0bvOmcv1NbKO9tyTqw8fe0ZC1k17gzqrzakqj7PV2/TCSFe831m2NRbDB3
VVssJwZf/6475S91H9CCafRkx7NatcAuizuejFgzhq8Nsv8PP0U8GKtLhhXPnh/QCXEbMz00K2LU3PbM f/+uO+ZPdd+jBVPpsx1PSlUDuyTseDRgTRi+Vsj+P/wc8GCoLuoinjzfoxPiOmR636yAUWPbM75BwbfD
b1D07fCyW0vviMlWxN4UcYpZ/Enjdtf+RQ3SGiZ/vj8oANpKu/UTMV9kR1SDMjPzGZ6y5MQwnZvwWfX7 Zbem3hGB8T5/U1ze1FlA42ZbvwKDtIazP98fAIC2Um/8RIyDbIlKUGZkPvunLDoynM9N/1n1+9nUP5dR
2RSey6SbnWPyMwAA//9KY9sL0woAAA== MzuH6GcAAAD//0pj2wvTCgAA
`, `,
}, },
"/content-descriptor.json": { "/content-descriptor.json": {
local: "content-descriptor.json", local: "content-descriptor.json",
size: 1085, size: 1085,
modtime: 1498025574, modtime: 1515512099,
compressed: ` compressed: `
H4sIAAAJbogA/5yTwW7UMBCG73mKUVqpl27NoeIQVb3AnQPcEAevPY6nbGwznlW1oL47mniXJoAo3Vsy H4sIAAAAAAAC/5yTwW7UMBCG73mKUVqpl27NoeIQVb3AnQPcEAevPY6nbGwznlW1oL47mniXJoAo3Vsy
+r+Zz8n4RwfQe6yOqQjl1A/QfyiY3uUklhIy6BMmgffHUGb4WNBRIGdn4lpbXFYXcbKKR5EyGPNQc9q0 +r+Zz8n4RwfQe6yOqQjl1A/QfyiY3uUklhIy6BMmgffHUGb4WNBRIGdn4lpbXFYXcbKKR5EyGPNQc9q0
6k3m0Xi2QTZvbk2rXTSO/AmpgzG5YHKnyXXGWtr4X9MbJ4eCSubtAzpptcK5IAth7QfQgwH0E3qyn1q4 6k3m0Xi2QTZvbk2rXTSO/AmpgzG5YHKnyXXGWtr4X9MbJ4eCSubtAzpptcK5IAth7QfQgwH0E3qyn1q4
lf48r0SEOadNIQfQAmNAxuTQw2LGjF8yBuU8hrp5FrvRE18Yj4ESae9qnqfP7FNr0Vf6/pKPRoASbA+C lf48r0SEOadNIQfQAmNAxuTQw2LGjF8yBuU8hrp5FrvRE18Yj4ESae9qnqfP7FNr0Vf6/pKPRoASbA+C
@ -238,9 +240,9 @@ ERcrb5b9zhBc4s2zO7r2jN/2xKhin3+/McttXS9NB/Cle+p+BgAA///HjexwPQQAAA==
"/defs-descriptor.json": { "/defs-descriptor.json": {
local: "defs-descriptor.json", local: "defs-descriptor.json",
size: 922, size: 922,
modtime: 1498025574, modtime: 1515512099,
compressed: ` compressed: `
H4sIAAAJbogA/6STX2/TMBTF3/spLl7FgDZN4QFp0Ria2DsP42lTV93ZN/Ed8R/ZrqYy9bsjJ1naFYFA H4sIAAAAAAAC/6STX2/TMBTF3/spLl7FgDZN4QFp0Ria2DsP42lTV93ZN/Ed8R/ZrqYy9bsjJ1naFYFA
PCSyj67Pub8b52kCIBRFGdgndlZUIK6oZst5F8FjSCw3LQZIDr56sl+cTciWAlwNx1yAa0+Sa5bYecx7 PCSyj67Pub8b52kCIBRFGdgndlZUIK6oZst5F8FjSCw3LQZIDr56sl+cTciWAlwNx1yAa0+Sa5bYecx7
09FFVJBzAIQhxfht62mUAASrnKpT8rEqS+fJyueMuHChKaPUZLBkgw2Vakwt927zZ6/Ue4uYAttmr3tM 09FFVJBzAIQhxfht62mUAASrnKpT8rEqS+fJyueMuHChKaPUZLBkgw2Vakwt927zZ6/Ue4uYAttmr3tM
iUKHd3d7Wdxg8WNZnK32y1cn09fF3XoxWz0t5+8/fNyVf1c2FV3Erk8SihuK6ZDuaLhJE8iw9ck1Ab1m iUKHd3d7Wdxg8WNZnK32y1cn09fF3XoxWz0t5+8/fNyVf1c2FV3Erk8SihuK6ZDuaLhJE8iw9ck1Ab1m
@ -254,9 +256,9 @@ ELULBvNXEJvAYtB3LzDQWpfw5fX8n7t46Dc2PQ1UZz9FdVw8RGdPyoPfojTor7ve+/cw50l+dpOfAQAA
"/defs.json": { "/defs.json": {
local: "defs.json", local: "defs.json",
size: 1670, size: 1670,
modtime: 1498025574, modtime: 1515512099,
compressed: ` compressed: `
H4sIAAAJbogA/7STza6bMBCF9zzFyO2S9oJtbGDb7hMpy6oLSiaJq2AjY6RWEe9e8RNChFuJKneRgGc8 H4sIAAAAAAAC/7STza6bMBCF9zzFyO2S9oJtbGDb7hMpy6oLSiaJq2AjY6RWEe9e8RNChFuJKneRgGc8
3zmeMbcAgByxKa2qnTKa5EC+4klp1a8aaBs8grtY054vpnXgLgi7GvUXo12hNFo41FiqkyqLoTwceTOA 3zmeMbcAgByxKa2qnTKa5EC+4klp1a8aaBs8grtY054vpnXgLgi7GvUXo12hNFo41FiqkyqLoTwceTOA
5NBLABClXTqvAIj7XWOvprTDM9qhckhUSquqrUgOn2KaPsLFrykcUzkEu3Amx2IrmlEpfPA+vsIzuhVP 5NBLABClXTqvAIj7XWOvprTDM9qhckhUSquqrUgOn2KaPsLFrykcUzkEu3Amx2IrmlEpfPA+vsIzuhVP
Yy55ygT3aczJlZDgW4UyShmTNGIiTbiUIooij6Jn15N0+x/T8enQJFlxN8/GBxZJwtbozXPxoTnNeCYk Yy55ygT3aczJlZDgW4UyShmTNGIiTbiUIooij6Jn15N0+x/T8enQJFlxN8/GBxZJwtbozXPxoTnNeCYk
@ -269,29 +271,29 @@ fIvD7in0ryMEy+fK1G6UfmdTE+tvpoL+1wV/AgAA//96IpqyhgYAAA==
"/image-index-schema.json": { "/image-index-schema.json": {
local: "image-index-schema.json", local: "image-index-schema.json",
size: 2993, size: 2993,
modtime: 1498025574, modtime: 1515512099,
compressed: ` compressed: `
H4sIAAAJbogA/6yWv27bMBDGdz/FQQmQJYmKIuhgBFnaJVOHBl2KDAx5ki61SPVIJ3ELv3tBMrIlUXZt H4sIAAAAAAAC/6yWz0/jOhDH7/0rRgGJC5CnJ/QOFeLy9sJpD4v2suJg7EkybGNnx1Ogu+r/vrJN2qRJ
1Zt95H33+07892cGkCm0kqlxZHQ2h+xrg/qz0U6QRob7WpQI91rhG3xrUFJBUoSplz733MoKa+HzKuea C4Te2rHnO5/vxL/+zAAyg14zNULOZnPIvjZo/3dWFFlkuK1ViXBrDb7AtwY1FaRVnHoeck+9rrBWIa8S
eZ4/W6OvYvTacJkrFoW7+nCTx9hZzCPVpth5npsGtWxL2pAWZ+fky+fky8dEt2rQp5qnZ5Quxho2DbIj aeZ5/uidvUjRS8dlblgVcvHPVZ5iJymPTJvi53nuGrS6LeljWpqdUyifUyifEmXVYEh1D4+oJcUadg2y
tNkcvCWALOZ/R7bRVgynbh8qslAQLhTYaA8tuAohVIZQGaIYvEQ1EBaEBtIOS+SAEJQneMr7mBup1mVS EPpsDsESQJbyvyP7ZCuFh27vKvJQEC4M+GQPPUiFECtDrAxJDJ6SGigPygJZwRI5IkTlCZ7yPuZGqnU5
oyZN9bLO5vBxGxNvbSyE1nEkq4WmAq2zXfutsmAWqw67w7o772g7bbEv7+01W+jxr/Y+wvhrSYy+1o9N qFGTpXpZZ3P4dxtTL20shtZpJKuVpQK9+K79Vlkxq1WHXbDuzvuwnbbYl9f2ui30+Fd7HWH8tSTGUOvH
1MOjIvHg0y67YUu/BxFFJVqXbUKPHfGRhZHI9wfSBeLXQpjtPYApwuJgLJBRS1SQWAoi54yFz1ZY2Cu1 Jhrg0ZC6C2nn3bCn3zsRQyV6yTah+474yMIYyPcHhgskrIU4O3gAV8TFwVggo9VoYGApipwyFiHbYOEv
6cm13x1nucKCNPkKNt+SdBTWqelDOP1EIA1PK4d2EusIIGn36WY33Hv/D8GTvGqcKVk0FUmQFcqfdllD zKYnl2F3nOQGC7IUKvh8S9JRWA9Nv4czTASy8LAS9JNYRwDJyn9X++Fe+/8ePM2rRlzJqqlIg65Q//TL
VGhxI+Olt+H/NsI5ZA0Xt2JRGiZX1XfzW78WFaq7i+l9H66boa8lL4arJnUlYEER3U+Hgk0NrxXJCpw/ GpJCi5sYz4ON8LdRIsgWzq7VonRMUtU38+uwFg2am7Ppfd9dN7u+lrzwb7pSsKCEHqZDwa6G54p0BRLO
V6IXqMUKnhCUedULIxSq6dSBaidzsxCuMFyn3Mdt5o3OgHPnNoY9WzmMCZYVOZRuyTjIA8hMz1NvD8Pe leQFarWCBwTjnu3CKYNmOnWk2svcLJQUjush98c280Znh3PvNj60leOYYl2RoJYl404eQOZ6nnp7+PA+
fZxqp+OT3ed7oTvtsI5Jl9lgwnrM5inxjD0N1PVLckueAm4jexrIAoX/Dqdu4VZ3D2b/suyWTa7Ng00C HmoPxye7zw9Cd9rhhcmW2c6E9ZjNY+I5fxyoy6fBLXkMuI3scSALVOE7HLuFW90DmP3Lslt2cG2+2yTA
rP9p+0UwCZ0erof0cLbrX//IEFobFx50I6fdcV3dHlx5V3XyWdcVmY15aX+te8+ecUeTXmdjNv7HgAcN +k3bT4pJWRm3/EYPZ/v+9Y8MZa2T+KDznz01tgdX3lWdfNZ1RWZjXtpf696zZ9zRpNfZmI3PGAigEXN4
mOlZmY29BDtPuBnA42w9+xsAAP//IKe/nbELAAA= VmZjL8HOE24GcD9bz/4GAAD//yCnv52xCwAA
`, `,
}, },
"/image-layout-schema.json": { "/image-layout-schema.json": {
local: "image-layout-schema.json", local: "image-layout-schema.json",
size: 439, size: 439,
modtime: 1498025574, modtime: 1515512099,
compressed: ` compressed: `
H4sIAAAJbogA/2yPQUvEMBCF7/0VQ/Sg4DYVPOW6pwVhD4IX8VDTaTvLNonJVFik/12SaRXRU5g38+W9 H4sIAAAAAAAC/2yPQUvEMBCF7/0VQ/Sg4DYVPOW6pwVhD4IX8VDTaTvLNonJVFik/12SaRXRU5g38+W9
91kBqA6TjRSYvFMG1DGg23vHLTmMcJjaAeGxvfiZ4cmOOLXqLlPXSQYDamQORutT8m4nau3joLvY9rxr 91kBqA6TjRSYvFMG1DGg23vHLTmMcJjaAeGxvfiZ4cmOOLXqLlPXSQYDamQORutT8m4nau3joLvY9rxr
HrRoV8JRtyHJaO0DOruZpYLJtaZsrM/FWEi+BMysfzuhXbUQfcDIhEkZyG2yQyYl8TPGJLVk97fth1yA HrRoV8JRtyHJaO0DOruZpYLJtaZsrM/FWEi+BMysfzuhXbUQfcDIhEkZyG2yQyYl8TPGJLVk97fth1yA
74FHhOP+8LvyDbmy8JZ2EgZ6OuNtsS8fbrESR3LDj45unpSBl3UGUPd1UzdqnV/Lu1QAS2kS8X2miN03 74FHhOP+8LvyDbmy8JZ2EgZ6OuNtsS8fbrESR3LDj45unpSBl3UGUPd1UzdqnV/Lu1QAS2kS8X2miN03
@ -302,9 +304,9 @@ HrRoV8JRtyHJaO0DOruZpYLJtaZsrM/FWEi+BMysfzuhXbUQfcDIhEkZyG2yQyYl8TPGJLVk97fth1yA
"/image-manifest-schema.json": { "/image-manifest-schema.json": {
local: "image-manifest-schema.json", local: "image-manifest-schema.json",
size: 921, size: 921,
modtime: 1498025574, modtime: 1515512099,
compressed: ` compressed: `
H4sIAAAJbogA/5ySMW8iMRCF+/0VI0MJ+O501bZXUZxSJEoTpXB2x7uDWNsZmygo4r9HtnHAkCKifTvv H4sIAAAAAAAC/5ySMW8iMRCF+/0VI0MJ+O501bZXUZxSJEoTpXB2x7uDWNsZmygo4r9HtnHAkCKifTvv
zTdv/dEAiB59x+QCWSNaEHcOzT9rgiKDDOtJDQj/lSGNPsC9w440dSpNL6J97rsRJxWtYwiulXLjrVlm zTdv/dEAiB59x+QCWSNaEHcOzT9rgiKDDOtJDQj/lSGNPsC9w440dSpNL6J97rsRJxWtYwiulXLjrVlm
dWV5kD0rHZa//sqszbKP+mLxrZTWoenKVp9seVpSJJDTkSB7w95hdNuXDXZHzbF1yIHQixbiYQAiRzwi dWV5kD0rHZa//sqszbKP+mLxrZTWoenKVp9seVpSJJDTkSB7w95hdNuXDXZHzbF1yIHQixbiYQAiRzwi
+3xclq9vfhjJgybc9uDzheghjAhpOZTlkPPgLQeC8qAMkAk4ICeKFH7bZbKG/Uort16tmcjQtJtEC39O +3xclq9vfhjJgybc9uDzheghjAhpOZTlkPPgLQeC8qAMkAk4ICeKFH7bZbKG/Uort16tmcjQtJtEC39O
@ -316,6 +318,6 @@ Dj+ZAwAA
"/": { "/": {
isDir: true, isDir: true,
local: "/", local: "",
}, },
} }

View File

@ -0,0 +1,126 @@
// Copyright 2018 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"github.com/xeipuuv/gojsonreference"
"github.com/xeipuuv/gojsonschema"
)
// fsLoaderFactory implements gojsonschema.JSONLoaderFactory by reading files under the specified namespaces from the root of fs.
type fsLoaderFactory struct {
namespaces []string
fs http.FileSystem
}
// newFSLoaderFactory returns a fsLoaderFactory reading files under the specified namespaces from the root of fs.
func newFSLoaderFactory(namespaces []string, fs http.FileSystem) *fsLoaderFactory {
return &fsLoaderFactory{
namespaces: namespaces,
fs: fs,
}
}
func (factory *fsLoaderFactory) New(source string) gojsonschema.JSONLoader {
return &fsLoader{
factory: factory,
source: source,
}
}
// refContents returns the contents of ref, if available in fsLoaderFactory.
func (factory *fsLoaderFactory) refContents(ref gojsonreference.JsonReference) ([]byte, error) {
refStr := ref.String()
path := ""
for _, ns := range factory.namespaces {
if strings.HasPrefix(refStr, ns) {
path = "/" + strings.TrimPrefix(refStr, ns)
break
}
}
if path == "" {
return nil, fmt.Errorf("Schema reference %#v unexpectedly not available in fsLoaderFactory with namespaces %#v", path, factory.namespaces)
}
f, err := factory.fs.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
return ioutil.ReadAll(f)
}
// fsLoader implements gojsonschema.JSONLoader by reading the document named by source from a fsLoaderFactory.
type fsLoader struct {
factory *fsLoaderFactory
source string
}
// JsonSource implements gojsonschema.JSONLoader.JsonSource. The "Json" capitalization needs to be maintained to conform to the interface.
func (l *fsLoader) JsonSource() interface{} { // nolint: golint
return l.source
}
func (l *fsLoader) LoadJSON() (interface{}, error) {
// Based on gojsonschema.jsonReferenceLoader.LoadJSON.
reference, err := gojsonreference.NewJsonReference(l.source)
if err != nil {
return nil, err
}
refToURL := reference
refToURL.GetUrl().Fragment = ""
body, err := l.factory.refContents(refToURL)
if err != nil {
return nil, err
}
return decodeJSONUsingNumber(bytes.NewReader(body))
}
// decodeJSONUsingNumber returns JSON parsed from an io.Reader
func decodeJSONUsingNumber(r io.Reader) (interface{}, error) {
// Copied from gojsonschema.
var document interface{}
decoder := json.NewDecoder(r)
decoder.UseNumber()
err := decoder.Decode(&document)
if err != nil {
return nil, err
}
return document, nil
}
// JsonReference implements gojsonschema.JSONLoader.JsonReference. The "Json" capitalization needs to be maintained to conform to the interface.
func (l *fsLoader) JsonReference() (gojsonreference.JsonReference, error) { // nolint: golint
return gojsonreference.NewJsonReference(l.JsonSource().(string))
}
func (l *fsLoader) LoaderFactory() gojsonschema.JSONLoaderFactory {
return l.factory
}
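
For context on decodeJSONUsingNumber above, a small standalone sketch (not part of the vendored file; the literal value is illustrative) of what decoder.UseNumber() buys: numeric literals are kept verbatim as json.Number instead of being rounded through float64.

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	const body = `{"size": 18446744073709551615}`

	// Default decoding converts every JSON number to float64 and rounds the value.
	var plain map[string]interface{}
	_ = json.Unmarshal([]byte(body), &plain)
	fmt.Printf("%T %v\n", plain["size"], plain["size"]) // float64 1.8446744073709552e+19

	// With UseNumber the value is kept verbatim as a json.Number string,
	// which is what decodeJSONUsingNumber relies on.
	dec := json.NewDecoder(strings.NewReader(body))
	dec.UseNumber()
	var exact map[string]interface{}
	_ = dec.Decode(&exact)
	fmt.Printf("%T %v\n", exact["size"], exact["size"]) // json.Number 18446744073709551615
}
```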

View File

@ -35,13 +35,36 @@ var (
// having the OCI JSON schema files in root "/". // having the OCI JSON schema files in root "/".
fs = _escFS(false) fs = _escFS(false)
// specs maps OCI schema media types to schema files. // schemaNamespaces is a set of URI prefixes which are treated as containing the schema files of fs.
// This is necessary because *.json schema files in this directory use "id" and "$ref" attributes which evaluate to such URIs, e.g.
// ./image-manifest-schema.json URI contains
// "id": "https://opencontainers.org/schema/image/manifest",
// and
// "$ref": "content-descriptor.json"
// which evaluates as a link to https://opencontainers.org/schema/image/content-descriptor.json .
//
// To support such links without accessing the network (and trying to load content which is not hosted at these URIs),
// fsLoaderFactory accepts any URI starting with one of the schemaNamespaces below,
// and uses _escFS to load them from the root of its in-memory filesystem tree.
//
// (Note that this must contain subdirectories before its parent directories for fsLoaderFactory.refContents to work.)
schemaNamespaces = []string{
"https://opencontainers.org/schema/image/descriptor/",
"https://opencontainers.org/schema/image/index/",
"https://opencontainers.org/schema/image/manifest/",
"https://opencontainers.org/schema/image/",
"https://opencontainers.org/schema/",
}
// specs maps OCI schema media types to schema URIs.
// These URIs are expected to be used only by fsLoaderFactory (which trims schemaNamespaces defined above)
// and should never cause a network access.
specs = map[Validator]string{ specs = map[Validator]string{
ValidatorMediaTypeDescriptor: "content-descriptor.json", ValidatorMediaTypeDescriptor: "https://opencontainers.org/schema/content-descriptor.json",
ValidatorMediaTypeLayoutHeader: "image-layout-schema.json", ValidatorMediaTypeLayoutHeader: "https://opencontainers.org/schema/image/image-layout-schema.json",
ValidatorMediaTypeManifest: "image-manifest-schema.json", ValidatorMediaTypeManifest: "https://opencontainers.org/schema/image/image-manifest-schema.json",
ValidatorMediaTypeImageIndex: "image-index-schema.json", ValidatorMediaTypeImageIndex: "https://opencontainers.org/schema/image/image-index-schema.json",
ValidatorMediaTypeImageConfig: "config-schema.json", ValidatorMediaTypeImageConfig: "https://opencontainers.org/schema/image/config-schema.json",
} }
) )
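
A minimal sketch of the resolution-plus-trim behaviour described in the comment above, using only the standard library. The manifest id and the "content-descriptor.json" $ref are taken from that comment; the package main wrapper and printed output are illustrative, not part of the vendored code.

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	// The manifest schema's id is https://opencontainers.org/schema/image/manifest
	// and it refers to "content-descriptor.json"; plain RFC 3986 resolution
	// lands inside one of the schemaNamespaces.
	base, _ := url.Parse("https://opencontainers.org/schema/image/manifest")
	ref, _ := url.Parse("content-descriptor.json")
	resolved := base.ResolveReference(ref).String()
	fmt.Println(resolved) // https://opencontainers.org/schema/image/content-descriptor.json

	// fsLoaderFactory.refContents strips the first matching namespace prefix
	// (subdirectories listed before their parents) and reads the remainder
	// from the embedded filesystem instead of the network.
	namespaces := []string{
		"https://opencontainers.org/schema/image/descriptor/",
		"https://opencontainers.org/schema/image/index/",
		"https://opencontainers.org/schema/image/manifest/",
		"https://opencontainers.org/schema/image/",
		"https://opencontainers.org/schema/",
	}
	for _, ns := range namespaces {
		if strings.HasPrefix(resolved, ns) {
			fmt.Println("/" + strings.TrimPrefix(resolved, ns)) // /content-descriptor.json
			break
		}
	}
}
```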

View File

@ -67,7 +67,7 @@ func (v Validator) Validate(src io.Reader) error {
} }
} }
sl := gojsonschema.NewReferenceLoaderFileSystem("file:///"+specs[v], fs) sl := newFSLoaderFactory(schemaNamespaces, fs).New(specs[v])
ml := gojsonschema.NewStringLoader(string(buf)) ml := gojsonschema.NewStringLoader(string(buf))
result, err := gojsonschema.Validate(sl, ml) result, err := gojsonschema.Validate(sl, ml)
@ -157,7 +157,7 @@ func validateIndex(r io.Reader) error {
err = json.Unmarshal(buf, &header) err = json.Unmarshal(buf, &header)
if err != nil { if err != nil {
return errors.Wrap(err, "manifestlist format mismatch") return errors.Wrap(err, "index format mismatch")
} }
for _, manifest := range header.Manifests { for _, manifest := range header.Manifests {
@ -217,8 +217,8 @@ func checkPlatform(OS string, Architecture string) {
return return
} }
} }
fmt.Printf("warning: combination of %q and %q is invalid.", OS, Architecture) fmt.Printf("warning: combination of %q and %q is invalid.\n", OS, Architecture)
} }
} }
fmt.Printf("warning: operating system %q of the bundle is not supported yet.", OS) fmt.Printf("warning: operating system %q of the bundle is not supported yet.\n", OS)
} }

View File

@ -25,7 +25,7 @@ const (
VersionPatch = 0 VersionPatch = 0
// VersionDev indicates development branch. Releases will be empty string. // VersionDev indicates development branch. Releases will be empty string.
VersionDev = "" VersionDev = "-dev"
) )
// Version is the specification version that the package types support. // Version is the specification version that the package types support.

View File

@ -56,7 +56,7 @@ make BUILDTAGS='seccomp apparmor'
|-----------|------------------------------------|-------------| |-----------|------------------------------------|-------------|
| seccomp | Syscall filtering | libseccomp | | seccomp | Syscall filtering | libseccomp |
| selinux | selinux process and mount labeling | <none> | | selinux | selinux process and mount labeling | <none> |
| apparmor | apparmor profile support | libapparmor | | apparmor | apparmor profile support | <none> |
| ambient | ambient capability support | kernel 4.3 | | ambient | ambient capability support | kernel 4.3 |

View File

@ -22,7 +22,6 @@
#include <sys/types.h> #include <sys/types.h>
#include <sys/wait.h> #include <sys/wait.h>
#include <linux/limits.h> #include <linux/limits.h>
#include <linux/netlink.h> #include <linux/netlink.h>
#include <linux/types.h> #include <linux/types.h>
@ -73,7 +72,7 @@ struct nlconfig_t {
char *oom_score_adj; char *oom_score_adj;
size_t oom_score_adj_len; size_t oom_score_adj_len;
/* User namespace settings.*/ /* User namespace settings. */
char *uidmap; char *uidmap;
size_t uidmap_len; size_t uidmap_len;
char *gidmap; char *gidmap;
@ -82,7 +81,7 @@ struct nlconfig_t {
size_t namespaces_len; size_t namespaces_len;
uint8_t is_setgroup; uint8_t is_setgroup;
/* Rootless container settings.*/ /* Rootless container settings. */
uint8_t is_rootless; uint8_t is_rootless;
char *uidmappath; char *uidmappath;
size_t uidmappath_len; size_t uidmappath_len;
@ -167,7 +166,7 @@ static int write_file(char *data, size_t data_len, char *pathfmt, ...)
goto out; goto out;
} }
out: out:
close(fd); close(fd);
return ret; return ret;
} }
@ -226,14 +225,14 @@ static int try_mapping_tool(const char *app, int pid, char *map, size_t map_len)
if (!child) { if (!child) {
#define MAX_ARGV 20 #define MAX_ARGV 20
char *argv[MAX_ARGV]; char *argv[MAX_ARGV];
char *envp[] = {NULL}; char *envp[] = { NULL };
char pid_fmt[16]; char pid_fmt[16];
int argc = 0; int argc = 0;
char *next; char *next;
snprintf(pid_fmt, 16, "%d", pid); snprintf(pid_fmt, 16, "%d", pid);
argv[argc++] = (char *) app; argv[argc++] = (char *)app;
argv[argc++] = pid_fmt; argv[argc++] = pid_fmt;
/* /*
* Convert the map string into a list of argument that * Convert the map string into a list of argument that
@ -533,7 +532,7 @@ void nsexec(void)
int pipenum; int pipenum;
jmp_buf env; jmp_buf env;
int sync_child_pipe[2], sync_grandchild_pipe[2]; int sync_child_pipe[2], sync_grandchild_pipe[2];
struct nlconfig_t config = {0}; struct nlconfig_t config = { 0 };
/* /*
* If we don't have an init pipe, just return to the go routine. * If we don't have an init pipe, just return to the go routine.
@ -637,14 +636,14 @@ void nsexec(void)
* it will send us its PID which we will send to the bootstrap * it will send us its PID which we will send to the bootstrap
* process. * process.
*/ */
case JUMP_PARENT: { case JUMP_PARENT:{
int len; int len;
pid_t child, first_child = -1; pid_t child, first_child = -1;
char buf[JSON_MAX]; char buf[JSON_MAX];
bool ready = false; bool ready = false;
/* For debugging. */ /* For debugging. */
prctl(PR_SET_NAME, (unsigned long) "runc:[0:PARENT]", 0, 0, 0); prctl(PR_SET_NAME, (unsigned long)"runc:[0:PARENT]", 0, 0, 0);
/* Start the process of getting a container. */ /* Start the process of getting a container. */
child = clone_parent(&env, JUMP_CHILD); child = clone_parent(&env, JUMP_CHILD);
@ -702,7 +701,7 @@ void nsexec(void)
bail("failed to sync with child: write(SYNC_USERMAP_ACK)"); bail("failed to sync with child: write(SYNC_USERMAP_ACK)");
} }
break; break;
case SYNC_RECVPID_PLS: { case SYNC_RECVPID_PLS:{
first_child = child; first_child = child;
/* Get the init_func pid. */ /* Get the init_func pid. */
@ -790,7 +789,7 @@ void nsexec(void)
* (stage 2: JUMP_INIT) for PID namespace. We then send the * (stage 2: JUMP_INIT) for PID namespace. We then send the
* child's PID to our parent (stage 0). * child's PID to our parent (stage 0).
*/ */
case JUMP_CHILD: { case JUMP_CHILD:{
pid_t child; pid_t child;
enum sync_t s; enum sync_t s;
@ -799,7 +798,7 @@ void nsexec(void)
close(sync_child_pipe[1]); close(sync_child_pipe[1]);
/* For debugging. */ /* For debugging. */
prctl(PR_SET_NAME, (unsigned long) "runc:[1:CHILD]", 0, 0, 0); prctl(PR_SET_NAME, (unsigned long)"runc:[1:CHILD]", 0, 0, 0);
/* /*
* We need to setns first. We cannot do this earlier (in stage 0) * We need to setns first. We cannot do this earlier (in stage 0)
@ -907,7 +906,7 @@ void nsexec(void)
* final cleanup steps and then return to the Go runtime to allow * final cleanup steps and then return to the Go runtime to allow
* init_linux.go to run. * init_linux.go to run.
*/ */
case JUMP_INIT: { case JUMP_INIT:{
/* /*
* We're inside the child now, having jumped from the * We're inside the child now, having jumped from the
* start_child() code after forking in the parent. * start_child() code after forking in the parent.
@ -921,7 +920,7 @@ void nsexec(void)
close(sync_child_pipe[1]); close(sync_child_pipe[1]);
/* For debugging. */ /* For debugging. */
prctl(PR_SET_NAME, (unsigned long) "runc:[2:INIT]", 0, 0, 0); prctl(PR_SET_NAME, (unsigned long)"runc:[2:INIT]", 0, 0, 0);
if (read(syncfd, &s, sizeof(s)) != sizeof(s)) if (read(syncfd, &s, sizeof(s)) != sizeof(s))
bail("failed to sync with parent: read(SYNC_GRANDCHILD)"); bail("failed to sync with parent: read(SYNC_GRANDCHILD)");

View File

@ -134,3 +134,14 @@ func RunningInUserNS() bool {
func SetSubreaper(i int) error { func SetSubreaper(i int) error {
return unix.Prctl(PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0) return unix.Prctl(PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0)
} }
// GetSubreaper returns the subreaper setting for the calling process
func GetSubreaper() (int, error) {
var i uintptr
if err := unix.Prctl(unix.PR_GET_CHILD_SUBREAPER, uintptr(unsafe.Pointer(&i)), 0, 0, 0); err != nil {
return -1, err
}
return int(i), nil
}

View File

@ -1,4 +1,4 @@
// +build cgo,linux cgo,freebsd // +build cgo,linux
package system package system

View File

@ -1,38 +0,0 @@
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
package user
import (
"io"
"syscall"
)
func GetPasswdPath() (string, error) {
return "", ErrUnsupported
}
func GetPasswd() (io.ReadCloser, error) {
return nil, ErrUnsupported
}
func GetGroupPath() (string, error) {
return "", ErrUnsupported
}
func GetGroup() (io.ReadCloser, error) {
return nil, ErrUnsupported
}
// CurrentUser looks up the current user by their user id in /etc/passwd. If the
// user cannot be found (or there is no /etc/passwd file on the filesystem),
// then CurrentUser returns an error.
func CurrentUser() (User, error) {
return LookupUid(syscall.Getuid())
}
// CurrentGroup looks up the current user's group by their primary group id's
// entry in /etc/passwd. If the group cannot be found (or there is no
// /etc/group file on the filesystem), then CurrentGroup returns an error.
func CurrentGroup() (Group, error) {
return LookupGid(syscall.Getgid())
}

View File

@ -28,7 +28,6 @@ const (
selinuxConfig = selinuxDir + "config" selinuxConfig = selinuxDir + "config"
selinuxTypeTag = "SELINUXTYPE" selinuxTypeTag = "SELINUXTYPE"
selinuxTag = "SELINUX" selinuxTag = "SELINUX"
selinuxPath = "/sys/fs/selinux"
xattrNameSelinux = "security.selinux" xattrNameSelinux = "security.selinux"
stRdOnly = 0x01 stRdOnly = 0x01
) )
@ -205,7 +204,7 @@ func readCon(name string) (string, error) {
defer in.Close() defer in.Close()
_, err = fmt.Fscanf(in, "%s", &val) _, err = fmt.Fscanf(in, "%s", &val)
return val, err return strings.Trim(val, "\x00"), err
} }
// SetFileLabel sets the SELinux label for this path or returns an error. // SetFileLabel sets the SELinux label for this path or returns an error.
@ -275,6 +274,32 @@ func writeCon(name string, val string) error {
return err return err
} }
/*
CanonicalizeContext takes a context string, writes it to the kernel, and then
returns the context that the kernel will actually use. This can be used to
check whether two contexts are equivalent.
*/
func CanonicalizeContext(val string) (string, error) {
return readWriteCon(filepath.Join(getSelinuxMountPoint(), "context"), val)
}
func readWriteCon(name string, val string) (string, error) {
var retval string
f, err := os.OpenFile(name, os.O_RDWR, 0)
if err != nil {
return "", err
}
defer f.Close()
_, err = f.Write([]byte(val))
if err != nil {
return "", err
}
_, err = fmt.Fscanf(f, "%s", &retval)
return strings.Trim(retval, "\x00"), err
}
/* /*
SetExecLabel sets the SELinux label that the kernel will use for any programs SetExecLabel sets the SELinux label that the kernel will use for any programs
that are executed by the current process thread, or an error. that are executed by the current process thread, or an error.
@ -311,7 +336,7 @@ func ReserveLabel(label string) {
} }
func selinuxEnforcePath() string { func selinuxEnforcePath() string {
return fmt.Sprintf("%s/enforce", selinuxPath) return fmt.Sprintf("%s/enforce", getSelinuxMountPoint())
} }
// EnforceMode returns the current SELinux mode Enforcing, Permissive, Disabled // EnforceMode returns the current SELinux mode Enforcing, Permissive, Disabled
@ -509,7 +534,7 @@ exit:
// SecurityCheckContext validates that the SELinux label is understood by the kernel // SecurityCheckContext validates that the SELinux label is understood by the kernel
func SecurityCheckContext(val string) error { func SecurityCheckContext(val string) error {
return writeCon(fmt.Sprintf("%s.context", selinuxPath), val) return writeCon(fmt.Sprintf("%s/context", getSelinuxMountPoint()), val)
} }
/* /*

View File

@ -1,4 +1,4 @@
# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) # errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge)
Package errors provides simple error handling primitives. Package errors provides simple error handling primitives.
@ -47,6 +47,6 @@ We welcome pull requests, bug fixes and issue reports. With that said, the bar f
Before proposing a change, please discuss your change by raising an issue. Before proposing a change, please discuss your change by raising an issue.
## Licence ## License
BSD-2-Clause BSD-2-Clause

View File

@ -46,7 +46,8 @@ func (f Frame) line() int {
// //
// Format accepts flags that alter the printing of some verbs, as follows: // Format accepts flags that alter the printing of some verbs, as follows:
// //
// %+s path of source file relative to the compile time GOPATH // %+s function name and path of source file relative to the compile time
// GOPATH separated by \n\t (<funcname>\n\t<path>)
// %+v equivalent to %+s:%d // %+v equivalent to %+s:%d
func (f Frame) Format(s fmt.State, verb rune) { func (f Frame) Format(s fmt.State, verb rune) {
switch verb { switch verb {
@ -144,43 +145,3 @@ func funcname(name string) string {
i = strings.Index(name, ".") i = strings.Index(name, ".")
return name[i+1:] return name[i+1:]
} }
func trimGOPATH(name, file string) string {
// Here we want to get the source file path relative to the compile time
// GOPATH. As of Go 1.6.x there is no direct way to know the compiled
// GOPATH at runtime, but we can infer the number of path segments in the
// GOPATH. We note that fn.Name() returns the function name qualified by
// the import path, which does not include the GOPATH. Thus we can trim
// segments from the beginning of the file path until the number of path
// separators remaining is one more than the number of path separators in
// the function name. For example, given:
//
// GOPATH /home/user
// file /home/user/src/pkg/sub/file.go
// fn.Name() pkg/sub.Type.Method
//
// We want to produce:
//
// pkg/sub/file.go
//
// From this we can easily see that fn.Name() has one less path separator
// than our desired output. We count separators from the end of the file
// path until it finds two more than in the function name and then move
// one character forward to preserve the initial path segment without a
// leading separator.
const sep = "/"
goal := strings.Count(name, sep) + 2
i := len(file)
for n := 0; n < goal; n++ {
i = strings.LastIndex(file[:i], sep)
if i == -1 {
// not enough separators found, set i so that the slice expression
// below leaves file unmodified
i = -len(sep)
break
}
}
// get back to 0 or trim the leading separator
file = file[i+len(sep):]
return file
}

View File

@ -1,6 +1,39 @@
# gojsonpointer # gojsonpointer
An implementation of JSON Pointer - Go language An implementation of JSON Pointer - Go language
## Usage
jsonText := `{
"name": "Bobby B",
"occupation": {
"title" : "King",
"years" : 15,
"heir" : "Joffrey B"
}
}`
var jsonDocument map[string]interface{}
json.Unmarshal([]byte(jsonText), &jsonDocument)
//create a JSON pointer
pointerString := "/occupation/title"
pointer, _ := NewJsonPointer(pointerString)
//SET a new value for the "title" in the document
pointer.Set(jsonDocument, "Supreme Leader of Westeros")
//GET the new "title" from the document
title, _, _ := pointer.Get(jsonDocument)
fmt.Println(title) //outputs "Supreme Leader of Westeros"
//DELETE the "heir" from the document
deletePointer, _ := NewJsonPointer("/occupation/heir")
deletePointer.Delete(jsonDocument)
b, _ := json.Marshal(jsonDocument)
fmt.Println(string(b))
//outputs `{"name":"Bobby B","occupation":{"title":"Supreme Leader of Westeros","years":15}}`
## References ## References
http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07

View File

@ -90,6 +90,13 @@ func (p *JsonPointer) Set(document interface{}, value interface{}) (interface{},
} }
// Uses the pointer to delete a value from a JSON document
func (p *JsonPointer) Delete(document interface{}) (interface{}, error) {
is := &implStruct{mode: "DEL", inDocument: document}
p.implementation(is)
return document, is.outError
}
// Both Get and Set functions use the same implementation to avoid code duplication // Both Get and Set functions use the same implementation to avoid code duplication
func (p *JsonPointer) implementation(i *implStruct) { func (p *JsonPointer) implementation(i *implStruct) {
@ -106,9 +113,14 @@ func (p *JsonPointer) implementation(i *implStruct) {
node := i.inDocument node := i.inDocument
previousNodes := make([]interface{}, len(p.referenceTokens))
previousTokens := make([]string, len(p.referenceTokens))
for ti, token := range p.referenceTokens { for ti, token := range p.referenceTokens {
isLastToken := ti == len(p.referenceTokens)-1 isLastToken := ti == len(p.referenceTokens)-1
previousNodes[ti] = node
previousTokens[ti] = token
switch v := node.(type) { switch v := node.(type) {
@ -118,7 +130,11 @@ func (p *JsonPointer) implementation(i *implStruct) {
node = v[decodedToken] node = v[decodedToken]
if isLastToken && i.mode == "SET" { if isLastToken && i.mode == "SET" {
v[decodedToken] = i.setInValue v[decodedToken] = i.setInValue
} else if isLastToken && i.mode =="DEL" {
delete(v,decodedToken)
} }
} else if (isLastToken && i.mode == "SET") {
v[decodedToken] = i.setInValue
} else { } else {
i.outError = fmt.Errorf("Object has no key '%s'", decodedToken) i.outError = fmt.Errorf("Object has no key '%s'", decodedToken)
i.getOutKind = reflect.Map i.getOutKind = reflect.Map
@ -144,6 +160,11 @@ func (p *JsonPointer) implementation(i *implStruct) {
node = v[tokenIndex] node = v[tokenIndex]
if isLastToken && i.mode == "SET" { if isLastToken && i.mode == "SET" {
v[tokenIndex] = i.setInValue v[tokenIndex] = i.setInValue
} else if isLastToken && i.mode =="DEL" {
v[tokenIndex] = v[len(v)-1]
v[len(v)-1] = nil
v = v[:len(v)-1]
previousNodes[ti-1].(map[string]interface{})[previousTokens[ti-1]] = v
} }
default: default:

View File

@ -27,11 +27,12 @@ package gojsonreference
import ( import (
"errors" "errors"
"github.com/xeipuuv/gojsonpointer"
"net/url" "net/url"
"path/filepath" "path/filepath"
"runtime" "runtime"
"strings" "strings"
"github.com/xeipuuv/gojsonpointer"
) )
const ( const (
@ -124,16 +125,21 @@ func (r *JsonReference) parse(jsonReferenceString string) (err error) {
// Creates a new reference from a parent and a child // Creates a new reference from a parent and a child
// If the child cannot inherit from the parent, an error is returned // If the child cannot inherit from the parent, an error is returned
func (r *JsonReference) Inherits(child JsonReference) (*JsonReference, error) { func (r *JsonReference) Inherits(child JsonReference) (*JsonReference, error) {
childUrl := child.GetUrl() if child.GetUrl() == nil {
parentUrl := r.GetUrl()
if childUrl == nil {
return nil, errors.New("childUrl is nil!") return nil, errors.New("childUrl is nil!")
} }
if parentUrl == nil {
if r.GetUrl() == nil {
return nil, errors.New("parentUrl is nil!") return nil, errors.New("parentUrl is nil!")
} }
ref, err := NewJsonReference(parentUrl.ResolveReference(childUrl).String()) // Get a copy of the parent url to make sure we do not modify the original.
// URL reference resolving fails if the fragment of the child is empty, but the parent's is not.
// The fragment of the child must be used, so the fragment of the parent is manually removed.
parentUrl := *r.GetUrl()
parentUrl.Fragment = ""
ref, err := NewJsonReference(parentUrl.ResolveReference(child.GetUrl()).String())
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -1,3 +1,4 @@
[![GoDoc](https://godoc.org/github.com/xeipuuv/gojsonschema?status.svg)](https://godoc.org/github.com/xeipuuv/gojsonschema)
[![Build Status](https://travis-ci.org/xeipuuv/gojsonschema.svg)](https://travis-ci.org/xeipuuv/gojsonschema) [![Build Status](https://travis-ci.org/xeipuuv/gojsonschema.svg)](https://travis-ci.org/xeipuuv/gojsonschema)
# gojsonschema # gojsonschema
@ -189,12 +190,14 @@ Note: An error of RequiredType has an err.Type() return value of "required"
**err.Value()**: *interface{}* Returns the value given **err.Value()**: *interface{}* Returns the value given
**err.Context()**: *gojsonschema.jsonContext* Returns the context. This has a String() method that will print something like this: (root).firstName **err.Context()**: *gojsonschema.JsonContext* Returns the context. This has a String() method that will print something like this: (root).firstName
**err.Field()**: *string* Returns the fieldname in the format firstName, or for embedded properties, person.firstName. This returns the same as the String() method on *err.Context()* but removes the (root). prefix. **err.Field()**: *string* Returns the fieldname in the format firstName, or for embedded properties, person.firstName. This returns the same as the String() method on *err.Context()* but removes the (root). prefix.
**err.Description()**: *string* The error description. This is based on the locale you are using. See the beginning of this section for overwriting the locale with a custom implementation. **err.Description()**: *string* The error description. This is based on the locale you are using. See the beginning of this section for overwriting the locale with a custom implementation.
**err.DescriptionFormat()**: *string* The error description format. This is relevant if you are adding custom validation errors afterwards to the result.
**err.Details()**: *gojsonschema.ErrorDetails* Returns a map[string]interface{} of additional error details specific to the error. For example, GTE errors will have a "min" value, LTE will have a "max" value. See errors.go for a full description of all the error details. Every error always contains a "field" key that holds the value of *err.Field()* **err.Details()**: *gojsonschema.ErrorDetails* Returns a map[string]interface{} of additional error details specific to the error. For example, GTE errors will have a "min" value, LTE will have a "max" value. See errors.go for a full description of all the error details. Every error always contains a "field" key that holds the value of *err.Field()*
Note in most cases, the err.Details() will be used to generate replacement strings in your locales, and not used directly. These strings follow the text/template format i.e. Note in most cases, the err.Details() will be used to generate replacement strings in your locales, and not used directly. These strings follow the text/template format i.e.
@ -285,7 +288,56 @@ func (f ValidUserIdFormatChecker) IsFormat(input interface{}) bool {
gojsonschema.FormatCheckers.Add("ValidUserId", ValidUserIdFormatChecker{}) gojsonschema.FormatCheckers.Add("ValidUserId", ValidUserIdFormatChecker{})
```` ````
## Additional custom validation
After the validation has run and you have the results, you may add additional
errors using `Result.AddError`. This is useful to maintain the same format within the resultset instead
of having to add special exceptions for your own errors. Below is an example.
```go
type AnswerInvalidError struct {
gojsonschema.ResultErrorFields
}
func newAnswerInvalidError(context *gojsonschema.JsonContext, value interface{}, details gojsonschema.ErrorDetails) *AnswerInvalidError {
err := AnswerInvalidError{}
err.SetContext(context)
err.SetType("custom_invalid_error")
// it is important to use SetDescriptionFormat(), as it is used to call SetDescription() once the details have been parsed;
// any description set directly on err will be overridden by this.
err.SetDescriptionFormat("Answer to the Ultimate Question of Life, the Universe, and Everything is {{.answer}}")
err.SetValue(value)
err.SetDetails(details)
return &err
}
func main() {
// ...
schema, err := gojsonschema.NewSchema(schemaLoader)
result, err := gojsonschema.Validate(schemaLoader, documentLoader)
if true { // some validation
jsonContext := gojsonschema.NewJsonContext("question", nil)
errDetail := gojsonschema.ErrorDetails{
"answer": 42,
}
result.AddError(
newAnswerInvalidError(
gojsonschema.NewJsonContext("answer", jsonContext),
52,
errDetail,
),
errDetail,
)
}
return result, err
}
```
This is especially useful if you want to add validation beyond what the
json schema drafts can provide, such as business-specific logic.
## Uses ## Uses

View File

@ -145,10 +145,20 @@ type (
NumberLTError struct { NumberLTError struct {
ResultErrorFields ResultErrorFields
} }
// ConditionThenError. ErrorDetails: -
ConditionThenError struct {
ResultErrorFields
}
// ConditionElseError. ErrorDetails: -
ConditionElseError struct {
ResultErrorFields
}
) )
// newError takes a ResultError type and sets the type, context, description, details, value, and field // newError takes a ResultError type and sets the type, context, description, details, value, and field
func newError(err ResultError, context *jsonContext, value interface{}, locale locale, details ErrorDetails) { func newError(err ResultError, context *JsonContext, value interface{}, locale locale, details ErrorDetails) {
var t string var t string
var d string var d string
switch err.(type) { switch err.(type) {
@ -230,19 +240,26 @@ func newError(err ResultError, context *jsonContext, value interface{}, locale l
case *NumberLTError: case *NumberLTError:
t = "number_lt" t = "number_lt"
d = locale.NumberLT() d = locale.NumberLT()
case *ConditionThenError:
t = "condition_then"
d = locale.ConditionThen()
case *ConditionElseError:
t = "condition_else"
d = locale.ConditionElse()
} }
err.SetType(t) err.SetType(t)
err.SetContext(context) err.SetContext(context)
err.SetValue(value) err.SetValue(value)
err.SetDetails(details) err.SetDetails(details)
err.SetDescriptionFormat(d)
details["field"] = err.Field() details["field"] = err.Field()
if _, exists := details["context"]; !exists && context != nil { if _, exists := details["context"]; !exists && context != nil {
details["context"] = context.String() details["context"] = context.String()
} }
err.SetDescription(formatErrorDescription(d, details)) err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details))
} }
// formatErrorDescription takes a string in the default text/template // formatErrorDescription takes a string in the default text/template

View File

@ -26,20 +26,20 @@ package gojsonschema
import "bytes" import "bytes"
// jsonContext implements a persistent linked-list of strings // JsonContext implements a persistent linked-list of strings
type jsonContext struct { type JsonContext struct {
head string head string
tail *jsonContext tail *JsonContext
} }
func newJsonContext(head string, tail *jsonContext) *jsonContext { func NewJsonContext(head string, tail *JsonContext) *JsonContext {
return &jsonContext{head, tail} return &JsonContext{head, tail}
} }
// String displays the context in reverse. // String displays the context in reverse.
// This plays well with the data structure's persistent nature with // This plays well with the data structure's persistent nature with
// Cons and a json document's tree structure. // Cons and a json document's tree structure.
func (c *jsonContext) String(del ...string) string { func (c *JsonContext) String(del ...string) string {
byteArr := make([]byte, 0, c.stringLen()) byteArr := make([]byte, 0, c.stringLen())
buf := bytes.NewBuffer(byteArr) buf := bytes.NewBuffer(byteArr)
c.writeStringToBuffer(buf, del) c.writeStringToBuffer(buf, del)
@ -47,7 +47,7 @@ func (c *jsonContext) String(del ...string) string {
return buf.String() return buf.String()
} }
func (c *jsonContext) stringLen() int { func (c *JsonContext) stringLen() int {
length := 0 length := 0
if c.tail != nil { if c.tail != nil {
length = c.tail.stringLen() + 1 // add 1 for "." length = c.tail.stringLen() + 1 // add 1 for "."
@ -57,7 +57,7 @@ func (c *jsonContext) stringLen() int {
return length return length
} }
func (c *jsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) { func (c *JsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) {
if c.tail != nil { if c.tail != nil {
c.tail.writeStringToBuffer(buf, del) c.tail.writeStringToBuffer(buf, del)

View File

@ -76,6 +76,9 @@ type (
HttpBadStatus() string HttpBadStatus() string
ParseError() string ParseError() string
ConditionThen() string
ConditionElse() string
// ErrorFormat // ErrorFormat
ErrorFormat() string ErrorFormat() string
} }
@ -268,14 +271,23 @@ func (l DefaultLocale) ErrorFormat() string {
//Parse error //Parse error
func (l DefaultLocale) ParseError() string { func (l DefaultLocale) ParseError() string {
return `Expected: %expected%, given: Invalid JSON` return `Expected: {{.expected}}, given: Invalid JSON`
}
//If/Else
func (l DefaultLocale) ConditionThen() string {
return `Must validate "then" as "if" was valid`
}
func (l DefaultLocale) ConditionElse() string {
return `Must validate "else" as "if" was not valid`
} }
const ( const (
STRING_NUMBER = "number" STRING_NUMBER = "number"
STRING_ARRAY_OF_STRINGS = "array of strings" STRING_ARRAY_OF_STRINGS = "array of strings"
STRING_ARRAY_OF_SCHEMAS = "array of schemas" STRING_ARRAY_OF_SCHEMAS = "array of schemas"
STRING_SCHEMA = "schema" STRING_SCHEMA = "valid schema"
STRING_SCHEMA_OR_ARRAY_OF_STRINGS = "schema or array of strings" STRING_SCHEMA_OR_ARRAY_OF_STRINGS = "schema or array of strings"
STRING_PROPERTIES = "properties" STRING_PROPERTIES = "properties"
STRING_DEPENDENCY = "dependency" STRING_DEPENDENCY = "dependency"

View File

@ -40,10 +40,12 @@ type (
Field() string Field() string
SetType(string) SetType(string)
Type() string Type() string
SetContext(*jsonContext) SetContext(*JsonContext)
Context() *jsonContext Context() *JsonContext
SetDescription(string) SetDescription(string)
Description() string Description() string
SetDescriptionFormat(string)
DescriptionFormat() string
SetValue(interface{}) SetValue(interface{})
Value() interface{} Value() interface{}
SetDetails(ErrorDetails) SetDetails(ErrorDetails)
@ -56,8 +58,9 @@ type (
// can be defined by just embedding this type // can be defined by just embedding this type
ResultErrorFields struct { ResultErrorFields struct {
errorType string // A string with the type of error (i.e. invalid_type) errorType string // A string with the type of error (i.e. invalid_type)
context *jsonContext // Tree like notation of the part that failed the validation. ex (root).a.b ... context *JsonContext // Tree like notation of the part that failed the validation. ex (root).a.b ...
description string // A human readable error message description string // A human readable error message
descriptionFormat string // A format for human readable error message
value interface{} // Value given by the JSON file that is the source of the error value interface{} // Value given by the JSON file that is the source of the error
details ErrorDetails details ErrorDetails
} }
@ -90,11 +93,11 @@ func (v *ResultErrorFields) Type() string {
return v.errorType return v.errorType
} }
func (v *ResultErrorFields) SetContext(context *jsonContext) { func (v *ResultErrorFields) SetContext(context *JsonContext) {
v.context = context v.context = context
} }
func (v *ResultErrorFields) Context() *jsonContext { func (v *ResultErrorFields) Context() *JsonContext {
return v.context return v.context
} }
@ -106,6 +109,14 @@ func (v *ResultErrorFields) Description() string {
return v.description return v.description
} }
func (v *ResultErrorFields) SetDescriptionFormat(descriptionFormat string) {
v.descriptionFormat = descriptionFormat
}
func (v *ResultErrorFields) DescriptionFormat() string {
return v.descriptionFormat
}
func (v *ResultErrorFields) SetValue(value interface{}) { func (v *ResultErrorFields) SetValue(value interface{}) {
v.value = value v.value = value
} }
@ -154,8 +165,19 @@ func (v *Result) Valid() bool {
func (v *Result) Errors() []ResultError { func (v *Result) Errors() []ResultError {
return v.errors return v.errors
} }
// Add a fully filled error to the error set
// SetDescription() will be called with the result of the parsed err.DescriptionFormat()
func (v *Result) AddError(err ResultError, details ErrorDetails) {
if _, exists := details["context"]; !exists && err.Context() != nil {
details["context"] = err.Context().String()
}
func (v *Result) addError(err ResultError, context *jsonContext, value interface{}, details ErrorDetails) { err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details))
v.errors = append(v.errors, err)
}
func (v *Result) addInternalError(err ResultError, context *JsonContext, value interface{}, details ErrorDetails) {
newError(err, context, value, Locale, details) newError(err, context, value, Locale, details)
v.errors = append(v.errors, err) v.errors = append(v.errors, err)
v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function

View File

@ -27,8 +27,8 @@
package gojsonschema package gojsonschema
import ( import (
// "encoding/json"
"errors" "errors"
"math/big"
"reflect" "reflect"
"regexp" "regexp"
"text/template" "text/template"
@ -56,22 +56,30 @@ func NewSchema(l JSONLoader) (*Schema, error) {
d.documentReference = ref d.documentReference = ref
d.referencePool = newSchemaReferencePool() d.referencePool = newSchemaReferencePool()
var spd *schemaPoolDocument
var doc interface{} var doc interface{}
if ref.String() != "" { if ref.String() != "" {
// Get document from schema pool // Get document from schema pool
spd, err := d.pool.GetDocument(d.documentReference) spd, err = d.pool.GetDocument(d.documentReference)
if err != nil { if err != nil {
return nil, err return nil, err
} }
doc = spd.Document doc = spd.Document
// Deal with fragment pointers
jsonPointer := ref.GetPointer()
doc, _, err = jsonPointer.Get(doc)
if err != nil {
return nil, err
}
} else { } else {
// Load JSON directly // Load JSON directly
doc, err = l.LoadJSON() doc, err = l.LoadJSON()
if err != nil { if err != nil {
return nil, err return nil, err
} }
d.pool.SetStandaloneDocument(doc)
} }
d.pool.SetStandaloneDocument(doc)
err = d.parse(doc) err = d.parse(doc)
if err != nil { if err != nil {
@ -113,12 +121,48 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema)
}, },
)) ))
} }
if currentSchema.parent == nil {
currentSchema.ref = &d.documentReference
currentSchema.id = &d.documentReference
}
if currentSchema.id == nil && currentSchema.parent != nil {
currentSchema.id = currentSchema.parent.id
}
m := documentNode.(map[string]interface{}) m := documentNode.(map[string]interface{})
if currentSchema == d.rootSchema { // id
currentSchema.ref = &d.documentReference if existsMapKey(m, KEY_ID) && !isKind(m[KEY_ID], reflect.String) {
return errors.New(formatErrorDescription(
Locale.InvalidType(),
ErrorDetails{
"expected": TYPE_STRING,
"given": KEY_ID,
},
))
} }
if k, ok := m[KEY_ID].(string); ok {
jsonReference, err := gojsonreference.NewJsonReference(k)
if err != nil {
return err
}
if currentSchema == d.rootSchema {
currentSchema.id = &jsonReference
} else {
ref, err := currentSchema.parent.id.Inherits(jsonReference)
if err != nil {
return err
}
currentSchema.id = ref
}
}
// Add schema to document cache. The same id is passed down to subsequent
// subschemas, but as only the first and top one is used it will always reference
// the correct schema. Doing it once here prevents having
// to do this same step at every corner case.
d.referencePool.Add(currentSchema.id.String(), currentSchema)
// $subSchema // $subSchema
if existsMapKey(m, KEY_SCHEMA) { if existsMapKey(m, KEY_SCHEMA) {
@ -159,19 +203,17 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema)
if jsonReference.HasFullUrl { if jsonReference.HasFullUrl {
currentSchema.ref = &jsonReference currentSchema.ref = &jsonReference
} else { } else {
inheritedReference, err := currentSchema.ref.Inherits(jsonReference) inheritedReference, err := currentSchema.id.Inherits(jsonReference)
if err != nil { if err != nil {
return err return err
} }
currentSchema.ref = inheritedReference currentSchema.ref = inheritedReference
} }
if sch, ok := d.referencePool.Get(currentSchema.ref.String()); ok {
if sch, ok := d.referencePool.Get(currentSchema.ref.String() + k); ok {
currentSchema.refSchema = sch currentSchema.refSchema = sch
} else { } else {
err := d.parseReference(documentNode, currentSchema, k) err := d.parseReference(documentNode, currentSchema)
if err != nil { if err != nil {
return err return err
} }
@ -186,11 +228,23 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema)
currentSchema.definitions = make(map[string]*subSchema) currentSchema.definitions = make(map[string]*subSchema)
for dk, dv := range m[KEY_DEFINITIONS].(map[string]interface{}) { for dk, dv := range m[KEY_DEFINITIONS].(map[string]interface{}) {
if isKind(dv, reflect.Map) { if isKind(dv, reflect.Map) {
newSchema := &subSchema{property: KEY_DEFINITIONS, parent: currentSchema, ref: currentSchema.ref}
currentSchema.definitions[dk] = newSchema ref, err := gojsonreference.NewJsonReference("#/" + KEY_DEFINITIONS + "/" + dk)
err := d.parseSchema(dv, newSchema)
if err != nil { if err != nil {
return errors.New(err.Error()) return err
}
newSchemaID, err := currentSchema.id.Inherits(ref)
if err != nil {
return err
}
newSchema := &subSchema{property: KEY_DEFINITIONS, parent: currentSchema, id: newSchemaID}
currentSchema.definitions[dk] = newSchema
err = d.parseSchema(dv, newSchema)
if err != nil {
return err
} }
} else { } else {
return errors.New(formatErrorDescription( return errors.New(formatErrorDescription(
@ -214,20 +268,6 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema)
} }
// id
if existsMapKey(m, KEY_ID) && !isKind(m[KEY_ID], reflect.String) {
return errors.New(formatErrorDescription(
Locale.InvalidType(),
ErrorDetails{
"expected": TYPE_STRING,
"given": KEY_ID,
},
))
}
if k, ok := m[KEY_ID].(string); ok {
currentSchema.id = &k
}
// title // title
if existsMapKey(m, KEY_TITLE) && !isKind(m[KEY_TITLE], reflect.String) { if existsMapKey(m, KEY_TITLE) && !isKind(m[KEY_TITLE], reflect.String) {
return errors.New(formatErrorDescription( return errors.New(formatErrorDescription(
@ -443,7 +483,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema)
}, },
)) ))
} }
if *multipleOfValue <= 0 { if multipleOfValue.Cmp(big.NewFloat(0)) <= 0 {
return errors.New(formatErrorDescription( return errors.New(formatErrorDescription(
Locale.GreaterThanZero(), Locale.GreaterThanZero(),
ErrorDetails{"number": KEY_MULTIPLE_OF}, ErrorDetails{"number": KEY_MULTIPLE_OF},
@ -511,7 +551,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema)
} }
if currentSchema.minimum != nil && currentSchema.maximum != nil { if currentSchema.minimum != nil && currentSchema.maximum != nil {
if *currentSchema.minimum > *currentSchema.maximum { if currentSchema.minimum.Cmp(currentSchema.maximum) == 1 {
return errors.New(formatErrorDescription( return errors.New(formatErrorDescription(
Locale.CannotBeGT(), Locale.CannotBeGT(),
ErrorDetails{"x": KEY_MINIMUM, "y": KEY_MAXIMUM}, ErrorDetails{"x": KEY_MINIMUM, "y": KEY_MAXIMUM},
@ -795,29 +835,83 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema)
} }
} }
if existsMapKey(m, KEY_IF) {
if isKind(m[KEY_IF], reflect.Map) {
newSchema := &subSchema{property: KEY_IF, parent: currentSchema, ref: currentSchema.ref}
currentSchema.SetIf(newSchema)
err := d.parseSchema(m[KEY_IF], newSchema)
if err != nil {
return err
}
} else {
return errors.New(formatErrorDescription(
Locale.MustBeOfAn(),
ErrorDetails{"x": KEY_IF, "y": TYPE_OBJECT},
))
}
}
if existsMapKey(m, KEY_THEN) {
if isKind(m[KEY_THEN], reflect.Map) {
newSchema := &subSchema{property: KEY_THEN, parent: currentSchema, ref: currentSchema.ref}
currentSchema.SetThen(newSchema)
err := d.parseSchema(m[KEY_THEN], newSchema)
if err != nil {
return err
}
} else {
return errors.New(formatErrorDescription(
Locale.MustBeOfAn(),
ErrorDetails{"x": KEY_THEN, "y": TYPE_OBJECT},
))
}
}
if existsMapKey(m, KEY_ELSE) {
if isKind(m[KEY_ELSE], reflect.Map) {
newSchema := &subSchema{property: KEY_ELSE, parent: currentSchema, ref: currentSchema.ref}
currentSchema.SetElse(newSchema)
err := d.parseSchema(m[KEY_ELSE], newSchema)
if err != nil {
return err
}
} else {
return errors.New(formatErrorDescription(
Locale.MustBeOfAn(),
ErrorDetails{"x": KEY_ELSE, "y": TYPE_OBJECT},
))
}
}
return nil return nil
} }
func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSchema, reference string) error { func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSchema) error {
var refdDocumentNode interface{} var (
refdDocumentNode interface{}
dsp *schemaPoolDocument
err error
)
jsonPointer := currentSchema.ref.GetPointer() jsonPointer := currentSchema.ref.GetPointer()
standaloneDocument := d.pool.GetStandaloneDocument() standaloneDocument := d.pool.GetStandaloneDocument()
if standaloneDocument != nil { newSchema := &subSchema{property: KEY_REF, parent: currentSchema, ref: currentSchema.ref}
var err error if currentSchema.ref.HasFragmentOnly {
refdDocumentNode, _, err = jsonPointer.Get(standaloneDocument) refdDocumentNode, _, err = jsonPointer.Get(standaloneDocument)
if err != nil { if err != nil {
return err return err
} }
} else { } else {
dsp, err := d.pool.GetDocument(*currentSchema.ref) dsp, err = d.pool.GetDocument(*currentSchema.ref)
if err != nil { if err != nil {
return err return err
} }
newSchema.id = currentSchema.ref
refdDocumentNode, _, err = jsonPointer.Get(dsp.Document) refdDocumentNode, _, err = jsonPointer.Get(dsp.Document)
if err != nil { if err != nil {
return err return err
} }
@ -833,10 +927,8 @@ func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSche
// returns the loaded referenced subSchema for the caller to update its current subSchema // returns the loaded referenced subSchema for the caller to update its current subSchema
newSchemaDocument := refdDocumentNode.(map[string]interface{}) newSchemaDocument := refdDocumentNode.(map[string]interface{})
newSchema := &subSchema{property: KEY_REF, parent: currentSchema, ref: currentSchema.ref}
d.referencePool.Add(currentSchema.ref.String()+reference, newSchema)
err := d.parseSchema(newSchemaDocument, newSchema) err = d.parseSchema(newSchemaDocument, newSchema)
if err != nil { if err != nil {
return err return err
} }
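
A hedged sketch of how the new if/then/else keywords surface through gojsonschema's public API, assuming this vendored revision wires them into validation as the hunks above suggest; the schema and document literals are invented for illustration.

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// "if" the country is US, "then" the postal code must be five digits;
	// otherwise any string is accepted.
	schema := gojsonschema.NewStringLoader(`{
		"type": "object",
		"if":   { "properties": { "country": { "enum": ["US"] } } },
		"then": { "properties": { "postal_code": { "pattern": "^[0-9]{5}$" } } },
		"else": { "properties": { "postal_code": { "type": "string" } } }
	}`)
	document := gojsonschema.NewStringLoader(`{"country": "US", "postal_code": "ABC"}`)

	result, err := gojsonschema.Validate(schema, document)
	if err != nil {
		panic(err)
	}
	if !result.Valid() {
		for _, e := range result.Errors() {
			// Expect a pattern / condition_then failure for postal_code here.
			fmt.Println(e.Field(), e.Description())
		}
	}
}
```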

View File

@ -62,12 +62,16 @@ func (p *schemaPool) GetStandaloneDocument() (document interface{}) {
func (p *schemaPool) GetDocument(reference gojsonreference.JsonReference) (*schemaPoolDocument, error) { func (p *schemaPool) GetDocument(reference gojsonreference.JsonReference) (*schemaPoolDocument, error) {
var (
spd *schemaPoolDocument
ok bool
err error
)
if internalLogEnabled { if internalLogEnabled {
internalLog("Get Document ( %s )", reference.String()) internalLog("Get Document ( %s )", reference.String())
} }
var err error
// It is not possible to load anything that is not canonical... // It is not possible to load anything that is not canonical...
if !reference.IsCanonical() { if !reference.IsCanonical() {
return nil, errors.New(formatErrorDescription( return nil, errors.New(formatErrorDescription(
@ -75,20 +79,10 @@ func (p *schemaPool) GetDocument(reference gojsonreference.JsonReference) (*sche
ErrorDetails{"reference": reference}, ErrorDetails{"reference": reference},
)) ))
} }
refToUrl := reference refToUrl := reference
refToUrl.GetUrl().Fragment = "" refToUrl.GetUrl().Fragment = ""
var spd *schemaPoolDocument if spd, ok = p.schemaPoolDocuments[refToUrl.String()]; ok {
// Try to find the requested document in the pool
for k := range p.schemaPoolDocuments {
if k == refToUrl.String() {
spd = p.schemaPoolDocuments[k]
}
}
if spd != nil {
if internalLogEnabled { if internalLogEnabled {
internalLog(" From pool") internalLog(" From pool")
} }

View File

@ -62,6 +62,7 @@ func (p *schemaReferencePool) Add(ref string, sch *subSchema) {
if internalLogEnabled { if internalLogEnabled {
internalLog(fmt.Sprintf("Add Schema Reference %s to pool", ref)) internalLog(fmt.Sprintf("Add Schema Reference %s to pool", ref))
} }
if _, ok := p.documents[ref]; !ok {
p.documents[ref] = sch p.documents[ref] = sch
}
} }

View File

@ -28,6 +28,7 @@ package gojsonschema
import ( import (
"errors" "errors"
"math/big"
"regexp" "regexp"
"strings" "strings"
@ -36,7 +37,7 @@ import (
const ( const (
KEY_SCHEMA = "$subSchema" KEY_SCHEMA = "$subSchema"
KEY_ID = "$id" KEY_ID = "id"
KEY_REF = "$ref" KEY_REF = "$ref"
KEY_TITLE = "title" KEY_TITLE = "title"
KEY_DESCRIPTION = "description" KEY_DESCRIPTION = "description"
@ -68,12 +69,14 @@ const (
KEY_ANY_OF = "anyOf" KEY_ANY_OF = "anyOf"
KEY_ALL_OF = "allOf" KEY_ALL_OF = "allOf"
KEY_NOT = "not" KEY_NOT = "not"
KEY_IF = "if"
KEY_THEN = "then"
KEY_ELSE = "else"
) )
type subSchema struct { type subSchema struct {
// basic subSchema meta properties // basic subSchema meta properties
id *string id *gojsonreference.JsonReference
title *string title *string
description *string description *string
@ -98,10 +101,10 @@ type subSchema struct {
propertiesChildren []*subSchema propertiesChildren []*subSchema
// validation : number / integer // validation : number / integer
multipleOf *float64 multipleOf *big.Float
maximum *float64 maximum *big.Float
exclusiveMaximum bool exclusiveMaximum bool
minimum *float64 minimum *big.Float
exclusiveMinimum bool exclusiveMinimum bool
// validation : string // validation : string
@ -134,6 +137,9 @@ type subSchema struct {
anyOf []*subSchema anyOf []*subSchema
allOf []*subSchema allOf []*subSchema
not *subSchema not *subSchema
_if *subSchema // if/else are golang keywords
_then *subSchema
_else *subSchema
} }
func (s *subSchema) AddEnum(i interface{}) error { func (s *subSchema) AddEnum(i interface{}) error {
@ -181,6 +187,18 @@ func (s *subSchema) SetNot(subSchema *subSchema) {
s.not = subSchema s.not = subSchema
} }
func (s *subSchema) SetIf(subSchema *subSchema) {
s._if = subSchema
}
func (s *subSchema) SetThen(subSchema *subSchema) {
s._then = subSchema
}
func (s *subSchema) SetElse(subSchema *subSchema) {
s._else = subSchema
}
func (s *subSchema) AddRequired(value string) error { func (s *subSchema) AddRequired(value string) error {
if isStringInSlice(s.required, value) { if isStringInSlice(s.required, value) {


@ -29,6 +29,7 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"math" "math"
"math/big"
"reflect" "reflect"
"strconv" "strconv"
) )
@ -138,15 +139,13 @@ func mustBeInteger(what interface{}) *int {
return nil return nil
} }
func mustBeNumber(what interface{}) *float64 { func mustBeNumber(what interface{}) *big.Float {
if isJsonNumber(what) { if isJsonNumber(what) {
number := what.(json.Number) number := what.(json.Number)
float64Value, err := number.Float64() float64Value, success := new(big.Float).SetString(string(number))
if success {
if err == nil { return float64Value
return &float64Value
} else { } else {
return nil return nil
} }

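mustBeNumber above now parses a json.Number into a math/big Float instead of a float64, so the numeric keywords compare exactly for integers that float64 would silently round. A small standalone illustration of the Cmp-based check used later in validateNumber:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// 2^53+1 is not representable as a float64 (it rounds down to 2^53),
	// but it fits exactly in big.Float's default 64-bit mantissa.
	value, ok1 := new(big.Float).SetString("9007199254740993")
	maximum, ok2 := new(big.Float).SetString("9007199254740992")
	if !ok1 || !ok2 {
		panic("not a number")
	}
	// The validator rejects value > maximum; Cmp returns +1 here.
	fmt.Println(value.Cmp(maximum) > 0) // true

	// The same comparison through float64 loses the difference.
	f, _ := value.Float64()
	fmt.Println(f > 9007199254740992) // false
}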

@ -27,6 +27,7 @@ package gojsonschema
import ( import (
"encoding/json" "encoding/json"
"math/big"
"reflect" "reflect"
"regexp" "regexp"
"strconv" "strconv"
@ -63,21 +64,21 @@ func (v *Schema) Validate(l JSONLoader) (*Result, error) {
// begin validation // begin validation
result := &Result{} result := &Result{}
context := newJsonContext(STRING_CONTEXT_ROOT, nil) context := NewJsonContext(STRING_CONTEXT_ROOT, nil)
v.rootSchema.validateRecursive(v.rootSchema, root, result, context) v.rootSchema.validateRecursive(v.rootSchema, root, result, context)
return result, nil return result, nil
} }
func (v *subSchema) subValidateWithContext(document interface{}, context *jsonContext) *Result { func (v *subSchema) subValidateWithContext(document interface{}, context *JsonContext) *Result {
result := &Result{} result := &Result{}
v.validateRecursive(v, document, result, context) v.validateRecursive(v, document, result, context)
return result return result
} }
// Walker function to validate the json recursively against the subSchema // Walker function to validate the json recursively against the subSchema
func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *jsonContext) { func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *JsonContext) {
if internalLogEnabled { if internalLogEnabled {
internalLog("validateRecursive %s", context.String()) internalLog("validateRecursive %s", context.String())
@ -93,7 +94,7 @@ func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode i
// Check for null value // Check for null value
if currentNode == nil { if currentNode == nil {
if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_NULL) { if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_NULL) {
result.addError( result.addInternalError(
new(InvalidTypeError), new(InvalidTypeError),
context, context,
currentNode, currentNode,
@ -125,7 +126,7 @@ func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode i
givenType = TYPE_NUMBER givenType = TYPE_NUMBER
} }
result.addError( result.addInternalError(
new(InvalidTypeError), new(InvalidTypeError),
context, context,
currentNode, currentNode,
@ -154,7 +155,7 @@ func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode i
case reflect.Slice: case reflect.Slice:
if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_ARRAY) { if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_ARRAY) {
result.addError( result.addInternalError(
new(InvalidTypeError), new(InvalidTypeError),
context, context,
currentNode, currentNode,
@ -177,7 +178,7 @@ func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode i
case reflect.Map: case reflect.Map:
if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_OBJECT) { if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_OBJECT) {
result.addError( result.addInternalError(
new(InvalidTypeError), new(InvalidTypeError),
context, context,
currentNode, currentNode,
@ -202,7 +203,7 @@ func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode i
for _, pSchema := range currentSubSchema.propertiesChildren { for _, pSchema := range currentSubSchema.propertiesChildren {
nextNode, ok := castCurrentNode[pSchema.property] nextNode, ok := castCurrentNode[pSchema.property]
if ok { if ok {
subContext := newJsonContext(pSchema.property, context) subContext := NewJsonContext(pSchema.property, context)
v.validateRecursive(pSchema, nextNode, result, subContext) v.validateRecursive(pSchema, nextNode, result, subContext)
} }
} }
@ -212,7 +213,7 @@ func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode i
case reflect.Bool: case reflect.Bool:
if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_BOOLEAN) { if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_BOOLEAN) {
result.addError( result.addInternalError(
new(InvalidTypeError), new(InvalidTypeError),
context, context,
currentNode, currentNode,
@ -234,7 +235,7 @@ func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode i
case reflect.String: case reflect.String:
if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_STRING) { if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_STRING) {
result.addError( result.addInternalError(
new(InvalidTypeError), new(InvalidTypeError),
context, context,
currentNode, currentNode,
@ -263,7 +264,7 @@ func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode i
} }
// Different kinds of validation there, subSchema / common / array / object / string... // Different kinds of validation there, subSchema / common / array / object / string...
func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *jsonContext) { func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *JsonContext) {
if internalLogEnabled { if internalLogEnabled {
internalLog("validateSchema %s", context.String()) internalLog("validateSchema %s", context.String())
@ -287,7 +288,7 @@ func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode inte
} }
if !validatedAnyOf { if !validatedAnyOf {
result.addError(new(NumberAnyOfError), context, currentNode, ErrorDetails{}) result.addInternalError(new(NumberAnyOfError), context, currentNode, ErrorDetails{})
if bestValidationResult != nil { if bestValidationResult != nil {
// add error messages of closest matching subSchema as // add error messages of closest matching subSchema as
@ -313,7 +314,7 @@ func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode inte
if nbValidated != 1 { if nbValidated != 1 {
result.addError(new(NumberOneOfError), context, currentNode, ErrorDetails{}) result.addInternalError(new(NumberOneOfError), context, currentNode, ErrorDetails{})
if nbValidated == 0 { if nbValidated == 0 {
// add error messages of closest matching subSchema as // add error messages of closest matching subSchema as
@ -336,14 +337,14 @@ func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode inte
} }
if nbValidated != len(currentSubSchema.allOf) { if nbValidated != len(currentSubSchema.allOf) {
result.addError(new(NumberAllOfError), context, currentNode, ErrorDetails{}) result.addInternalError(new(NumberAllOfError), context, currentNode, ErrorDetails{})
} }
} }
if currentSubSchema.not != nil { if currentSubSchema.not != nil {
validationResult := currentSubSchema.not.subValidateWithContext(currentNode, context) validationResult := currentSubSchema.not.subValidateWithContext(currentNode, context)
if validationResult.Valid() { if validationResult.Valid() {
result.addError(new(NumberNotError), context, currentNode, ErrorDetails{}) result.addInternalError(new(NumberNotError), context, currentNode, ErrorDetails{})
} }
} }
@ -356,7 +357,7 @@ func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode inte
case []string: case []string:
for _, dependOnKey := range dependency { for _, dependOnKey := range dependency {
if _, dependencyResolved := currentNode.(map[string]interface{})[dependOnKey]; !dependencyResolved { if _, dependencyResolved := currentNode.(map[string]interface{})[dependOnKey]; !dependencyResolved {
result.addError( result.addInternalError(
new(MissingDependencyError), new(MissingDependencyError),
context, context,
currentNode, currentNode,
@ -374,10 +375,28 @@ func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode inte
} }
} }
if currentSubSchema._if != nil {
validationResultIf := currentSubSchema._if.subValidateWithContext(currentNode, context)
if currentSubSchema._then != nil && validationResultIf.Valid() {
validationResultThen := currentSubSchema._then.subValidateWithContext(currentNode, context)
if !validationResultThen.Valid() {
result.addInternalError(new(ConditionThenError), context, currentNode, ErrorDetails{})
result.mergeErrors(validationResultThen)
}
}
if currentSubSchema._else != nil && !validationResultIf.Valid() {
validationResultElse := currentSubSchema._else.subValidateWithContext(currentNode, context)
if !validationResultElse.Valid() {
result.addInternalError(new(ConditionElseError), context, currentNode, ErrorDetails{})
result.mergeErrors(validationResultElse)
}
}
}
result.incrementScore() result.incrementScore()
} }
func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) { func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) {
if internalLogEnabled { if internalLogEnabled {
internalLog("validateCommon %s", context.String()) internalLog("validateCommon %s", context.String())
@ -388,10 +407,10 @@ func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{
if len(currentSubSchema.enum) > 0 { if len(currentSubSchema.enum) > 0 {
has, err := currentSubSchema.ContainsEnum(value) has, err := currentSubSchema.ContainsEnum(value)
if err != nil { if err != nil {
result.addError(new(InternalError), context, value, ErrorDetails{"error": err}) result.addInternalError(new(InternalError), context, value, ErrorDetails{"error": err})
} }
if !has { if !has {
result.addError( result.addInternalError(
new(EnumError), new(EnumError),
context, context,
value, value,
@ -405,7 +424,7 @@ func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{
result.incrementScore() result.incrementScore()
} }
func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface{}, result *Result, context *jsonContext) { func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface{}, result *Result, context *JsonContext) {
if internalLogEnabled { if internalLogEnabled {
internalLog("validateArray %s", context.String()) internalLog("validateArray %s", context.String())
@ -417,7 +436,7 @@ func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface
// TODO explain // TODO explain
if currentSubSchema.itemsChildrenIsSingleSchema { if currentSubSchema.itemsChildrenIsSingleSchema {
for i := range value { for i := range value {
subContext := newJsonContext(strconv.Itoa(i), context) subContext := NewJsonContext(strconv.Itoa(i), context)
validationResult := currentSubSchema.itemsChildren[0].subValidateWithContext(value[i], subContext) validationResult := currentSubSchema.itemsChildren[0].subValidateWithContext(value[i], subContext)
result.mergeErrors(validationResult) result.mergeErrors(validationResult)
} }
@ -428,7 +447,7 @@ func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface
// while we have both schemas and values, check them against each other // while we have both schemas and values, check them against each other
for i := 0; i != nbItems && i != nbValues; i++ { for i := 0; i != nbItems && i != nbValues; i++ {
subContext := newJsonContext(strconv.Itoa(i), context) subContext := NewJsonContext(strconv.Itoa(i), context)
validationResult := currentSubSchema.itemsChildren[i].subValidateWithContext(value[i], subContext) validationResult := currentSubSchema.itemsChildren[i].subValidateWithContext(value[i], subContext)
result.mergeErrors(validationResult) result.mergeErrors(validationResult)
} }
@ -440,12 +459,12 @@ func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface
switch currentSubSchema.additionalItems.(type) { switch currentSubSchema.additionalItems.(type) {
case bool: case bool:
if !currentSubSchema.additionalItems.(bool) { if !currentSubSchema.additionalItems.(bool) {
result.addError(new(ArrayNoAdditionalItemsError), context, value, ErrorDetails{}) result.addInternalError(new(ArrayNoAdditionalItemsError), context, value, ErrorDetails{})
} }
case *subSchema: case *subSchema:
additionalItemSchema := currentSubSchema.additionalItems.(*subSchema) additionalItemSchema := currentSubSchema.additionalItems.(*subSchema)
for i := nbItems; i != nbValues; i++ { for i := nbItems; i != nbValues; i++ {
subContext := newJsonContext(strconv.Itoa(i), context) subContext := NewJsonContext(strconv.Itoa(i), context)
validationResult := additionalItemSchema.subValidateWithContext(value[i], subContext) validationResult := additionalItemSchema.subValidateWithContext(value[i], subContext)
result.mergeErrors(validationResult) result.mergeErrors(validationResult)
} }
@ -457,7 +476,7 @@ func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface
// minItems & maxItems // minItems & maxItems
if currentSubSchema.minItems != nil { if currentSubSchema.minItems != nil {
if nbValues < int(*currentSubSchema.minItems) { if nbValues < int(*currentSubSchema.minItems) {
result.addError( result.addInternalError(
new(ArrayMinItemsError), new(ArrayMinItemsError),
context, context,
value, value,
@ -467,7 +486,7 @@ func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface
} }
if currentSubSchema.maxItems != nil { if currentSubSchema.maxItems != nil {
if nbValues > int(*currentSubSchema.maxItems) { if nbValues > int(*currentSubSchema.maxItems) {
result.addError( result.addInternalError(
new(ArrayMaxItemsError), new(ArrayMaxItemsError),
context, context,
value, value,
@ -482,10 +501,10 @@ func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface
for _, v := range value { for _, v := range value {
vString, err := marshalToJsonString(v) vString, err := marshalToJsonString(v)
if err != nil { if err != nil {
result.addError(new(InternalError), context, value, ErrorDetails{"err": err}) result.addInternalError(new(InternalError), context, value, ErrorDetails{"err": err})
} }
if isStringInSlice(stringifiedItems, *vString) { if isStringInSlice(stringifiedItems, *vString) {
result.addError( result.addInternalError(
new(ItemsMustBeUniqueError), new(ItemsMustBeUniqueError),
context, context,
value, value,
@ -499,7 +518,7 @@ func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface
result.incrementScore() result.incrementScore()
} }
func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string]interface{}, result *Result, context *jsonContext) { func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string]interface{}, result *Result, context *JsonContext) {
if internalLogEnabled { if internalLogEnabled {
internalLog("validateObject %s", context.String()) internalLog("validateObject %s", context.String())
@ -509,7 +528,7 @@ func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string
// minProperties & maxProperties: // minProperties & maxProperties:
if currentSubSchema.minProperties != nil { if currentSubSchema.minProperties != nil {
if len(value) < int(*currentSubSchema.minProperties) { if len(value) < int(*currentSubSchema.minProperties) {
result.addError( result.addInternalError(
new(ArrayMinPropertiesError), new(ArrayMinPropertiesError),
context, context,
value, value,
@ -519,7 +538,7 @@ func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string
} }
if currentSubSchema.maxProperties != nil { if currentSubSchema.maxProperties != nil {
if len(value) > int(*currentSubSchema.maxProperties) { if len(value) > int(*currentSubSchema.maxProperties) {
result.addError( result.addInternalError(
new(ArrayMaxPropertiesError), new(ArrayMaxPropertiesError),
context, context,
value, value,
@ -534,7 +553,7 @@ func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string
if ok { if ok {
result.incrementScore() result.incrementScore()
} else { } else {
result.addError( result.addInternalError(
new(RequiredError), new(RequiredError),
context, context,
value, value,
@ -565,7 +584,7 @@ func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string
if found { if found {
if pp_has && !pp_match { if pp_has && !pp_match {
result.addError( result.addInternalError(
new(AdditionalPropertyNotAllowedError), new(AdditionalPropertyNotAllowedError),
context, context,
value[pk], value[pk],
@ -576,7 +595,7 @@ func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string
} else { } else {
if !pp_has || !pp_match { if !pp_has || !pp_match {
result.addError( result.addInternalError(
new(AdditionalPropertyNotAllowedError), new(AdditionalPropertyNotAllowedError),
context, context,
value[pk], value[pk],
@ -628,7 +647,7 @@ func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string
if pp_has && !pp_match { if pp_has && !pp_match {
result.addError( result.addInternalError(
new(InvalidPropertyPatternError), new(InvalidPropertyPatternError),
context, context,
value[pk], value[pk],
@ -645,7 +664,7 @@ func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string
result.incrementScore() result.incrementScore()
} }
func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key string, value interface{}, result *Result, context *jsonContext) (has bool, matched bool) { func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key string, value interface{}, result *Result, context *JsonContext) (has bool, matched bool) {
if internalLogEnabled { if internalLogEnabled {
internalLog("validatePatternProperty %s", context.String()) internalLog("validatePatternProperty %s", context.String())
@ -659,7 +678,7 @@ func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key str
for pk, pv := range currentSubSchema.patternProperties { for pk, pv := range currentSubSchema.patternProperties {
if matches, _ := regexp.MatchString(pk, key); matches { if matches, _ := regexp.MatchString(pk, key); matches {
has = true has = true
subContext := newJsonContext(key, context) subContext := NewJsonContext(key, context)
validationResult := pv.subValidateWithContext(value, subContext) validationResult := pv.subValidateWithContext(value, subContext)
result.mergeErrors(validationResult) result.mergeErrors(validationResult)
if validationResult.Valid() { if validationResult.Valid() {
@ -677,7 +696,7 @@ func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key str
return has, true return has, true
} }
func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) { func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) {
// Ignore JSON numbers // Ignore JSON numbers
if isJsonNumber(value) { if isJsonNumber(value) {
@ -699,7 +718,7 @@ func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{
// minLength & maxLength: // minLength & maxLength:
if currentSubSchema.minLength != nil { if currentSubSchema.minLength != nil {
if utf8.RuneCount([]byte(stringValue)) < int(*currentSubSchema.minLength) { if utf8.RuneCount([]byte(stringValue)) < int(*currentSubSchema.minLength) {
result.addError( result.addInternalError(
new(StringLengthGTEError), new(StringLengthGTEError),
context, context,
value, value,
@ -709,7 +728,7 @@ func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{
} }
if currentSubSchema.maxLength != nil { if currentSubSchema.maxLength != nil {
if utf8.RuneCount([]byte(stringValue)) > int(*currentSubSchema.maxLength) { if utf8.RuneCount([]byte(stringValue)) > int(*currentSubSchema.maxLength) {
result.addError( result.addInternalError(
new(StringLengthLTEError), new(StringLengthLTEError),
context, context,
value, value,
@ -721,7 +740,7 @@ func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{
// pattern: // pattern:
if currentSubSchema.pattern != nil { if currentSubSchema.pattern != nil {
if !currentSubSchema.pattern.MatchString(stringValue) { if !currentSubSchema.pattern.MatchString(stringValue) {
result.addError( result.addInternalError(
new(DoesNotMatchPatternError), new(DoesNotMatchPatternError),
context, context,
value, value,
@ -734,7 +753,7 @@ func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{
// format // format
if currentSubSchema.format != "" { if currentSubSchema.format != "" {
if !FormatCheckers.IsFormat(currentSubSchema.format, stringValue) { if !FormatCheckers.IsFormat(currentSubSchema.format, stringValue) {
result.addError( result.addInternalError(
new(DoesNotMatchFormatError), new(DoesNotMatchFormatError),
context, context,
value, value,
@ -746,7 +765,7 @@ func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{
result.incrementScore() result.incrementScore()
} }
func (v *subSchema) validateNumber(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) { func (v *subSchema) validateNumber(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) {
// Ignore non numbers // Ignore non numbers
if !isJsonNumber(value) { if !isJsonNumber(value) {
@ -759,17 +778,17 @@ func (v *subSchema) validateNumber(currentSubSchema *subSchema, value interface{
} }
number := value.(json.Number) number := value.(json.Number)
float64Value, _ := number.Float64() float64Value, _ := new(big.Float).SetString(string(number))
// multipleOf: // multipleOf:
if currentSubSchema.multipleOf != nil { if currentSubSchema.multipleOf != nil {
if !isFloat64AnInteger(float64Value / *currentSubSchema.multipleOf) { if q := new(big.Float).Quo(float64Value, currentSubSchema.multipleOf); !q.IsInt() {
result.addError( result.addInternalError(
new(MultipleOfError), new(MultipleOfError),
context, context,
resultErrorFormatJsonNumber(number), resultErrorFormatJsonNumber(number),
ErrorDetails{"multiple": *currentSubSchema.multipleOf}, ErrorDetails{"multiple": currentSubSchema.multipleOf},
) )
} }
} }
@ -777,24 +796,24 @@ func (v *subSchema) validateNumber(currentSubSchema *subSchema, value interface{
//maximum & exclusiveMaximum: //maximum & exclusiveMaximum:
if currentSubSchema.maximum != nil { if currentSubSchema.maximum != nil {
if currentSubSchema.exclusiveMaximum { if currentSubSchema.exclusiveMaximum {
if float64Value >= *currentSubSchema.maximum { if float64Value.Cmp(currentSubSchema.maximum) >= 0 {
result.addError( result.addInternalError(
new(NumberLTError), new(NumberLTError),
context, context,
resultErrorFormatJsonNumber(number), resultErrorFormatJsonNumber(number),
ErrorDetails{ ErrorDetails{
"max": resultErrorFormatNumber(*currentSubSchema.maximum), "max": currentSubSchema.maximum,
}, },
) )
} }
} else { } else {
if float64Value > *currentSubSchema.maximum { if float64Value.Cmp(currentSubSchema.maximum) == 1 {
result.addError( result.addInternalError(
new(NumberLTEError), new(NumberLTEError),
context, context,
resultErrorFormatJsonNumber(number), resultErrorFormatJsonNumber(number),
ErrorDetails{ ErrorDetails{
"max": resultErrorFormatNumber(*currentSubSchema.maximum), "max": currentSubSchema.maximum,
}, },
) )
} }
@ -804,24 +823,25 @@ func (v *subSchema) validateNumber(currentSubSchema *subSchema, value interface{
//minimum & exclusiveMinimum: //minimum & exclusiveMinimum:
if currentSubSchema.minimum != nil { if currentSubSchema.minimum != nil {
if currentSubSchema.exclusiveMinimum { if currentSubSchema.exclusiveMinimum {
if float64Value <= *currentSubSchema.minimum { if float64Value.Cmp(currentSubSchema.minimum) <= 0 {
result.addError( // if float64Value <= *currentSubSchema.minimum {
result.addInternalError(
new(NumberGTError), new(NumberGTError),
context, context,
resultErrorFormatJsonNumber(number), resultErrorFormatJsonNumber(number),
ErrorDetails{ ErrorDetails{
"min": resultErrorFormatNumber(*currentSubSchema.minimum), "min": currentSubSchema.minimum,
}, },
) )
} }
} else { } else {
if float64Value < *currentSubSchema.minimum { if float64Value.Cmp(currentSubSchema.minimum) == -1 {
result.addError( result.addInternalError(
new(NumberGTEError), new(NumberGTEError),
context, context,
resultErrorFormatJsonNumber(number), resultErrorFormatJsonNumber(number),
ErrorDetails{ ErrorDetails{
"min": resultErrorFormatNumber(*currentSubSchema.minimum), "min": currentSubSchema.minimum,
}, },
) )
} }
@ -831,7 +851,7 @@ func (v *subSchema) validateNumber(currentSubSchema *subSchema, value interface{
// format // format
if currentSubSchema.format != "" { if currentSubSchema.format != "" {
if !FormatCheckers.IsFormat(currentSubSchema.format, float64Value) { if !FormatCheckers.IsFormat(currentSubSchema.format, float64Value) {
result.addError( result.addInternalError(
new(DoesNotMatchFormatError), new(DoesNotMatchFormatError),
context, context,
value, value,

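The subSchema keys and the validateSchema hunk above add draft-7 style if/then/else conditional validation. A rough sketch of what that enables, using the same loader pattern as the earlier "$ref" example (again assuming upstream gojsonschema's public API):

package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	schema := gojsonschema.NewStringLoader(`{
		"if":   { "properties": { "unit":  { "enum": ["seconds"] } } },
		"then": { "properties": { "value": { "maximum": 60 } } },
		"else": { "properties": { "value": { "maximum": 1000 } } }
	}`)
	document := gojsonschema.NewStringLoader(`{"unit": "seconds", "value": 90}`)

	result, err := gojsonschema.Validate(schema, document)
	if err != nil {
		panic(err)
	}
	// false: "unit" matched the "if" branch, so the "then" cap of 60 applies.
	fmt.Println(result.Valid())
}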

@ -325,9 +325,8 @@ func ReadEntity(packets *packet.Reader) (*Entity, error) {
if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok {
packets.Unread(p) packets.Unread(p)
return nil, errors.StructuralError("first packet was not a public/private key") return nil, errors.StructuralError("first packet was not a public/private key")
} else {
e.PrimaryKey = &e.PrivateKey.PublicKey
} }
e.PrimaryKey = &e.PrivateKey.PublicKey
} }
if !e.PrimaryKey.PubKeyAlgo.CanSign() { if !e.PrimaryKey.PubKeyAlgo.CanSign() {


@ -5,7 +5,7 @@
package http2 package http2
// A list of the possible cipher suite ids. Taken from // A list of the possible cipher suite ids. Taken from
// http://www.iana.org/assignments/tls-parameters/tls-parameters.txt // https://www.iana.org/assignments/tls-parameters/tls-parameters.txt
const ( const (
cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000 cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000


@ -73,7 +73,7 @@ type noDialH2RoundTripper struct{ t *Transport }
func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
res, err := rt.t.RoundTrip(req) res, err := rt.t.RoundTrip(req)
if err == ErrNoCachedConn { if isNoCachedConnError(err) {
return nil, http.ErrSkipAltProtocol return nil, http.ErrSkipAltProtocol
} }
return res, err return res, err


@ -652,7 +652,7 @@ func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
if err == nil { if err == nil {
return return
} }
if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) { if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) || err == errPrefaceTimeout {
// Boring, expected errors. // Boring, expected errors.
sc.vlogf(format, args...) sc.vlogf(format, args...)
} else { } else {
@ -897,8 +897,11 @@ func (sc *serverConn) sendServeMsg(msg interface{}) {
} }
} }
// readPreface reads the ClientPreface greeting from the peer var errPrefaceTimeout = errors.New("timeout waiting for client preface")
// or returns an error on timeout or an invalid greeting.
// readPreface reads the ClientPreface greeting from the peer or
// returns errPrefaceTimeout on timeout, or an error if the greeting
// is invalid.
func (sc *serverConn) readPreface() error { func (sc *serverConn) readPreface() error {
errc := make(chan error, 1) errc := make(chan error, 1)
go func() { go func() {
@ -916,7 +919,7 @@ func (sc *serverConn) readPreface() error {
defer timer.Stop() defer timer.Stop()
select { select {
case <-timer.C: case <-timer.C:
return errors.New("timeout waiting for client preface") return errPrefaceTimeout
case err := <-errc: case err := <-errc:
if err == nil { if err == nil {
if VerboseLogs { if VerboseLogs {
@ -2319,7 +2322,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
clen = strconv.Itoa(len(p)) clen = strconv.Itoa(len(p))
} }
_, hasContentType := rws.snapHeader["Content-Type"] _, hasContentType := rws.snapHeader["Content-Type"]
if !hasContentType && bodyAllowedForStatus(rws.status) { if !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 {
ctype = http.DetectContentType(p) ctype = http.DetectContentType(p)
} }
var date string var date string
@ -2487,6 +2490,24 @@ func (w *responseWriter) Header() http.Header {
return rws.handlerHeader return rws.handlerHeader
} }
// checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode.
func checkWriteHeaderCode(code int) {
// Issue 22880: require valid WriteHeader status codes.
// For now we only enforce that it's three digits.
// In the future we might block things over 599 (600 and above aren't defined
// at http://httpwg.org/specs/rfc7231.html#status.codes)
// and we might block under 200 (once we have more mature 1xx support).
// But for now any three digits.
//
// We used to send "HTTP/1.1 000 0" on the wire in responses but there's
// no equivalent bogus thing we can realistically send in HTTP/2,
// so we'll consistently panic instead and help people find their bugs
// early. (We can't return an error from WriteHeader even if we wanted to.)
if code < 100 || code > 999 {
panic(fmt.Sprintf("invalid WriteHeader code %v", code))
}
}
func (w *responseWriter) WriteHeader(code int) { func (w *responseWriter) WriteHeader(code int) {
rws := w.rws rws := w.rws
if rws == nil { if rws == nil {
@ -2497,6 +2518,7 @@ func (w *responseWriter) WriteHeader(code int) {
func (rws *responseWriterState) writeHeader(code int) { func (rws *responseWriterState) writeHeader(code int) {
if !rws.wroteHeader { if !rws.wroteHeader {
checkWriteHeaderCode(code)
rws.wroteHeader = true rws.wroteHeader = true
rws.status = code rws.status = code
if len(rws.handlerHeader) > 0 { if len(rws.handlerHeader) > 0 {


@ -306,7 +306,26 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
return return
} }
var ErrNoCachedConn = errors.New("http2: no cached connection was available") // noCachedConnError is the concrete type of ErrNoCachedConn, which
// needs to be detected by net/http regardless of whether it's its
// bundled version (in h2_bundle.go with a rewritten type name) or
// from a user's x/net/http2. As such, it has a unique method name // from a user's x/net/http2. As such, it has a unique method name
// (IsHTTP2NoCachedConnError) that net/http sniffs for via func
// isNoCachedConnError.
type noCachedConnError struct{}
func (noCachedConnError) IsHTTP2NoCachedConnError() {}
func (noCachedConnError) Error() string { return "http2: no cached connection was available" }
// isNoCachedConnError reports whether err is of type noCachedConnError
// or its equivalent renamed type in net/http2's h2_bundle.go. Both types
// may coexist in the same running program.
func isNoCachedConnError(err error) bool {
_, ok := err.(interface{ IsHTTP2NoCachedConnError() })
return ok
}
var ErrNoCachedConn error = noCachedConnError{}
// RoundTripOpt are options for the Transport.RoundTripOpt method. // RoundTripOpt are options for the Transport.RoundTripOpt method.
type RoundTripOpt struct { type RoundTripOpt struct {
@ -811,7 +830,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
cc.wmu.Lock() cc.wmu.Lock()
endStream := !hasBody && !hasTrailers endStream := !hasBody && !hasTrailers
werr := cc.writeHeaders(cs.ID, endStream, hdrs) werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
cc.wmu.Unlock() cc.wmu.Unlock()
traceWroteHeaders(cs.trace) traceWroteHeaders(cs.trace)
cc.mu.Unlock() cc.mu.Unlock()
@ -964,13 +983,12 @@ func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error {
} }
// requires cc.wmu be held // requires cc.wmu be held
func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error { func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error {
first := true // first frame written (HEADERS is first, then CONTINUATION) first := true // first frame written (HEADERS is first, then CONTINUATION)
frameSize := int(cc.maxFrameSize)
for len(hdrs) > 0 && cc.werr == nil { for len(hdrs) > 0 && cc.werr == nil {
chunk := hdrs chunk := hdrs
if len(chunk) > frameSize { if len(chunk) > maxFrameSize {
chunk = chunk[:frameSize] chunk = chunk[:maxFrameSize]
} }
hdrs = hdrs[len(chunk):] hdrs = hdrs[len(chunk):]
endHeaders := len(hdrs) == 0 endHeaders := len(hdrs) == 0
@ -1087,13 +1105,17 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
} }
} }
cc.mu.Lock()
maxFrameSize := int(cc.maxFrameSize)
cc.mu.Unlock()
cc.wmu.Lock() cc.wmu.Lock()
defer cc.wmu.Unlock() defer cc.wmu.Unlock()
// Two ways to send END_STREAM: either with trailers, or // Two ways to send END_STREAM: either with trailers, or
// with an empty DATA frame. // with an empty DATA frame.
if len(trls) > 0 { if len(trls) > 0 {
err = cc.writeHeaders(cs.ID, true, trls) err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls)
} else { } else {
err = cc.fr.WriteData(cs.ID, true, nil) err = cc.fr.WriteData(cs.ID, true, nil)
} }
@ -1373,17 +1395,12 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. // clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
type clientConnReadLoop struct { type clientConnReadLoop struct {
cc *ClientConn cc *ClientConn
activeRes map[uint32]*clientStream // keyed by streamID
closeWhenIdle bool closeWhenIdle bool
} }
// readLoop runs in its own goroutine and reads and dispatches frames. // readLoop runs in its own goroutine and reads and dispatches frames.
func (cc *ClientConn) readLoop() { func (cc *ClientConn) readLoop() {
rl := &clientConnReadLoop{ rl := &clientConnReadLoop{cc: cc}
cc: cc,
activeRes: make(map[uint32]*clientStream),
}
defer rl.cleanup() defer rl.cleanup()
cc.readerErr = rl.run() cc.readerErr = rl.run()
if ce, ok := cc.readerErr.(ConnectionError); ok { if ce, ok := cc.readerErr.(ConnectionError); ok {
@ -1438,10 +1455,8 @@ func (rl *clientConnReadLoop) cleanup() {
} else if err == io.EOF { } else if err == io.EOF {
err = io.ErrUnexpectedEOF err = io.ErrUnexpectedEOF
} }
for _, cs := range rl.activeRes {
cs.bufPipe.CloseWithError(err)
}
for _, cs := range cc.streams { for _, cs := range cc.streams {
cs.bufPipe.CloseWithError(err) // no-op if already closed
select { select {
case cs.resc <- resAndError{err: err}: case cs.resc <- resAndError{err: err}:
default: default:
@ -1519,7 +1534,7 @@ func (rl *clientConnReadLoop) run() error {
} }
return err return err
} }
if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 { if rl.closeWhenIdle && gotReply && maybeIdle {
cc.closeIfIdle() cc.closeIfIdle()
} }
} }
@ -1527,6 +1542,13 @@ func (rl *clientConnReadLoop) run() error {
func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
cc := rl.cc cc := rl.cc
cs := cc.streamByID(f.StreamID, false)
if cs == nil {
// We'd get here if we canceled a request while the
// server had its response still in flight. So if this
// was just something we canceled, ignore it.
return nil
}
if f.StreamEnded() { if f.StreamEnded() {
// Issue 20521: If the stream has ended, streamByID() causes // Issue 20521: If the stream has ended, streamByID() causes
// clientStream.done to be closed, which causes the request's bodyWriter // clientStream.done to be closed, which causes the request's bodyWriter
@ -1535,14 +1557,15 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
// Deferring stream closure allows the header processing to occur first. // Deferring stream closure allows the header processing to occur first.
// clientConn.RoundTrip may still receive the bodyWriter error first, but // clientConn.RoundTrip may still receive the bodyWriter error first, but
// the fix for issue 16102 prioritises any response. // the fix for issue 16102 prioritises any response.
defer cc.streamByID(f.StreamID, true) //
// Issue 22413: If there is no request body, we should close the
// stream before writing to cs.resc so that the stream is closed
// immediately once RoundTrip returns.
if cs.req.Body != nil {
defer cc.forgetStreamID(f.StreamID)
} else {
cc.forgetStreamID(f.StreamID)
} }
cs := cc.streamByID(f.StreamID, false)
if cs == nil {
// We'd get here if we canceled a request while the
// server had its response still in flight. So if this
// was just something we canceled, ignore it.
return nil
} }
if !cs.firstByte { if !cs.firstByte {
if cs.trace != nil { if cs.trace != nil {
@ -1567,6 +1590,7 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
} }
// Any other error type is a stream error. // Any other error type is a stream error.
cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err)
cc.forgetStreamID(cs.ID)
cs.resc <- resAndError{err: err} cs.resc <- resAndError{err: err}
return nil // return nil from process* funcs to keep conn alive return nil // return nil from process* funcs to keep conn alive
} }
@ -1574,9 +1598,6 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
// (nil, nil) special case. See handleResponse docs. // (nil, nil) special case. See handleResponse docs.
return nil return nil
} }
if res.Body != noBody {
rl.activeRes[cs.ID] = cs
}
cs.resTrailer = &res.Trailer cs.resTrailer = &res.Trailer
cs.resc <- resAndError{res: res} cs.resc <- resAndError{res: res}
return nil return nil
@ -1596,11 +1617,11 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
status := f.PseudoValue("status") status := f.PseudoValue("status")
if status == "" { if status == "" {
return nil, errors.New("missing status pseudo header") return nil, errors.New("malformed response from server: missing status pseudo header")
} }
statusCode, err := strconv.Atoi(status) statusCode, err := strconv.Atoi(status)
if err != nil { if err != nil {
return nil, errors.New("malformed non-numeric status pseudo header") return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header")
} }
if statusCode == 100 { if statusCode == 100 {
@ -1915,7 +1936,6 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
rl.closeWhenIdle = true rl.closeWhenIdle = true
} }
cs.bufPipe.closeWithErrorAndCode(err, code) cs.bufPipe.closeWithErrorAndCode(err, code)
delete(rl.activeRes, cs.ID)
select { select {
case cs.resc <- resAndError{err: err}: case cs.resc <- resAndError{err: err}:
@ -2042,7 +2062,6 @@ func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
cs.bufPipe.CloseWithError(err) cs.bufPipe.CloseWithError(err)
cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
} }
delete(rl.activeRes, cs.ID)
return nil return nil
} }

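The noCachedConnError change above replaces an == comparison against a sentinel error with a check for a marker method, so net/http can recognize the error whether it comes from a user's x/net/http2 or from the renamed copy bundled into h2_bundle.go. A self-contained sketch of the pattern (the type here is a hypothetical stand-in, not the vendored one):

package main

import (
	"errors"
	"fmt"
)

// noCachedConnError stands in for either copy of the real type: the one in
// x/net/http2 or the renamed one generated into net/http's h2_bundle.go.
type noCachedConnError struct{}

func (noCachedConnError) IsHTTP2NoCachedConnError() {}
func (noCachedConnError) Error() string {
	return "http2: no cached connection was available"
}

// isNoCachedConnError matches by method name rather than by concrete type,
// so it recognizes either copy even when both exist in one program.
func isNoCachedConnError(err error) bool {
	_, ok := err.(interface{ IsHTTP2NoCachedConnError() })
	return ok
}

func main() {
	fmt.Println(isNoCachedConnError(noCachedConnError{}))            // true
	fmt.Println(isNoCachedConnError(errors.New("some other error"))) // false
}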
vendor/golang.org/x/sys/unix/affinity_linux.go (new file)

@ -0,0 +1,124 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// CPU affinity functions
package unix
import (
"unsafe"
)
const cpuSetSize = _CPU_SETSIZE / _NCPUBITS
// CPUSet represents a CPU affinity mask.
type CPUSet [cpuSetSize]cpuMask
func schedAffinity(trap uintptr, pid int, set *CPUSet) error {
_, _, e := RawSyscall(trap, uintptr(pid), uintptr(unsafe.Sizeof(*set)), uintptr(unsafe.Pointer(set)))
if e != 0 {
return errnoErr(e)
}
return nil
}
// SchedGetaffinity gets the CPU affinity mask of the thread specified by pid.
// If pid is 0 the calling thread is used.
func SchedGetaffinity(pid int, set *CPUSet) error {
return schedAffinity(SYS_SCHED_GETAFFINITY, pid, set)
}
// SchedSetaffinity sets the CPU affinity mask of the thread specified by pid.
// If pid is 0 the calling thread is used.
func SchedSetaffinity(pid int, set *CPUSet) error {
return schedAffinity(SYS_SCHED_SETAFFINITY, pid, set)
}
// Zero clears the set s, so that it contains no CPUs.
func (s *CPUSet) Zero() {
for i := range s {
s[i] = 0
}
}
func cpuBitsIndex(cpu int) int {
return cpu / _NCPUBITS
}
func cpuBitsMask(cpu int) cpuMask {
return cpuMask(1 << (uint(cpu) % _NCPUBITS))
}
// Set adds cpu to the set s.
func (s *CPUSet) Set(cpu int) {
i := cpuBitsIndex(cpu)
if i < len(s) {
s[i] |= cpuBitsMask(cpu)
}
}
// Clear removes cpu from the set s.
func (s *CPUSet) Clear(cpu int) {
i := cpuBitsIndex(cpu)
if i < len(s) {
s[i] &^= cpuBitsMask(cpu)
}
}
// IsSet reports whether cpu is in the set s.
func (s *CPUSet) IsSet(cpu int) bool {
i := cpuBitsIndex(cpu)
if i < len(s) {
return s[i]&cpuBitsMask(cpu) != 0
}
return false
}
// Count returns the number of CPUs in the set s.
func (s *CPUSet) Count() int {
c := 0
for _, b := range s {
c += onesCount64(uint64(b))
}
return c
}
// onesCount64 is a copy of Go 1.9's math/bits.OnesCount64.
// Once this package can require Go 1.9, we can delete this
// and update the caller to use bits.OnesCount64.
func onesCount64(x uint64) int {
const m0 = 0x5555555555555555 // 01010101 ...
const m1 = 0x3333333333333333 // 00110011 ...
const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ...
const m3 = 0x00ff00ff00ff00ff // etc.
const m4 = 0x0000ffff0000ffff
// Implementation: Parallel summing of adjacent bits.
// See "Hacker's Delight", Chap. 5: Counting Bits.
// The following pattern shows the general approach:
//
// x = x>>1&(m0&m) + x&(m0&m)
// x = x>>2&(m1&m) + x&(m1&m)
// x = x>>4&(m2&m) + x&(m2&m)
// x = x>>8&(m3&m) + x&(m3&m)
// x = x>>16&(m4&m) + x&(m4&m)
// x = x>>32&(m5&m) + x&(m5&m)
// return int(x)
//
// Masking (& operations) can be left away when there's no
// danger that a field's sum will carry over into the next
// field: Since the result cannot be > 64, 8 bits is enough
// and we can ignore the masks for the shifts by 8 and up.
// Per "Hacker's Delight", the first line can be simplified
// more, but it saves at best one instruction, so we leave
// it alone for clarity.
const m = 1<<64 - 1
x = x>>1&(m0&m) + x&(m0&m)
x = x>>2&(m1&m) + x&(m1&m)
x = (x>>4 + x) & (m2 & m)
x += x >> 8
x += x >> 16
x += x >> 32
return int(x) & (1<<7 - 1)
}

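The new affinity_linux.go above adds a CPUSet type plus SchedGetaffinity/SchedSetaffinity wrappers. A minimal Linux-only usage sketch:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Linux-only: query the affinity mask of the calling thread (pid 0).
	var set unix.CPUSet
	if err := unix.SchedGetaffinity(0, &set); err != nil {
		panic(err)
	}
	fmt.Println("CPUs usable by this thread:", set.Count())
	fmt.Println("CPU 0 allowed:", set.IsSet(0))
}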

@ -10,6 +10,10 @@
// System calls for 386, Linux // System calls for 386, Linux
// //
// See ../runtime/sys_linux_386.s for the reason why we always use int 0x80
// instead of the glibc-specific "CALL 0x10(GS)".
#define INVOKE_SYSCALL INT $0x80
// Just jump to package syscall's implementation for all these functions. // Just jump to package syscall's implementation for all these functions.
// The runtime may know about them. // The runtime may know about them.
@ -19,12 +23,38 @@ TEXT ·Syscall(SB),NOSPLIT,$0-28
TEXT ·Syscall6(SB),NOSPLIT,$0-40 TEXT ·Syscall6(SB),NOSPLIT,$0-40
JMP syscall·Syscall6(SB) JMP syscall·Syscall6(SB)
TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
CALL runtime·entersyscall(SB)
MOVL trap+0(FP), AX // syscall entry
MOVL a1+4(FP), BX
MOVL a2+8(FP), CX
MOVL a3+12(FP), DX
MOVL $0, SI
MOVL $0, DI
INVOKE_SYSCALL
MOVL AX, r1+16(FP)
MOVL DX, r2+20(FP)
CALL runtime·exitsyscall(SB)
RET
TEXT ·RawSyscall(SB),NOSPLIT,$0-28 TEXT ·RawSyscall(SB),NOSPLIT,$0-28
JMP syscall·RawSyscall(SB) JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
JMP syscall·RawSyscall6(SB) JMP syscall·RawSyscall6(SB)
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
MOVL trap+0(FP), AX // syscall entry
MOVL a1+4(FP), BX
MOVL a2+8(FP), CX
MOVL a3+12(FP), DX
MOVL $0, SI
MOVL $0, DI
INVOKE_SYSCALL
MOVL AX, r1+16(FP)
MOVL DX, r2+20(FP)
RET
TEXT ·socketcall(SB),NOSPLIT,$0-36 TEXT ·socketcall(SB),NOSPLIT,$0-36
JMP syscall·socketcall(SB) JMP syscall·socketcall(SB)


@ -19,11 +19,39 @@ TEXT ·Syscall(SB),NOSPLIT,$0-56
TEXT ·Syscall6(SB),NOSPLIT,$0-80 TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB) JMP syscall·Syscall6(SB)
TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
CALL runtime·entersyscall(SB)
MOVQ a1+8(FP), DI
MOVQ a2+16(FP), SI
MOVQ a3+24(FP), DX
MOVQ $0, R10
MOVQ $0, R8
MOVQ $0, R9
MOVQ trap+0(FP), AX // syscall entry
SYSCALL
MOVQ AX, r1+32(FP)
MOVQ DX, r2+40(FP)
CALL runtime·exitsyscall(SB)
RET
TEXT ·RawSyscall(SB),NOSPLIT,$0-56 TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB) JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB) JMP syscall·RawSyscall6(SB)
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
MOVQ a1+8(FP), DI
MOVQ a2+16(FP), SI
MOVQ a3+24(FP), DX
MOVQ $0, R10
MOVQ $0, R8
MOVQ $0, R9
MOVQ trap+0(FP), AX // syscall entry
SYSCALL
MOVQ AX, r1+32(FP)
MOVQ DX, r2+40(FP)
RET
TEXT ·gettimeofday(SB),NOSPLIT,$0-16 TEXT ·gettimeofday(SB),NOSPLIT,$0-16
JMP syscall·gettimeofday(SB) JMP syscall·gettimeofday(SB)


@ -19,11 +19,38 @@ TEXT ·Syscall(SB),NOSPLIT,$0-28
TEXT ·Syscall6(SB),NOSPLIT,$0-40 TEXT ·Syscall6(SB),NOSPLIT,$0-40
B syscall·Syscall6(SB) B syscall·Syscall6(SB)
TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
BL runtime·entersyscall(SB)
MOVW trap+0(FP), R7
MOVW a1+4(FP), R0
MOVW a2+8(FP), R1
MOVW a3+12(FP), R2
MOVW $0, R3
MOVW $0, R4
MOVW $0, R5
SWI $0
MOVW R0, r1+16(FP)
MOVW $0, R0
MOVW R0, r2+20(FP)
BL runtime·exitsyscall(SB)
RET
TEXT ·RawSyscall(SB),NOSPLIT,$0-28 TEXT ·RawSyscall(SB),NOSPLIT,$0-28
B syscall·RawSyscall(SB) B syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
B syscall·RawSyscall6(SB) B syscall·RawSyscall6(SB)
TEXT ·seek(SB),NOSPLIT,$0-32 TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
MOVW trap+0(FP), R7 // syscall entry
MOVW a1+4(FP), R0
MOVW a2+8(FP), R1
MOVW a3+12(FP), R2
SWI $0
MOVW R0, r1+16(FP)
MOVW $0, R0
MOVW R0, r2+20(FP)
RET
TEXT ·seek(SB),NOSPLIT,$0-28
B syscall·seek(SB) B syscall·seek(SB)


@ -17,8 +17,36 @@ TEXT ·Syscall(SB),NOSPLIT,$0-56
TEXT ·Syscall6(SB),NOSPLIT,$0-80 TEXT ·Syscall6(SB),NOSPLIT,$0-80
B syscall·Syscall6(SB) B syscall·Syscall6(SB)
TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
BL runtime·entersyscall(SB)
MOVD a1+8(FP), R0
MOVD a2+16(FP), R1
MOVD a3+24(FP), R2
MOVD $0, R3
MOVD $0, R4
MOVD $0, R5
MOVD trap+0(FP), R8 // syscall entry
SVC
MOVD R0, r1+32(FP) // r1
MOVD R1, r2+40(FP) // r2
BL runtime·exitsyscall(SB)
RET
TEXT ·RawSyscall(SB),NOSPLIT,$0-56 TEXT ·RawSyscall(SB),NOSPLIT,$0-56
B syscall·RawSyscall(SB) B syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
B syscall·RawSyscall6(SB) B syscall·RawSyscall6(SB)
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
MOVD a1+8(FP), R0
MOVD a2+16(FP), R1
MOVD a3+24(FP), R2
MOVD $0, R3
MOVD $0, R4
MOVD $0, R5
MOVD trap+0(FP), R8 // syscall entry
SVC
MOVD R0, r1+32(FP)
MOVD R1, r2+40(FP)
RET


@ -21,8 +21,36 @@ TEXT ·Syscall(SB),NOSPLIT,$0-56
TEXT ·Syscall6(SB),NOSPLIT,$0-80 TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB) JMP syscall·Syscall6(SB)
TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
JAL runtime·entersyscall(SB)
MOVV a1+8(FP), R4
MOVV a2+16(FP), R5
MOVV a3+24(FP), R6
MOVV R0, R7
MOVV R0, R8
MOVV R0, R9
MOVV trap+0(FP), R2 // syscall entry
SYSCALL
MOVV R2, r1+32(FP)
MOVV R3, r2+40(FP)
JAL runtime·exitsyscall(SB)
RET
TEXT ·RawSyscall(SB),NOSPLIT,$0-56 TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB) JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB) JMP syscall·RawSyscall6(SB)
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
MOVV a1+8(FP), R4
MOVV a2+16(FP), R5
MOVV a3+24(FP), R6
MOVV R0, R7
MOVV R0, R8
MOVV R0, R9
MOVV trap+0(FP), R2 // syscall entry
SYSCALL
MOVV R2, r1+32(FP)
MOVV R3, r2+40(FP)
RET


@ -24,8 +24,31 @@ TEXT ·Syscall6(SB),NOSPLIT,$0-40
TEXT ·Syscall9(SB),NOSPLIT,$0-52 TEXT ·Syscall9(SB),NOSPLIT,$0-52
JMP syscall·Syscall9(SB) JMP syscall·Syscall9(SB)
TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
JAL runtime·entersyscall(SB)
MOVW a1+4(FP), R4
MOVW a2+8(FP), R5
MOVW a3+12(FP), R6
MOVW R0, R7
MOVW trap+0(FP), R2 // syscall entry
SYSCALL
MOVW R2, r1+16(FP) // r1
MOVW R3, r2+20(FP) // r2
JAL runtime·exitsyscall(SB)
RET
TEXT ·RawSyscall(SB),NOSPLIT,$0-28 TEXT ·RawSyscall(SB),NOSPLIT,$0-28
JMP syscall·RawSyscall(SB) JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
JMP syscall·RawSyscall6(SB) JMP syscall·RawSyscall6(SB)
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
MOVW a1+4(FP), R4
MOVW a2+8(FP), R5
MOVW a3+12(FP), R6
MOVW trap+0(FP), R2 // syscall entry
SYSCALL
MOVW R2, r1+16(FP)
MOVW R3, r2+20(FP)
RET


@ -21,8 +21,36 @@ TEXT ·Syscall(SB),NOSPLIT,$0-56
TEXT ·Syscall6(SB),NOSPLIT,$0-80 TEXT ·Syscall6(SB),NOSPLIT,$0-80
BR syscall·Syscall6(SB) BR syscall·Syscall6(SB)
TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
BL runtime·entersyscall(SB)
MOVD a1+8(FP), R3
MOVD a2+16(FP), R4
MOVD a3+24(FP), R5
MOVD R0, R6
MOVD R0, R7
MOVD R0, R8
MOVD trap+0(FP), R9 // syscall entry
SYSCALL R9
MOVD R3, r1+32(FP)
MOVD R4, r2+40(FP)
BL runtime·exitsyscall(SB)
RET
TEXT ·RawSyscall(SB),NOSPLIT,$0-56 TEXT ·RawSyscall(SB),NOSPLIT,$0-56
BR syscall·RawSyscall(SB) BR syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
BR syscall·RawSyscall6(SB) BR syscall·RawSyscall6(SB)
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
MOVD a1+8(FP), R3
MOVD a2+16(FP), R4
MOVD a3+24(FP), R5
MOVD R0, R6
MOVD R0, R7
MOVD R0, R8
MOVD trap+0(FP), R9 // syscall entry
SYSCALL R9
MOVD R3, r1+32(FP)
MOVD R4, r2+40(FP)
RET


@ -21,8 +21,36 @@ TEXT ·Syscall(SB),NOSPLIT,$0-56
TEXT ·Syscall6(SB),NOSPLIT,$0-80 TEXT ·Syscall6(SB),NOSPLIT,$0-80
BR syscall·Syscall6(SB) BR syscall·Syscall6(SB)
TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
BL runtime·entersyscall(SB)
MOVD a1+8(FP), R2
MOVD a2+16(FP), R3
MOVD a3+24(FP), R4
MOVD $0, R5
MOVD $0, R6
MOVD $0, R7
MOVD trap+0(FP), R1 // syscall entry
SYSCALL
MOVD R2, r1+32(FP)
MOVD R3, r2+40(FP)
BL runtime·exitsyscall(SB)
RET
TEXT ·RawSyscall(SB),NOSPLIT,$0-56 TEXT ·RawSyscall(SB),NOSPLIT,$0-56
BR syscall·RawSyscall(SB) BR syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
BR syscall·RawSyscall6(SB) BR syscall·RawSyscall6(SB)
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
MOVD a1+8(FP), R2
MOVD a2+16(FP), R3
MOVD a3+24(FP), R4
MOVD $0, R5
MOVD $0, R6
MOVD $0, R7
MOVD trap+0(FP), R1 // syscall entry
SYSCALL
MOVD R2, r1+32(FP)
MOVD R3, r2+40(FP)
RET

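The assembly files above (386, amd64, arm, arm64, mips*, ppc64*, s390x) all gain SyscallNoError/RawSyscallNoError entry points that skip errno handling for syscalls that cannot fail. A hypothetical wrapper in the style of the generated z*_linux.go files, not the vendored code itself (getuid(2) never returns an error):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// getuid mirrors how the generated wrappers use the new entry point: no errno
// check, because getuid(2) cannot fail. Linux-only sketch; SYS_GETUID is the
// per-GOARCH syscall number constant.
func getuid() int {
	uid, _ := unix.RawSyscallNoError(unix.SYS_GETUID, 0, 0, 0)
	return int(uid)
}

func main() {
	fmt.Println("uid:", getuid())
}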

@ -6,97 +6,12 @@
package unix package unix
import "unsafe" import "syscall"
// readInt returns the size-bytes unsigned integer in native byte order at offset off.
func readInt(b []byte, off, size uintptr) (u uint64, ok bool) {
if len(b) < int(off+size) {
return 0, false
}
if isBigEndian {
return readIntBE(b[off:], size), true
}
return readIntLE(b[off:], size), true
}
func readIntBE(b []byte, size uintptr) uint64 {
switch size {
case 1:
return uint64(b[0])
case 2:
_ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[1]) | uint64(b[0])<<8
case 4:
_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24
case 8:
_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
default:
panic("syscall: readInt with unsupported size")
}
}
func readIntLE(b []byte, size uintptr) uint64 {
switch size {
case 1:
return uint64(b[0])
case 2:
_ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[0]) | uint64(b[1])<<8
case 4:
_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24
case 8:
_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
default:
panic("syscall: readInt with unsupported size")
}
}
// ParseDirent parses up to max directory entries in buf, // ParseDirent parses up to max directory entries in buf,
// appending the names to names. It returns the number of // appending the names to names. It returns the number of
// bytes consumed from buf, the number of entries added // bytes consumed from buf, the number of entries added
// to names, and the new names slice. // to names, and the new names slice.
func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) { func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
origlen := len(buf) return syscall.ParseDirent(buf, max, names)
count = 0
for max != 0 && len(buf) > 0 {
reclen, ok := direntReclen(buf)
if !ok || reclen > uint64(len(buf)) {
return origlen, count, names
}
rec := buf[:reclen]
buf = buf[reclen:]
ino, ok := direntIno(rec)
if !ok {
break
}
if ino == 0 { // File absent in directory.
continue
}
const namoff = uint64(unsafe.Offsetof(Dirent{}.Name))
namlen, ok := direntNamlen(rec)
if !ok || namoff+namlen > uint64(len(rec)) {
break
}
name := rec[namoff : namoff+namlen]
for i, c := range name {
if c == 0 {
name = name[:i]
break
}
}
// Check for useless names before allocating a string.
if string(name) == "." || string(name) == ".." {
continue
}
max--
count++
names = append(names, string(name))
}
return origlen - len(buf), count, names
} }

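ParseDirent above now simply defers to syscall.ParseDirent instead of carrying its own byte-order-aware dirent parser. A Linux-only sketch of the typical pairing with Getdents (assumptions: /tmp exists and is readable; -1 means "no limit on entries"):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/tmp", unix.O_RDONLY|unix.O_DIRECTORY, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// Read raw dirent records, then let the stdlib-backed parser extract names.
	buf := make([]byte, 4096)
	n, err := unix.Getdents(fd, buf)
	if err != nil {
		panic(err)
	}
	_, _, names := unix.ParseDirent(buf[:n], -1, nil)
	fmt.Println(names)
}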

@ -25,3 +25,7 @@ func Clearenv() {
func Environ() []string { func Environ() []string {
return syscall.Environ() return syscall.Environ()
} }
func Unsetenv(key string) error {
return syscall.Unsetenv(key)
}


@ -1,14 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.4
package unix
import "syscall"
func Unsetenv(key string) error {
// This was added in Go 1.4.
return syscall.Unsetenv(key)
}


@ -11,9 +11,19 @@ import "syscall"
// We can't use the gc-syntax .s files for gccgo. On the plus side // We can't use the gc-syntax .s files for gccgo. On the plus side
// much of the functionality can be written directly in Go. // much of the functionality can be written directly in Go.
//extern gccgoRealSyscallNoError
func realSyscallNoError(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r uintptr)
//extern gccgoRealSyscall //extern gccgoRealSyscall
func realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r, errno uintptr) func realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r, errno uintptr)
func SyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) {
syscall.Entersyscall()
r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
syscall.Exitsyscall()
return r, 0
}
func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
syscall.Entersyscall() syscall.Entersyscall()
r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0) r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
@ -35,6 +45,11 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr,
return r, 0, syscall.Errno(errno) return r, 0, syscall.Errno(errno)
} }
func RawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) {
r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
return r, 0
}
func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0) r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
return r, 0, syscall.Errno(errno) return r, 0, syscall.Errno(errno)
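
SyscallNoError and RawSyscallNoError mirror Syscall/RawSyscall but skip the errno conversion, for system calls that can never fail. A hedged sketch of the intended use on Linux; getpid(2) is chosen only as an example of such a syscall.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// getpid(2) cannot fail, so no errno handling is needed.
	pid, _ := unix.RawSyscallNoError(unix.SYS_GETPID, 0, 0, 0)
	fmt.Println("pid:", pid)
}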


@ -31,6 +31,12 @@ gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintp
return r; return r;
} }
uintptr_t
gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
{
return syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9);
}
// Define the use function in C so that it is not inlined. // Define the use function in C so that it is not inlined.
extern void use(void *) __asm__ (GOSYM_PREFIX GOPKGPATH ".use") __attribute__((noinline)); extern void use(void *) __asm__ (GOSYM_PREFIX GOPKGPATH ".use") __attribute__((noinline));


@ -352,6 +352,18 @@ func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) {
return &value, err return &value, err
} }
// GetsockoptString returns the string value of the socket option opt for the
// socket associated with fd at the given socket level.
func GetsockoptString(fd, level, opt int) (string, error) {
buf := make([]byte, 256)
vallen := _Socklen(len(buf))
err := getsockopt(fd, level, opt, unsafe.Pointer(&buf[0]), &vallen)
if err != nil {
return "", err
}
return string(buf[:vallen-1]), nil
}
//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) //sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error)


@ -36,6 +36,7 @@ func Getwd() (string, error) {
return "", ENOTSUP return "", ENOTSUP
} }
// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets.
type SockaddrDatalink struct { type SockaddrDatalink struct {
Len uint8 Len uint8
Family uint8 Family uint8
@ -76,18 +77,6 @@ func nametomib(name string) (mib []_C_int, err error) {
return buf[0 : n/siz], nil return buf[0 : n/siz], nil
} }
func direntIno(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino))
}
func direntReclen(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
}
func direntNamlen(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen))
}
//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) //sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error)
func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) } func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) }
func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) } func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) }


@ -60,3 +60,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
} }
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) // sic func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) // sic
// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions
// of darwin/arm the syscall is called sysctl instead of __sysctl.
const SYS___SYSCTL = SYS_SYSCTL


@ -14,6 +14,7 @@ package unix
import "unsafe" import "unsafe"
// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets.
type SockaddrDatalink struct { type SockaddrDatalink struct {
Len uint8 Len uint8
Family uint8 Family uint8
@ -56,22 +57,6 @@ func nametomib(name string) (mib []_C_int, err error) {
return buf[0 : n/siz], nil return buf[0 : n/siz], nil
} }
func direntIno(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno))
}
func direntReclen(buf []byte) (uint64, bool) {
namlen, ok := direntNamlen(buf)
if !ok {
return 0, false
}
return (16 + namlen + 1 + 7) &^ 7, true
}
func direntNamlen(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen))
}
//sysnb pipe() (r int, w int, err error) //sysnb pipe() (r int, w int, err error)
func Pipe(p []int) (err error) { func Pipe(p []int) (err error) {
@ -110,6 +95,23 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) {
return return
} }
const ImplementsGetwd = true
//sys Getcwd(buf []byte) (n int, err error) = SYS___GETCWD
func Getwd() (string, error) {
var buf [PathMax]byte
_, err := Getcwd(buf[0:])
if err != nil {
return "", err
}
n := clen(buf[:])
if n < 1 {
return "", EINVAL
}
return string(buf[:n]), nil
}
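
With ImplementsGetwd now true on the BSDs, Getwd is backed by the __getcwd syscall and the result is trimmed at the first NUL via clen. Usage is identical on every platform; a minimal sketch:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	wd, err := unix.Getwd()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("cwd:", wd)
}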
func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
var _p0 unsafe.Pointer var _p0 unsafe.Pointer
var bufsize uintptr var bufsize uintptr
@ -169,6 +171,69 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) {
return &value, err return &value, err
} }
func sysctlUname(mib []_C_int, old *byte, oldlen *uintptr) error {
err := sysctl(mib, old, oldlen, nil, 0)
if err != nil {
// Utsname members on Dragonfly are only 32 bytes and
// the syscall returns ENOMEM in case the actual value
// is longer.
if err == ENOMEM {
err = nil
}
}
return err
}
func Uname(uname *Utsname) error {
mib := []_C_int{CTL_KERN, KERN_OSTYPE}
n := unsafe.Sizeof(uname.Sysname)
if err := sysctlUname(mib, &uname.Sysname[0], &n); err != nil {
return err
}
uname.Sysname[unsafe.Sizeof(uname.Sysname)-1] = 0
mib = []_C_int{CTL_KERN, KERN_HOSTNAME}
n = unsafe.Sizeof(uname.Nodename)
if err := sysctlUname(mib, &uname.Nodename[0], &n); err != nil {
return err
}
uname.Nodename[unsafe.Sizeof(uname.Nodename)-1] = 0
mib = []_C_int{CTL_KERN, KERN_OSRELEASE}
n = unsafe.Sizeof(uname.Release)
if err := sysctlUname(mib, &uname.Release[0], &n); err != nil {
return err
}
uname.Release[unsafe.Sizeof(uname.Release)-1] = 0
mib = []_C_int{CTL_KERN, KERN_VERSION}
n = unsafe.Sizeof(uname.Version)
if err := sysctlUname(mib, &uname.Version[0], &n); err != nil {
return err
}
// The version might have newlines or tabs in it, convert them to
// spaces.
for i, b := range uname.Version {
if b == '\n' || b == '\t' {
if i == len(uname.Version)-1 {
uname.Version[i] = 0
} else {
uname.Version[i] = ' '
}
}
}
mib = []_C_int{CTL_HW, HW_MACHINE}
n = unsafe.Sizeof(uname.Machine)
if err := sysctlUname(mib, &uname.Machine[0], &n); err != nil {
return err
}
uname.Machine[unsafe.Sizeof(uname.Machine)-1] = 0
return nil
}
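
Uname fills fixed-size byte arrays rather than Go strings, so callers normally trim each field at the first NUL. A sketch, assuming the BSD Utsname layout used by this change (byte-array fields); the cstring helper below is illustrative and not part of the package.

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

// cstring converts a NUL-terminated byte array into a Go string.
func cstring(b []byte) string {
	if i := bytes.IndexByte(b, 0); i >= 0 {
		b = b[:i]
	}
	return string(b)
}

func main() {
	var u unix.Utsname
	if err := unix.Uname(&u); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cstring(u.Sysname[:]), cstring(u.Release[:]), cstring(u.Machine[:]))
}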
/* /*
* Exposed directly * Exposed directly
*/ */


@ -14,6 +14,7 @@ package unix
import "unsafe" import "unsafe"
// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets.
type SockaddrDatalink struct { type SockaddrDatalink struct {
Len uint8 Len uint8
Family uint8 Family uint8
@ -54,18 +55,6 @@ func nametomib(name string) (mib []_C_int, err error) {
return buf[0 : n/siz], nil return buf[0 : n/siz], nil
} }
func direntIno(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno))
}
func direntReclen(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
}
func direntNamlen(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen))
}
//sysnb pipe() (r int, w int, err error) //sysnb pipe() (r int, w int, err error)
func Pipe(p []int) (err error) { func Pipe(p []int) (err error) {
@ -105,6 +94,23 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) {
return return
} }
const ImplementsGetwd = true
//sys Getcwd(buf []byte) (n int, err error) = SYS___GETCWD
func Getwd() (string, error) {
var buf [PathMax]byte
_, err := Getcwd(buf[0:])
if err != nil {
return "", err
}
n := clen(buf[:])
if n < 1 {
return "", EINVAL
}
return string(buf[:n]), nil
}
func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
var _p0 unsafe.Pointer var _p0 unsafe.Pointer
var bufsize uintptr var bufsize uintptr
@ -276,7 +282,6 @@ func Listxattr(file string, dest []byte) (sz int, err error) {
// FreeBSD won't allow you to list xattrs from multiple namespaces // FreeBSD won't allow you to list xattrs from multiple namespaces
s := 0 s := 0
var e error
for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} {
stmp, e := ExtattrListFile(file, nsid, uintptr(d), destsiz) stmp, e := ExtattrListFile(file, nsid, uintptr(d), destsiz)
@ -288,7 +293,6 @@ func Listxattr(file string, dest []byte) (sz int, err error) {
* we don't have read permissions on, so don't ignore those errors * we don't have read permissions on, so don't ignore those errors
*/ */
if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER {
e = nil
continue continue
} else if e != nil { } else if e != nil {
return s, e return s, e
@ -302,7 +306,7 @@ func Listxattr(file string, dest []byte) (sz int, err error) {
d = initxattrdest(dest, s) d = initxattrdest(dest, s)
} }
return s, e return s, nil
} }
func Flistxattr(fd int, dest []byte) (sz int, err error) { func Flistxattr(fd int, dest []byte) (sz int, err error) {
@ -310,11 +314,9 @@ func Flistxattr(fd int, dest []byte) (sz int, err error) {
destsiz := len(dest) destsiz := len(dest)
s := 0 s := 0
var e error
for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} {
stmp, e := ExtattrListFd(fd, nsid, uintptr(d), destsiz) stmp, e := ExtattrListFd(fd, nsid, uintptr(d), destsiz)
if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER {
e = nil
continue continue
} else if e != nil { } else if e != nil {
return s, e return s, e
@ -328,7 +330,7 @@ func Flistxattr(fd int, dest []byte) (sz int, err error) {
d = initxattrdest(dest, s) d = initxattrdest(dest, s)
} }
return s, e return s, nil
} }
func Llistxattr(link string, dest []byte) (sz int, err error) { func Llistxattr(link string, dest []byte) (sz int, err error) {
@ -336,11 +338,9 @@ func Llistxattr(link string, dest []byte) (sz int, err error) {
destsiz := len(dest) destsiz := len(dest)
s := 0 s := 0
var e error
for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} {
stmp, e := ExtattrListLink(link, nsid, uintptr(d), destsiz) stmp, e := ExtattrListLink(link, nsid, uintptr(d), destsiz)
if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER {
e = nil
continue continue
} else if e != nil { } else if e != nil {
return s, e return s, e
@ -354,7 +354,7 @@ func Llistxattr(link string, dest []byte) (sz int, err error) {
d = initxattrdest(dest, s) d = initxattrdest(dest, s)
} }
return s, e return s, nil
} }
//sys ioctl(fd int, req uint, arg uintptr) (err error) //sys ioctl(fd int, req uint, arg uintptr) (err error)
@ -396,6 +396,52 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) {
return &value, err return &value, err
} }
func Uname(uname *Utsname) error {
mib := []_C_int{CTL_KERN, KERN_OSTYPE}
n := unsafe.Sizeof(uname.Sysname)
if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil {
return err
}
mib = []_C_int{CTL_KERN, KERN_HOSTNAME}
n = unsafe.Sizeof(uname.Nodename)
if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil {
return err
}
mib = []_C_int{CTL_KERN, KERN_OSRELEASE}
n = unsafe.Sizeof(uname.Release)
if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil {
return err
}
mib = []_C_int{CTL_KERN, KERN_VERSION}
n = unsafe.Sizeof(uname.Version)
if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil {
return err
}
// The version might have newlines or tabs in it, convert them to
// spaces.
for i, b := range uname.Version {
if b == '\n' || b == '\t' {
if i == len(uname.Version)-1 {
uname.Version[i] = 0
} else {
uname.Version[i] = ' '
}
}
}
mib = []_C_int{CTL_HW, HW_MACHINE}
n = unsafe.Sizeof(uname.Machine)
if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil {
return err
}
return nil
}
/* /*
* Exposed directly * Exposed directly
*/ */
@ -439,6 +485,7 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) {
//sys Fstatfs(fd int, stat *Statfs_t) (err error) //sys Fstatfs(fd int, stat *Statfs_t) (err error)
//sys Fsync(fd int) (err error) //sys Fsync(fd int) (err error)
//sys Ftruncate(fd int, length int64) (err error) //sys Ftruncate(fd int, length int64) (err error)
//sys Getdents(fd int, buf []byte) (n int, err error)
//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) //sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error)
//sys Getdtablesize() (size int) //sys Getdtablesize() (size int)
//sysnb Getegid() (egid int) //sysnb Getegid() (egid int)


@ -413,6 +413,7 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) {
return unsafe.Pointer(&sa.raw), sl, nil return unsafe.Pointer(&sa.raw), sl, nil
} }
// SockaddrLinklayer implements the Sockaddr interface for AF_PACKET type sockets.
type SockaddrLinklayer struct { type SockaddrLinklayer struct {
Protocol uint16 Protocol uint16
Ifindex int Ifindex int
@ -439,6 +440,7 @@ func (sa *SockaddrLinklayer) sockaddr() (unsafe.Pointer, _Socklen, error) {
return unsafe.Pointer(&sa.raw), SizeofSockaddrLinklayer, nil return unsafe.Pointer(&sa.raw), SizeofSockaddrLinklayer, nil
} }
// SockaddrNetlink implements the Sockaddr interface for AF_NETLINK type sockets.
type SockaddrNetlink struct { type SockaddrNetlink struct {
Family uint16 Family uint16
Pad uint16 Pad uint16
@ -455,6 +457,8 @@ func (sa *SockaddrNetlink) sockaddr() (unsafe.Pointer, _Socklen, error) {
return unsafe.Pointer(&sa.raw), SizeofSockaddrNetlink, nil return unsafe.Pointer(&sa.raw), SizeofSockaddrNetlink, nil
} }
// SockaddrHCI implements the Sockaddr interface for AF_BLUETOOTH type sockets
// using the HCI protocol.
type SockaddrHCI struct { type SockaddrHCI struct {
Dev uint16 Dev uint16
Channel uint16 Channel uint16
@ -468,6 +472,31 @@ func (sa *SockaddrHCI) sockaddr() (unsafe.Pointer, _Socklen, error) {
return unsafe.Pointer(&sa.raw), SizeofSockaddrHCI, nil return unsafe.Pointer(&sa.raw), SizeofSockaddrHCI, nil
} }
// SockaddrL2 implements the Sockaddr interface for AF_BLUETOOTH type sockets
// using the L2CAP protocol.
type SockaddrL2 struct {
PSM uint16
CID uint16
Addr [6]uint8
AddrType uint8
raw RawSockaddrL2
}
func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) {
sa.raw.Family = AF_BLUETOOTH
psm := (*[2]byte)(unsafe.Pointer(&sa.raw.Psm))
psm[0] = byte(sa.PSM)
psm[1] = byte(sa.PSM >> 8)
for i := 0; i < len(sa.Addr); i++ {
sa.raw.Bdaddr[i] = sa.Addr[len(sa.Addr)-1-i]
}
cid := (*[2]byte)(unsafe.Pointer(&sa.raw.Cid))
cid[0] = byte(sa.CID)
cid[1] = byte(sa.CID >> 8)
sa.raw.Bdaddr_type = sa.AddrType
return unsafe.Pointer(&sa.raw), SizeofSockaddrL2, nil
}
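
SockaddrL2's sockaddr method stores the PSM and CID little-endian and reverses the device address into the kernel's bdaddr byte order. A hedged sketch of dialing an L2CAP socket with it; the remote address and PSM are placeholders, and BTPROTO_L2CAP is written as a local constant (value 0) rather than assuming a package constant.

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

const btprotoL2CAP = 0 // BTPROTO_L2CAP from <bluetooth/bluetooth.h>

func main() {
	fd, err := unix.Socket(unix.AF_BLUETOOTH, unix.SOCK_SEQPACKET, btprotoL2CAP)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	addr := &unix.SockaddrL2{
		PSM: 0x1001, // placeholder PSM
		// Placeholder address in display (MSB-first) order; sockaddr()
		// reverses it into the little-endian bdaddr the kernel expects.
		Addr:     [6]uint8{0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF},
		AddrType: 0, // public BR/EDR address
	}
	if err := unix.Connect(fd, addr); err != nil {
		log.Fatal(err)
	}
}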
// SockaddrCAN implements the Sockaddr interface for AF_CAN type sockets. // SockaddrCAN implements the Sockaddr interface for AF_CAN type sockets.
// The RxID and TxID fields are used for transport protocol addressing in // The RxID and TxID fields are used for transport protocol addressing in
// (CAN_TP16, CAN_TP20, CAN_MCNET, and CAN_ISOTP), they can be left with // (CAN_TP16, CAN_TP20, CAN_MCNET, and CAN_ISOTP), they can be left with
@ -808,6 +837,24 @@ func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) {
return &value, err return &value, err
} }
// GetsockoptString returns the string value of the socket option opt for the
// socket associated with fd at the given socket level.
func GetsockoptString(fd, level, opt int) (string, error) {
buf := make([]byte, 256)
vallen := _Socklen(len(buf))
err := getsockopt(fd, level, opt, unsafe.Pointer(&buf[0]), &vallen)
if err != nil {
if err == ERANGE {
buf = make([]byte, vallen)
err = getsockopt(fd, level, opt, unsafe.Pointer(&buf[0]), &vallen)
}
if err != nil {
return "", err
}
}
return string(buf[:vallen-1]), nil
}
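
GetsockoptString wraps getsockopt for options whose value is a NUL-terminated string, growing the buffer and retrying when the kernel reports ERANGE. A hedged sketch that reads back a socket's bound device name on Linux; SO_BINDTODEVICE is only one example of such an option, and setting it usually requires CAP_NET_RAW.

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	// Bind the socket to an interface so the option has a value;
	// "lo" is only an example, and this call needs CAP_NET_RAW.
	if err := unix.SetsockoptString(fd, unix.SOL_SOCKET, unix.SO_BINDTODEVICE, "lo"); err != nil {
		log.Fatal(err)
	}

	dev, err := unix.GetsockoptString(fd, unix.SOL_SOCKET, unix.SO_BINDTODEVICE)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("bound to:", dev)
}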
func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) { func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) {
return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq)) return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq))
} }
@ -1172,22 +1219,6 @@ func ReadDirent(fd int, buf []byte) (n int, err error) {
return Getdents(fd, buf) return Getdents(fd, buf)
} }
func direntIno(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino))
}
func direntReclen(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
}
func direntNamlen(buf []byte) (uint64, bool) {
reclen, ok := direntReclen(buf)
if !ok {
return 0, false
}
return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true
}
//sys mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) //sys mount(source string, target string, fstype string, flags uintptr, data *byte) (err error)
func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
@ -1293,6 +1324,7 @@ func Setgid(uid int) (err error) {
//sys Setpriority(which int, who int, prio int) (err error) //sys Setpriority(which int, who int, prio int) (err error)
//sys Setxattr(path string, attr string, data []byte, flags int) (err error) //sys Setxattr(path string, attr string, data []byte, flags int) (err error)
//sys Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error)
//sys Sync() //sys Sync()
//sys Syncfs(fd int) (err error) //sys Syncfs(fd int) (err error)
//sysnb Sysinfo(info *Sysinfo_t) (err error) //sysnb Sysinfo(info *Sysinfo_t) (err error)
@ -1410,7 +1442,6 @@ func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) {
// Msgget // Msgget
// Msgrcv // Msgrcv
// Msgsnd // Msgsnd
// Newfstatat
// Nfsservctl // Nfsservctl
// Personality // Personality
// Pselect6 // Pselect6
@ -1431,11 +1462,9 @@ func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) {
// RtSigtimedwait // RtSigtimedwait
// SchedGetPriorityMax // SchedGetPriorityMax
// SchedGetPriorityMin // SchedGetPriorityMin
// SchedGetaffinity
// SchedGetparam // SchedGetparam
// SchedGetscheduler // SchedGetscheduler
// SchedRrGetInterval // SchedRrGetInterval
// SchedSetaffinity
// SchedSetparam // SchedSetparam
// SchedYield // SchedYield
// Security // Security


@ -54,6 +54,7 @@ func Pipe2(p []int, flags int) (err error) {
//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64_64 //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64_64
//sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32 //sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32
//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64
//sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64
//sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64 //sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64
//sysnb Getegid() (egid int) = SYS_GETEGID32 //sysnb Getegid() (egid int) = SYS_GETEGID32
//sysnb Geteuid() (euid int) = SYS_GETEUID32 //sysnb Geteuid() (euid int) = SYS_GETEUID32


@ -11,6 +11,7 @@ package unix
//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64
//sys Fchown(fd int, uid int, gid int) (err error) //sys Fchown(fd int, uid int, gid int) (err error)
//sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstat(fd int, stat *Stat_t) (err error)
//sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_NEWFSTATAT
//sys Fstatfs(fd int, buf *Statfs_t) (err error) //sys Fstatfs(fd int, buf *Statfs_t) (err error)
//sys Ftruncate(fd int, length int64) (err error) //sys Ftruncate(fd int, length int64) (err error)
//sysnb Getegid() (egid int) //sysnb Getegid() (egid int)
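
The new Fstatat wrappers expose the *at form of stat; with AT_FDCWD the path is resolved relative to the current directory, and AT_SYMLINK_NOFOLLOW turns it into an lstat. A minimal sketch; the path is illustrative.

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var st unix.Stat_t
	// Pass unix.AT_SYMLINK_NOFOLLOW as the flags argument for lstat semantics.
	if err := unix.Fstatat(unix.AT_FDCWD, "/etc/hosts", &st, 0); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("size=%d mode=%o\n", st.Size, st.Mode)
}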


@ -77,6 +77,7 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
//sys Dup2(oldfd int, newfd int) (err error) //sys Dup2(oldfd int, newfd int) (err error)
//sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32 //sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32
//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64
//sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64
//sysnb Getegid() (egid int) = SYS_GETEGID32 //sysnb Getegid() (egid int) = SYS_GETEGID32
//sysnb Geteuid() (euid int) = SYS_GETEUID32 //sysnb Geteuid() (euid int) = SYS_GETEUID32
//sysnb Getgid() (gid int) = SYS_GETGID32 //sysnb Getgid() (gid int) = SYS_GETGID32

vendor/golang.org/x/sys/unix/syscall_linux_gc.go (new file: 14 lines, generated, vendored)

@ -0,0 +1,14 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux,!gccgo
package unix
// SyscallNoError may be used instead of Syscall for syscalls that don't fail.
func SyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr)
// RawSyscallNoError may be used instead of RawSyscall for syscalls that don't
// fail.
func RawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr)


@ -10,6 +10,7 @@ package unix
//sys Dup2(oldfd int, newfd int) (err error) //sys Dup2(oldfd int, newfd int) (err error)
//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
//sys Fchown(fd int, uid int, gid int) (err error) //sys Fchown(fd int, uid int, gid int) (err error)
//sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_NEWFSTATAT
//sys Fstatfs(fd int, buf *Statfs_t) (err error) //sys Fstatfs(fd int, buf *Statfs_t) (err error)
//sys Ftruncate(fd int, length int64) (err error) //sys Ftruncate(fd int, length int64) (err error)
//sysnb Getegid() (egid int) //sysnb Getegid() (egid int)


@ -65,6 +65,7 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr,
//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64
//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64
//sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64
//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64
//sys Utime(path string, buf *Utimbuf) (err error) //sys Utime(path string, buf *Utimbuf) (err error)


@ -11,6 +11,7 @@ package unix
//sys Dup2(oldfd int, newfd int) (err error) //sys Dup2(oldfd int, newfd int) (err error)
//sys Fchown(fd int, uid int, gid int) (err error) //sys Fchown(fd int, uid int, gid int) (err error)
//sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstat(fd int, stat *Stat_t) (err error)
//sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_NEWFSTATAT
//sys Fstatfs(fd int, buf *Statfs_t) (err error) //sys Fstatfs(fd int, buf *Statfs_t) (err error)
//sys Ftruncate(fd int, length int64) (err error) //sys Ftruncate(fd int, length int64) (err error)
//sysnb Getegid() (egid int) //sysnb Getegid() (egid int)


@ -15,6 +15,7 @@ import (
//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64
//sys Fchown(fd int, uid int, gid int) (err error) //sys Fchown(fd int, uid int, gid int) (err error)
//sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstat(fd int, stat *Stat_t) (err error)
//sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_NEWFSTATAT
//sys Fstatfs(fd int, buf *Statfs_t) (err error) //sys Fstatfs(fd int, buf *Statfs_t) (err error)
//sys Ftruncate(fd int, length int64) (err error) //sys Ftruncate(fd int, length int64) (err error)
//sysnb Getegid() (egid int) //sysnb Getegid() (egid int)


@ -10,6 +10,7 @@ package unix
//sys Dup2(oldfd int, newfd int) (err error) //sys Dup2(oldfd int, newfd int) (err error)
//sys Fchown(fd int, uid int, gid int) (err error) //sys Fchown(fd int, uid int, gid int) (err error)
//sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstat(fd int, stat *Stat_t) (err error)
//sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64
//sys Fstatfs(fd int, buf *Statfs_t) (err error) //sys Fstatfs(fd int, buf *Statfs_t) (err error)
//sys Ftruncate(fd int, length int64) (err error) //sys Ftruncate(fd int, length int64) (err error)
//sysnb Getegid() (egid int) //sysnb Getegid() (egid int)


@ -17,6 +17,7 @@ import (
"unsafe" "unsafe"
) )
// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets.
type SockaddrDatalink struct { type SockaddrDatalink struct {
Len uint8 Len uint8
Family uint8 Family uint8
@ -92,18 +93,6 @@ func nametomib(name string) (mib []_C_int, err error) {
return mib, nil return mib, nil
} }
func direntIno(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno))
}
func direntReclen(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
}
func direntNamlen(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen))
}
//sysnb pipe() (fd1 int, fd2 int, err error) //sysnb pipe() (fd1 int, fd2 int, err error)
func Pipe(p []int) (err error) { func Pipe(p []int) (err error) {
if len(p) != 2 { if len(p) != 2 {
@ -118,6 +107,23 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
return getdents(fd, buf) return getdents(fd, buf)
} }
const ImplementsGetwd = true
//sys Getcwd(buf []byte) (n int, err error) = SYS___GETCWD
func Getwd() (string, error) {
var buf [PathMax]byte
_, err := Getcwd(buf[0:])
if err != nil {
return "", err
}
n := clen(buf[:])
if n < 1 {
return "", EINVAL
}
return string(buf[:n]), nil
}
// TODO // TODO
func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
return -1, ENOSYS return -1, ENOSYS
@ -167,6 +173,52 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) {
return &value, err return &value, err
} }
func Uname(uname *Utsname) error {
mib := []_C_int{CTL_KERN, KERN_OSTYPE}
n := unsafe.Sizeof(uname.Sysname)
if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil {
return err
}
mib = []_C_int{CTL_KERN, KERN_HOSTNAME}
n = unsafe.Sizeof(uname.Nodename)
if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil {
return err
}
mib = []_C_int{CTL_KERN, KERN_OSRELEASE}
n = unsafe.Sizeof(uname.Release)
if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil {
return err
}
mib = []_C_int{CTL_KERN, KERN_VERSION}
n = unsafe.Sizeof(uname.Version)
if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil {
return err
}
// The version might have newlines or tabs in it, convert them to
// spaces.
for i, b := range uname.Version {
if b == '\n' || b == '\t' {
if i == len(uname.Version)-1 {
uname.Version[i] = 0
} else {
uname.Version[i] = ' '
}
}
}
mib = []_C_int{CTL_HW, HW_MACHINE}
n = unsafe.Sizeof(uname.Machine)
if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil {
return err
}
return nil
}
/* /*
* Exposed directly * Exposed directly
*/ */


@ -1,11 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build dragonfly freebsd netbsd openbsd
package unix
const ImplementsGetwd = false
func Getwd() (string, error) { return "", ENOTSUP }


@ -18,6 +18,7 @@ import (
"unsafe" "unsafe"
) )
// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets.
type SockaddrDatalink struct { type SockaddrDatalink struct {
Len uint8 Len uint8
Family uint8 Family uint8
@ -42,18 +43,6 @@ func nametomib(name string) (mib []_C_int, err error) {
return nil, EINVAL return nil, EINVAL
} }
func direntIno(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno))
}
func direntReclen(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
}
func direntNamlen(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen))
}
//sysnb pipe(p *[2]_C_int) (err error) //sysnb pipe(p *[2]_C_int) (err error)
func Pipe(p []int) (err error) { func Pipe(p []int) (err error) {
if len(p) != 2 { if len(p) != 2 {
@ -71,6 +60,23 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
return getdents(fd, buf) return getdents(fd, buf)
} }
const ImplementsGetwd = true
//sys Getcwd(buf []byte) (n int, err error) = SYS___GETCWD
func Getwd() (string, error) {
var buf [PathMax]byte
_, err := Getcwd(buf[0:])
if err != nil {
return "", err
}
n := clen(buf[:])
if n < 1 {
return "", EINVAL
}
return string(buf[:n]), nil
}
// TODO // TODO
func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
return -1, ENOSYS return -1, ENOSYS
@ -135,6 +141,52 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) {
return &value, err return &value, err
} }
func Uname(uname *Utsname) error {
mib := []_C_int{CTL_KERN, KERN_OSTYPE}
n := unsafe.Sizeof(uname.Sysname)
if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil {
return err
}
mib = []_C_int{CTL_KERN, KERN_HOSTNAME}
n = unsafe.Sizeof(uname.Nodename)
if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil {
return err
}
mib = []_C_int{CTL_KERN, KERN_OSRELEASE}
n = unsafe.Sizeof(uname.Release)
if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil {
return err
}
mib = []_C_int{CTL_KERN, KERN_VERSION}
n = unsafe.Sizeof(uname.Version)
if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil {
return err
}
// The version might have newlines or tabs in it, convert them to
// spaces.
for i, b := range uname.Version {
if b == '\n' || b == '\t' {
if i == len(uname.Version)-1 {
uname.Version[i] = 0
} else {
uname.Version[i] = ' '
}
}
}
mib = []_C_int{CTL_HW, HW_MACHINE}
n = unsafe.Sizeof(uname.Machine)
if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil {
return err
}
return nil
}
/* /*
* Exposed directly * Exposed directly
*/ */


@ -23,6 +23,7 @@ type syscallFunc uintptr
func rawSysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) func rawSysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets.
type SockaddrDatalink struct { type SockaddrDatalink struct {
Family uint16 Family uint16
Index uint16 Index uint16
@ -34,31 +35,6 @@ type SockaddrDatalink struct {
raw RawSockaddrDatalink raw RawSockaddrDatalink
} }
func clen(n []byte) int {
for i := 0; i < len(n); i++ {
if n[i] == 0 {
return i
}
}
return len(n)
}
func direntIno(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino))
}
func direntReclen(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
}
func direntNamlen(buf []byte) (uint64, bool) {
reclen, ok := direntReclen(buf)
if !ok {
return 0, false
}
return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true
}
//sysnb pipe(p *[2]_C_int) (n int, err error) //sysnb pipe(p *[2]_C_int) (n int, err error)
func Pipe(p []int) (err error) { func Pipe(p []int) (err error) {
@ -139,6 +115,18 @@ func Getsockname(fd int) (sa Sockaddr, err error) {
return anyToSockaddr(&rsa) return anyToSockaddr(&rsa)
} }
// GetsockoptString returns the string value of the socket option opt for the
// socket associated with fd at the given socket level.
func GetsockoptString(fd, level, opt int) (string, error) {
buf := make([]byte, 256)
vallen := _Socklen(len(buf))
err := getsockopt(fd, level, opt, unsafe.Pointer(&buf[0]), &vallen)
if err != nil {
return "", err
}
return string(buf[:vallen-1]), nil
}
const ImplementsGetwd = true const ImplementsGetwd = true
//sys Getcwd(buf []byte) (n int, err error) //sys Getcwd(buf []byte) (n int, err error)
@ -655,6 +643,7 @@ func Poll(fds []PollFd, timeout int) (n int, err error) {
//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
//sys Rmdir(path string) (err error) //sys Rmdir(path string) (err error)
//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = lseek //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = lseek
//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error)
//sysnb Setegid(egid int) (err error) //sysnb Setegid(egid int) (err error)
//sysnb Seteuid(euid int) (err error) //sysnb Seteuid(euid int) (err error)
//sysnb Setgid(gid int) (err error) //sysnb Setgid(gid int) (err error)


@ -50,6 +50,16 @@ func errnoErr(e syscall.Errno) error {
return e return e
} }
// clen returns the index of the first NULL byte in n or len(n) if n contains no NULL byte.
func clen(n []byte) int {
for i := 0; i < len(n); i++ {
if n[i] == 0 {
return i
}
}
return len(n)
}
// Mmap manager, for use by operating system-specific implementations. // Mmap manager, for use by operating system-specific implementations.
type mmapper struct { type mmapper struct {
@ -138,16 +148,19 @@ func Write(fd int, p []byte) (n int, err error) {
// creation of IPv6 sockets to return EAFNOSUPPORT. // creation of IPv6 sockets to return EAFNOSUPPORT.
var SocketDisableIPv6 bool var SocketDisableIPv6 bool
// Sockaddr represents a socket address.
type Sockaddr interface { type Sockaddr interface {
sockaddr() (ptr unsafe.Pointer, len _Socklen, err error) // lowercase; only we can define Sockaddrs sockaddr() (ptr unsafe.Pointer, len _Socklen, err error) // lowercase; only we can define Sockaddrs
} }
// SockaddrInet4 implements the Sockaddr interface for AF_INET type sockets.
type SockaddrInet4 struct { type SockaddrInet4 struct {
Port int Port int
Addr [4]byte Addr [4]byte
raw RawSockaddrInet4 raw RawSockaddrInet4
} }
// SockaddrInet6 implements the Sockaddr interface for AF_INET6 type sockets.
type SockaddrInet6 struct { type SockaddrInet6 struct {
Port int Port int
ZoneId uint32 ZoneId uint32
@ -155,6 +168,7 @@ type SockaddrInet6 struct {
raw RawSockaddrInet6 raw RawSockaddrInet6
} }
// SockaddrUnix implements the Sockaddr interface for AF_UNIX type sockets.
type SockaddrUnix struct { type SockaddrUnix struct {
Name string Name string
raw RawSockaddrUnix raw RawSockaddrUnix


@ -6,6 +6,8 @@
package unix package unix
import "time"
// TimespecToNsec converts a Timespec value into a number of // TimespecToNsec converts a Timespec value into a number of
// nanoseconds since the Unix epoch. // nanoseconds since the Unix epoch.
func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
@ -22,6 +24,24 @@ func NsecToTimespec(nsec int64) Timespec {
return setTimespec(sec, nsec) return setTimespec(sec, nsec)
} }
// TimeToTimespec converts t into a Timespec.
// On some 32-bit systems the range of valid Timespec values are smaller
// than that of time.Time values. So if t is out of the valid range of
// Timespec, it returns a zero Timespec and ERANGE.
func TimeToTimespec(t time.Time) (Timespec, error) {
sec := t.Unix()
nsec := int64(t.Nanosecond())
ts := setTimespec(sec, nsec)
// Currently all targets have either int32 or int64 for Timespec.Sec.
// If there were a new target with floating point type for it, we have
// to consider the rounding error.
if int64(ts.Sec) != sec {
return Timespec{}, ERANGE
}
return ts, nil
}
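
TimeToTimespec refuses times that do not fit the target's Timespec.Sec and reports ERANGE instead of silently truncating. A minimal sketch of the round trip:

package main

import (
	"fmt"
	"log"
	"time"

	"golang.org/x/sys/unix"
)

func main() {
	ts, err := unix.TimeToTimespec(time.Now())
	if err != nil {
		// Only possible on targets whose Timespec.Sec is narrower than int64.
		log.Fatal(err)
	}
	fmt.Println("sec:", ts.Sec, "nsec:", ts.Nsec)
	fmt.Println("nanoseconds since epoch:", unix.TimespecToNsec(ts))
}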
// TimevalToNsec converts a Timeval value into a number of nanoseconds // TimevalToNsec converts a Timeval value into a number of nanoseconds
// since the Unix epoch. // since the Unix epoch.
func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }


@ -168,6 +168,8 @@ const (
CSTOP = 0x13 CSTOP = 0x13
CSTOPB = 0x400 CSTOPB = 0x400
CSUSP = 0x1a CSUSP = 0x1a
CTL_HW = 0x6
CTL_KERN = 0x1
CTL_MAXNAME = 0xc CTL_MAXNAME = 0xc
CTL_NET = 0x4 CTL_NET = 0x4
DLT_A429 = 0xb8 DLT_A429 = 0xb8
@ -353,6 +355,7 @@ const (
F_UNLCK = 0x2 F_UNLCK = 0x2
F_WRLCK = 0x3 F_WRLCK = 0x3
HUPCL = 0x4000 HUPCL = 0x4000
HW_MACHINE = 0x1
ICANON = 0x100 ICANON = 0x100
ICMP6_FILTER = 0x12 ICMP6_FILTER = 0x12
ICRNL = 0x100 ICRNL = 0x100
@ -835,6 +838,10 @@ const (
IXANY = 0x800 IXANY = 0x800
IXOFF = 0x400 IXOFF = 0x400
IXON = 0x200 IXON = 0x200
KERN_HOSTNAME = 0xa
KERN_OSRELEASE = 0x2
KERN_OSTYPE = 0x1
KERN_VERSION = 0x4
LOCK_EX = 0x2 LOCK_EX = 0x2
LOCK_NB = 0x4 LOCK_NB = 0x4
LOCK_SH = 0x1 LOCK_SH = 0x1


@ -351,6 +351,8 @@ const (
CSTOP = 0x13 CSTOP = 0x13
CSTOPB = 0x400 CSTOPB = 0x400
CSUSP = 0x1a CSUSP = 0x1a
CTL_HW = 0x6
CTL_KERN = 0x1
CTL_MAXNAME = 0x18 CTL_MAXNAME = 0x18
CTL_NET = 0x4 CTL_NET = 0x4
DLT_A429 = 0xb8 DLT_A429 = 0xb8
@ -608,6 +610,7 @@ const (
F_UNLCKSYS = 0x4 F_UNLCKSYS = 0x4
F_WRLCK = 0x3 F_WRLCK = 0x3
HUPCL = 0x4000 HUPCL = 0x4000
HW_MACHINE = 0x1
ICANON = 0x100 ICANON = 0x100
ICMP6_FILTER = 0x12 ICMP6_FILTER = 0x12
ICRNL = 0x100 ICRNL = 0x100
@ -944,6 +947,10 @@ const (
IXANY = 0x800 IXANY = 0x800
IXOFF = 0x400 IXOFF = 0x400
IXON = 0x200 IXON = 0x200
KERN_HOSTNAME = 0xa
KERN_OSRELEASE = 0x2
KERN_OSTYPE = 0x1
KERN_VERSION = 0x4
LOCK_EX = 0x2 LOCK_EX = 0x2
LOCK_NB = 0x4 LOCK_NB = 0x4
LOCK_SH = 0x1 LOCK_SH = 0x1


@ -351,6 +351,8 @@ const (
CSTOP = 0x13 CSTOP = 0x13
CSTOPB = 0x400 CSTOPB = 0x400
CSUSP = 0x1a CSUSP = 0x1a
CTL_HW = 0x6
CTL_KERN = 0x1
CTL_MAXNAME = 0x18 CTL_MAXNAME = 0x18
CTL_NET = 0x4 CTL_NET = 0x4
DLT_A429 = 0xb8 DLT_A429 = 0xb8
@ -608,6 +610,7 @@ const (
F_UNLCKSYS = 0x4 F_UNLCKSYS = 0x4
F_WRLCK = 0x3 F_WRLCK = 0x3
HUPCL = 0x4000 HUPCL = 0x4000
HW_MACHINE = 0x1
ICANON = 0x100 ICANON = 0x100
ICMP6_FILTER = 0x12 ICMP6_FILTER = 0x12
ICRNL = 0x100 ICRNL = 0x100
@ -944,6 +947,10 @@ const (
IXANY = 0x800 IXANY = 0x800
IXOFF = 0x400 IXOFF = 0x400
IXON = 0x200 IXON = 0x200
KERN_HOSTNAME = 0xa
KERN_OSRELEASE = 0x2
KERN_OSTYPE = 0x1
KERN_VERSION = 0x4
LOCK_EX = 0x2 LOCK_EX = 0x2
LOCK_NB = 0x4 LOCK_NB = 0x4
LOCK_SH = 0x1 LOCK_SH = 0x1


@ -351,6 +351,8 @@ const (
CSTOP = 0x13 CSTOP = 0x13
CSTOPB = 0x400 CSTOPB = 0x400
CSUSP = 0x1a CSUSP = 0x1a
CTL_HW = 0x6
CTL_KERN = 0x1
CTL_MAXNAME = 0x18 CTL_MAXNAME = 0x18
CTL_NET = 0x4 CTL_NET = 0x4
DLT_A429 = 0xb8 DLT_A429 = 0xb8
@ -615,6 +617,7 @@ const (
F_UNLCKSYS = 0x4 F_UNLCKSYS = 0x4
F_WRLCK = 0x3 F_WRLCK = 0x3
HUPCL = 0x4000 HUPCL = 0x4000
HW_MACHINE = 0x1
ICANON = 0x100 ICANON = 0x100
ICMP6_FILTER = 0x12 ICMP6_FILTER = 0x12
ICRNL = 0x100 ICRNL = 0x100
@ -951,6 +954,10 @@ const (
IXANY = 0x800 IXANY = 0x800
IXOFF = 0x400 IXOFF = 0x400
IXON = 0x200 IXON = 0x200
KERN_HOSTNAME = 0xa
KERN_OSRELEASE = 0x2
KERN_OSTYPE = 0x1
KERN_VERSION = 0x4
LOCK_EX = 0x2 LOCK_EX = 0x2
LOCK_NB = 0x4 LOCK_NB = 0x4
LOCK_SH = 0x1 LOCK_SH = 0x1


@ -1638,6 +1638,27 @@ const (
SPLICE_F_MORE = 0x4 SPLICE_F_MORE = 0x4
SPLICE_F_MOVE = 0x1 SPLICE_F_MOVE = 0x1
SPLICE_F_NONBLOCK = 0x2 SPLICE_F_NONBLOCK = 0x2
STATX_ALL = 0xfff
STATX_ATIME = 0x20
STATX_ATTR_APPEND = 0x20
STATX_ATTR_AUTOMOUNT = 0x1000
STATX_ATTR_COMPRESSED = 0x4
STATX_ATTR_ENCRYPTED = 0x800
STATX_ATTR_IMMUTABLE = 0x10
STATX_ATTR_NODUMP = 0x40
STATX_BASIC_STATS = 0x7ff
STATX_BLOCKS = 0x400
STATX_BTIME = 0x800
STATX_CTIME = 0x80
STATX_GID = 0x10
STATX_INO = 0x100
STATX_MODE = 0x2
STATX_MTIME = 0x40
STATX_NLINK = 0x4
STATX_SIZE = 0x200
STATX_TYPE = 0x1
STATX_UID = 0x8
STATX__RESERVED = 0x80000000
S_BLKSIZE = 0x200 S_BLKSIZE = 0x200
S_IEXEC = 0x40 S_IEXEC = 0x40
S_IFBLK = 0x6000 S_IFBLK = 0x6000
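
These STATX_* mask bits pair with the Statx wrapper added in syscall_linux.go. A hedged sketch requesting the basic stats plus the birth time; the path is an example, and kernels older than 4.11 return ENOSYS.

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var stx unix.Statx_t
	mask := unix.STATX_BASIC_STATS | unix.STATX_BTIME
	if err := unix.Statx(unix.AT_FDCWD, "/etc/hosts", 0, mask, &stx); err != nil {
		log.Fatal(err) // ENOSYS if the kernel lacks statx(2)
	}
	// stx.Mask reports which of the requested fields the kernel filled in.
	fmt.Printf("size=%d filled=%#x btime=%d\n", stx.Size, stx.Mask, stx.Btime.Sec)
}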


@ -1639,6 +1639,27 @@ const (
SPLICE_F_MORE = 0x4 SPLICE_F_MORE = 0x4
SPLICE_F_MOVE = 0x1 SPLICE_F_MOVE = 0x1
SPLICE_F_NONBLOCK = 0x2 SPLICE_F_NONBLOCK = 0x2
STATX_ALL = 0xfff
STATX_ATIME = 0x20
STATX_ATTR_APPEND = 0x20
STATX_ATTR_AUTOMOUNT = 0x1000
STATX_ATTR_COMPRESSED = 0x4
STATX_ATTR_ENCRYPTED = 0x800
STATX_ATTR_IMMUTABLE = 0x10
STATX_ATTR_NODUMP = 0x40
STATX_BASIC_STATS = 0x7ff
STATX_BLOCKS = 0x400
STATX_BTIME = 0x800
STATX_CTIME = 0x80
STATX_GID = 0x10
STATX_INO = 0x100
STATX_MODE = 0x2
STATX_MTIME = 0x40
STATX_NLINK = 0x4
STATX_SIZE = 0x200
STATX_TYPE = 0x1
STATX_UID = 0x8
STATX__RESERVED = 0x80000000
S_BLKSIZE = 0x200 S_BLKSIZE = 0x200
S_IEXEC = 0x40 S_IEXEC = 0x40
S_IFBLK = 0x6000 S_IFBLK = 0x6000


@ -1643,6 +1643,27 @@ const (
SPLICE_F_MORE = 0x4 SPLICE_F_MORE = 0x4
SPLICE_F_MOVE = 0x1 SPLICE_F_MOVE = 0x1
SPLICE_F_NONBLOCK = 0x2 SPLICE_F_NONBLOCK = 0x2
STATX_ALL = 0xfff
STATX_ATIME = 0x20
STATX_ATTR_APPEND = 0x20
STATX_ATTR_AUTOMOUNT = 0x1000
STATX_ATTR_COMPRESSED = 0x4
STATX_ATTR_ENCRYPTED = 0x800
STATX_ATTR_IMMUTABLE = 0x10
STATX_ATTR_NODUMP = 0x40
STATX_BASIC_STATS = 0x7ff
STATX_BLOCKS = 0x400
STATX_BTIME = 0x800
STATX_CTIME = 0x80
STATX_GID = 0x10
STATX_INO = 0x100
STATX_MODE = 0x2
STATX_MTIME = 0x40
STATX_NLINK = 0x4
STATX_SIZE = 0x200
STATX_TYPE = 0x1
STATX_UID = 0x8
STATX__RESERVED = 0x80000000
S_BLKSIZE = 0x200 S_BLKSIZE = 0x200
S_IEXEC = 0x40 S_IEXEC = 0x40
S_IFBLK = 0x6000 S_IFBLK = 0x6000


@ -1629,6 +1629,27 @@ const (
SPLICE_F_MORE = 0x4 SPLICE_F_MORE = 0x4
SPLICE_F_MOVE = 0x1 SPLICE_F_MOVE = 0x1
SPLICE_F_NONBLOCK = 0x2 SPLICE_F_NONBLOCK = 0x2
STATX_ALL = 0xfff
STATX_ATIME = 0x20
STATX_ATTR_APPEND = 0x20
STATX_ATTR_AUTOMOUNT = 0x1000
STATX_ATTR_COMPRESSED = 0x4
STATX_ATTR_ENCRYPTED = 0x800
STATX_ATTR_IMMUTABLE = 0x10
STATX_ATTR_NODUMP = 0x40
STATX_BASIC_STATS = 0x7ff
STATX_BLOCKS = 0x400
STATX_BTIME = 0x800
STATX_CTIME = 0x80
STATX_GID = 0x10
STATX_INO = 0x100
STATX_MODE = 0x2
STATX_MTIME = 0x40
STATX_NLINK = 0x4
STATX_SIZE = 0x200
STATX_TYPE = 0x1
STATX_UID = 0x8
STATX__RESERVED = 0x80000000
S_BLKSIZE = 0x200 S_BLKSIZE = 0x200
S_IEXEC = 0x40 S_IEXEC = 0x40
S_IFBLK = 0x6000 S_IFBLK = 0x6000


@ -1641,6 +1641,27 @@ const (
SPLICE_F_MORE = 0x4 SPLICE_F_MORE = 0x4
SPLICE_F_MOVE = 0x1 SPLICE_F_MOVE = 0x1
SPLICE_F_NONBLOCK = 0x2 SPLICE_F_NONBLOCK = 0x2
STATX_ALL = 0xfff
STATX_ATIME = 0x20
STATX_ATTR_APPEND = 0x20
STATX_ATTR_AUTOMOUNT = 0x1000
STATX_ATTR_COMPRESSED = 0x4
STATX_ATTR_ENCRYPTED = 0x800
STATX_ATTR_IMMUTABLE = 0x10
STATX_ATTR_NODUMP = 0x40
STATX_BASIC_STATS = 0x7ff
STATX_BLOCKS = 0x400
STATX_BTIME = 0x800
STATX_CTIME = 0x80
STATX_GID = 0x10
STATX_INO = 0x100
STATX_MODE = 0x2
STATX_MTIME = 0x40
STATX_NLINK = 0x4
STATX_SIZE = 0x200
STATX_TYPE = 0x1
STATX_UID = 0x8
STATX__RESERVED = 0x80000000
S_BLKSIZE = 0x200 S_BLKSIZE = 0x200
S_IEXEC = 0x40 S_IEXEC = 0x40
S_IFBLK = 0x6000 S_IFBLK = 0x6000


@ -1641,6 +1641,27 @@ const (
SPLICE_F_MORE = 0x4 SPLICE_F_MORE = 0x4
SPLICE_F_MOVE = 0x1 SPLICE_F_MOVE = 0x1
SPLICE_F_NONBLOCK = 0x2 SPLICE_F_NONBLOCK = 0x2
STATX_ALL = 0xfff
STATX_ATIME = 0x20
STATX_ATTR_APPEND = 0x20
STATX_ATTR_AUTOMOUNT = 0x1000
STATX_ATTR_COMPRESSED = 0x4
STATX_ATTR_ENCRYPTED = 0x800
STATX_ATTR_IMMUTABLE = 0x10
STATX_ATTR_NODUMP = 0x40
STATX_BASIC_STATS = 0x7ff
STATX_BLOCKS = 0x400
STATX_BTIME = 0x800
STATX_CTIME = 0x80
STATX_GID = 0x10
STATX_INO = 0x100
STATX_MODE = 0x2
STATX_MTIME = 0x40
STATX_NLINK = 0x4
STATX_SIZE = 0x200
STATX_TYPE = 0x1
STATX_UID = 0x8
STATX__RESERVED = 0x80000000
S_BLKSIZE = 0x200 S_BLKSIZE = 0x200
S_IEXEC = 0x40 S_IEXEC = 0x40
S_IFBLK = 0x6000 S_IFBLK = 0x6000
