Update module github.com/containers/common to v0.54.0

Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
renovate[bot]
2023-06-28 23:02:07 +00:00
committed by GitHub
parent 7f5b970fb1
commit bfa04ea246
32 changed files with 1197 additions and 2704 deletions


@@ -94,23 +94,9 @@ out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/ho
tar -zcf $@ -C rootfs .
rm -rf rootfs
-include deps/cmd/gcs.gomake
-include deps/cmd/gcstools.gomake
-include deps/cmd/hooks/wait-paths.gomake
-include deps/cmd/tar2ext4.gomake
-include deps/internal/tools/snp-report.gomake
# Implicit rule for includes that define Go targets.
%.gomake: $(SRCROOT)/Makefile
bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report:
@mkdir -p $(dir $@)
@/bin/echo $(@:deps/%.gomake=bin/%): $(SRCROOT)/hack/gomakedeps.sh > $@.new
@/bin/echo -e '\t@mkdir -p $$(dir $$@) $(dir $@)' >> $@.new
@/bin/echo -e '\t$$(GO_BUILD) -o $$@.new $$(SRCROOT)/$$(@:bin/%=%)' >> $@.new
@/bin/echo -e '\tGO="$(GO)" $$(SRCROOT)/hack/gomakedeps.sh $$@ $$(SRCROOT)/$$(@:bin/%=%) $$(GO_FLAGS) $$(GO_FLAGS_EXTRA) > $(@:%.gomake=%.godeps).new' >> $@.new
@/bin/echo -e '\tmv $(@:%.gomake=%.godeps).new $(@:%.gomake=%.godeps)' >> $@.new
@/bin/echo -e '\tmv $$@.new $$@' >> $@.new
@/bin/echo -e '-include $(@:%.gomake=%.godeps)' >> $@.new
mv $@.new $@
GOOS=linux $(GO_BUILD) -o $@ $(SRCROOT)/$(@:bin/%=%)
bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o
@mkdir -p bin


@@ -0,0 +1,85 @@
package log
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net"
"reflect"
"time"
"github.com/containerd/containerd/log"
)
const TimeFormat = log.RFC3339NanoFixed
func FormatTime(t time.Time) string {
return t.Format(TimeFormat)
}
// DurationFormat formats a [time.Duration] log entry.
//
// A nil value signals an error with the formatting.
type DurationFormat func(time.Duration) interface{}
func DurationFormatString(d time.Duration) interface{} { return d.String() }
func DurationFormatSeconds(d time.Duration) interface{} { return d.Seconds() }
func DurationFormatMilliseconds(d time.Duration) interface{} { return d.Milliseconds() }
// FormatIO formats net.Conn and other types that have an `Addr()` or `Name()`.
//
// See FormatEnabled for more information.
func FormatIO(ctx context.Context, v interface{}) string {
m := make(map[string]string)
m["type"] = reflect.TypeOf(v).String()
switch t := v.(type) {
case net.Conn:
m["localAddress"] = formatAddr(t.LocalAddr())
m["remoteAddress"] = formatAddr(t.RemoteAddr())
case interface{ Addr() net.Addr }:
m["address"] = formatAddr(t.Addr())
default:
return Format(ctx, t)
}
return Format(ctx, m)
}
func formatAddr(a net.Addr) string {
return a.Network() + "://" + a.String()
}
// Format formats an object into a JSON string, without any indentation or
// HTML escapes.
// Context is used to output a log warning if the conversion fails.
//
// This is intended primarily for `trace.StringAttribute()`
func Format(ctx context.Context, v interface{}) string {
b, err := encode(v)
if err != nil {
G(ctx).WithError(err).Warning("could not format value")
return ""
}
return string(b)
}
func encode(v interface{}) ([]byte, error) {
return encodeBuffer(&bytes.Buffer{}, v)
}
func encodeBuffer(buf *bytes.Buffer, v interface{}) ([]byte, error) {
enc := json.NewEncoder(buf)
enc.SetEscapeHTML(false)
enc.SetIndent("", "")
if err := enc.Encode(v); err != nil {
err = fmt.Errorf("could not marshall %T to JSON for logging: %w", v, err)
return nil, err
}
// encoder.Encode appends a newline to the end
return bytes.TrimSpace(buf.Bytes()), nil
}
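
For orientation, a minimal, hypothetical caller of Format/FormatIO (assuming it builds inside the hcsshim module so the internal/log import resolves); it attaches the formatted values to an OpenCensus span as string attributes:

package main

import (
	"context"
	"net"

	"go.opencensus.io/trace"

	"github.com/Microsoft/hcsshim/internal/log"
)

func dial(ctx context.Context, addr string) error {
	ctx, span := trace.StartSpan(ctx, "dial")
	defer span.End()

	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return err
	}
	defer conn.Close()

	// FormatIO renders the connection as JSON with its local/remote addresses;
	// Format JSON-encodes arbitrary values without HTML escaping.
	span.AddAttributes(
		trace.StringAttribute("conn", log.FormatIO(ctx, conn)),
		trace.StringAttribute("config", log.Format(ctx, map[string]string{"addr": addr})),
	)
	return nil
}

func main() { _ = dial(context.Background(), "127.0.0.1:80") }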


@@ -1,23 +1,58 @@
package log
import (
"bytes"
"reflect"
"time"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/containerd/containerd/log"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// Hook serves to intercept and format `logrus.Entry`s before they are passed
// to the ETW hook.
const nullString = "null"
// Hook intercepts and formats a [logrus.Entry] before it is logged.
//
// The containerd shim discards the (formatted) logrus output, and outputs only via ETW.
// The Linux GCS outputs logrus entries over stdout, which is consumed by the shim and
// then re-output via the ETW hook.
type Hook struct{}
// The shim either outputs the logs through an ETW hook, discarding the (formatted) output,
// or writes them to a pipe for logging binaries to consume.
// The Linux GCS outputs logrus entries over stdout, which is then consumed and re-output
// by the shim.
type Hook struct {
// EncodeAsJSON formats structs, maps, arrays, slices, and [bytes.Buffer] as JSON.
// Variables of [bytes.Buffer] will be converted to []byte.
//
// Default is false.
EncodeAsJSON bool
// TimeFormat specifies the format for [time.Time] variables.
// An empty string disables formatting.
// When disabled, the fallback is JSON encoding, if enabled.
//
// Default is [github.com/containerd/containerd/log.RFC3339NanoFixed].
TimeFormat string
// DurationFormat converts [time.Duration] fields to an appropriate encoding.
// nil disables formatting.
// When disabled, the fallback is JSON encoding, if enabled.
//
// Default is [DurationFormatString], which appends a duration unit after the value.
DurationFormat DurationFormat
// AddSpanContext adds [logfields.TraceID] and [logfields.SpanID] fields to
// the entry from the span context stored in [logrus.Entry.Context], if it exists.
AddSpanContext bool
}
var _ logrus.Hook = &Hook{}
func NewHook() *Hook {
return &Hook{}
return &Hook{
TimeFormat: log.RFC3339NanoFixed,
DurationFormat: DurationFormatString,
AddSpanContext: true,
}
}
func (h *Hook) Levels() []logrus.Level {
@@ -25,14 +60,108 @@ func (h *Hook) Levels() []logrus.Level {
}
func (h *Hook) Fire(e *logrus.Entry) (err error) {
// JSON encode, if necessary, then add span information
h.encode(e)
h.addSpanContext(e)
return nil
}
// encode loops through all the fields in the [logrus.Entry] and encodes them according to
// the settings in [Hook].
// If [Hook.TimeFormat] is non-empty, it will be passed to [time.Time.Format] for
// fields of type [time.Time].
//
// If [Hook.EncodeAsJSON] is true, then fields that are not numeric, boolean, strings, or
// errors will be encoded via a [json.Marshal] (with HTML escaping disabled).
// Channel- and function-typed fields, as well as unsafe pointers, are left alone and not encoded.
//
// If [Hook.TimeFormat] and [Hook.DurationFormat] are empty and [Hook.EncodeAsJSON] is false,
// then this is a no-op.
func (h *Hook) encode(e *logrus.Entry) {
d := e.Data
formatTime := h.TimeFormat != ""
formatDuration := h.DurationFormat != nil
if !(h.EncodeAsJSON || formatTime || formatDuration) {
return
}
for k, v := range d {
// encode types with dedicated formatting options first
if vv, ok := v.(time.Time); formatTime && ok {
d[k] = vv.Format(h.TimeFormat)
continue
}
if vv, ok := v.(time.Duration); formatDuration && ok {
d[k] = h.DurationFormat(vv)
continue
}
// general case JSON encoding
if !h.EncodeAsJSON {
continue
}
switch vv := v.(type) {
// built in types
// "json" marshals errors as "{}", so leave alone here
case bool, string, error, uintptr,
int8, int16, int32, int64, int,
uint8, uint32, uint64, uint,
float32, float64:
continue
// Rather than setting d[k] = vv.String(), JSON encode the []byte value, since it
// may be a binary payload and not representable as a string.
// `case bytes.Buffer, *bytes.Buffer:` resolves `vv` to `interface{}`,
// so we cannot use `vv.Bytes`.
// These branches could be moved below the `reflect.Indirect()` call, but
// that would require additional type matching and dereferencing.
// Easier to keep these duplicate branches here.
case bytes.Buffer:
v = vv.Bytes()
case *bytes.Buffer:
v = vv.Bytes()
}
// dereference pointer or interface variables
rv := reflect.Indirect(reflect.ValueOf(v))
// check if `v` is a nil pointer
if !rv.IsValid() {
d[k] = nullString
continue
}
switch rv.Kind() {
case reflect.Map, reflect.Struct, reflect.Array, reflect.Slice:
default:
// Bool, [U]?Int*, Float*, Complex*, Uintptr, String: encoded as normal
// Chan, Func: not supported by json
// Interface, Pointer: dereferenced above
// UnsafePointer: not supported by json, not safe to de-reference; leave alone
continue
}
b, err := encode(v)
if err != nil {
// Returning an error writes it to stderr (ie, to `panic.log`) and stops the remaining
// hooks (ie, exporting to ETW) from firing. So add encoding errors to
// the entry data to be written out, but keep on processing.
d[k+"-"+logrus.ErrorKey] = err.Error()
// keep the original `v` as the value.
continue
}
d[k] = string(b)
}
}
func (h *Hook) addSpanContext(e *logrus.Entry) {
ctx := e.Context
if ctx == nil {
if !h.AddSpanContext || ctx == nil {
return
}
span := trace.FromContext(ctx)
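
A brief, hypothetical wiring sketch for the hook (assuming the hcsshim internal/log import path; the logrus.AddHook call is the standard registration path, and the defaults mirror NewHook above):

package main

import (
	"time"

	"github.com/sirupsen/logrus"

	"github.com/Microsoft/hcsshim/internal/log"
)

func main() {
	// NewHook defaults: RFC3339NanoFixed time format, DurationFormatString
	// durations, and span-context fields taken from entry.Context.
	logrus.AddHook(log.NewHook())

	logrus.WithField("elapsed", 1500*time.Millisecond).Info("step finished") // elapsed="1.5s"
}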


@@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"errors"
"strings"
"sync/atomic"
hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
@@ -56,11 +55,11 @@ func ScrubProcessParameters(s string) (string, error) {
}
pp.Environment = map[string]string{_scrubbedReplacement: _scrubbedReplacement}
buf := bytes.NewBuffer(b[:0])
if err := encode(buf, pp); err != nil {
b, err := encodeBuffer(bytes.NewBuffer(b[:0]), pp)
if err != nil {
return "", err
}
return strings.TrimSpace(buf.String()), nil
return string(b), nil
}
// ScrubBridgeCreate scrubs requests sent over the bridge of type
@@ -150,21 +149,12 @@ func scrubBytes(b []byte, scrub scrubberFunc) ([]byte, error) {
return nil, err
}
buf := &bytes.Buffer{}
if err := encode(buf, m); err != nil {
b, err := encode(m)
if err != nil {
return nil, err
}
return bytes.TrimSpace(buf.Bytes()), nil
}
func encode(buf *bytes.Buffer, v interface{}) error {
enc := json.NewEncoder(buf)
enc.SetEscapeHTML(false)
if err := enc.Encode(v); err != nil {
return err
}
return nil
return b, nil
}
func isRequestBase(m genMap) bool {


@@ -0,0 +1,69 @@
package oc
import (
"errors"
"io"
"net"
"os"
"github.com/containerd/containerd/errdefs"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// todo: break import cycle with "internal/hcs/errors.go" and reference errors defined there
// todo: add errors defined in "internal/guest/gcserror" (Hresult does not implement error)
func toStatusCode(err error) codes.Code {
// checks if err implements GRPCStatus() *"google.golang.org/grpc/status".Status,
// wraps an error defined in "github.com/containerd/containerd/errdefs", or is a
// context timeout or cancelled error
if s, ok := status.FromError(errdefs.ToGRPC(err)); ok {
return s.Code()
}
switch {
// case isAny(err):
// return codes.Cancelled
case isAny(err, os.ErrInvalid):
return codes.InvalidArgument
case isAny(err, os.ErrDeadlineExceeded):
return codes.DeadlineExceeded
case isAny(err, os.ErrNotExist):
return codes.NotFound
case isAny(err, os.ErrExist):
return codes.AlreadyExists
case isAny(err, os.ErrPermission):
return codes.PermissionDenied
// case isAny(err):
// return codes.ResourceExhausted
case isAny(err, os.ErrClosed, net.ErrClosed, io.ErrClosedPipe, io.ErrShortBuffer):
return codes.FailedPrecondition
// case isAny(err):
// return codes.Aborted
// case isAny(err):
// return codes.OutOfRange
// case isAny(err):
// return codes.Unimplemented
case isAny(err, io.ErrNoProgress):
return codes.Internal
// case isAny(err):
// return codes.Unavailable
case isAny(err, io.ErrShortWrite, io.ErrUnexpectedEOF):
return codes.DataLoss
// case isAny(err):
// return codes.Unauthenticated
default:
return codes.Unknown
}
}
// isAny returns true if errors.Is is true for any of the provided errors, errs.
func isAny(err error, errs ...error) bool {
for _, e := range errs {
if errors.Is(err, e) {
return true
}
}
return false
}
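
A hedged, in-package test sketch of the mapping (hypothetical test file in package oc): a wrapped os.ErrNotExist should land on codes.NotFound via the errors.Is-based branches above:

package oc

import (
	"fmt"
	"os"
	"testing"

	"google.golang.org/grpc/codes"
)

func TestToStatusCodeNotFound(t *testing.T) {
	// The sentinel survives wrapping, so the isAny(err, os.ErrNotExist) case matches.
	err := fmt.Errorf("open config: %w", os.ErrNotExist)
	if got := toStatusCode(err); got != codes.NotFound {
		t.Fatalf("toStatusCode(%v) = %v, want %v", err, got, codes.NotFound)
	}
}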


@@ -3,19 +3,26 @@ package oc
import (
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
"github.com/Microsoft/hcsshim/internal/log"
"github.com/Microsoft/hcsshim/internal/logfields"
)
var _ = (trace.Exporter)(&LogrusExporter{})
const spanMessage = "Span"
var _errorCodeKey = logrus.ErrorKey + "Code"
// LogrusExporter is an OpenCensus `trace.Exporter` that exports
// `trace.SpanData` to logrus output.
type LogrusExporter struct {
}
type LogrusExporter struct{}
var _ trace.Exporter = &LogrusExporter{}
// ExportSpan exports `s` based on the following rules:
//
// 1. All output will contain `s.Attributes`, `s.TraceID`, `s.SpanID`,
// `s.ParentSpanID` for correlation
// 1. All output will contain `s.Attributes`, `s.SpanKind`, `s.TraceID`,
// `s.SpanID`, and `s.ParentSpanID` for correlation
//
// 2. Any calls to .Annotate will not be supported.
//
@@ -23,21 +30,57 @@ type LogrusExporter struct {
// `s.Status.Code != 0` in which case it will be written at `logrus.ErrorLevel`
// providing `s.Status.Message` as the error value.
func (le *LogrusExporter) ExportSpan(s *trace.SpanData) {
// Combine all span annotations with traceID, spanID, parentSpanID
baseEntry := logrus.WithFields(logrus.Fields(s.Attributes))
baseEntry.Data["traceID"] = s.TraceID.String()
baseEntry.Data["spanID"] = s.SpanID.String()
baseEntry.Data["parentSpanID"] = s.ParentSpanID.String()
baseEntry.Data["startTime"] = s.StartTime
baseEntry.Data["endTime"] = s.EndTime
baseEntry.Data["duration"] = s.EndTime.Sub(s.StartTime).String()
baseEntry.Data["name"] = s.Name
baseEntry.Time = s.StartTime
if s.DroppedAnnotationCount > 0 {
logrus.WithFields(logrus.Fields{
"name": s.Name,
logfields.TraceID: s.TraceID.String(),
logfields.SpanID: s.SpanID.String(),
"dropped": s.DroppedAttributeCount,
"maxAttributes": len(s.Attributes),
}).Warning("span had dropped attributes")
}
entry := log.L.Dup()
// Combine all span annotations with span data (eg, trace ID, span ID, parent span ID,
// error, status code)
// (OC) Span attributes are guaranteed to be strings, bools, or int64s, so we
// can skip the overhead in entry.WithFields() and add them directly to entry.Data.
// Preallocate ahead of time, since we should add, at most, 10 additional entries
data := make(logrus.Fields, len(entry.Data)+len(s.Attributes)+10)
// Default log entry may have preexisting/application-wide data
for k, v := range entry.Data {
data[k] = v
}
for k, v := range s.Attributes {
data[k] = v
}
data[logfields.Name] = s.Name
data[logfields.TraceID] = s.TraceID.String()
data[logfields.SpanID] = s.SpanID.String()
data[logfields.ParentSpanID] = s.ParentSpanID.String()
data[logfields.StartTime] = s.StartTime
data[logfields.EndTime] = s.EndTime
data[logfields.Duration] = s.EndTime.Sub(s.StartTime)
if sk := spanKindToString(s.SpanKind); sk != "" {
data["spanKind"] = sk
}
level := logrus.InfoLevel
if s.Status.Code != 0 {
level = logrus.ErrorLevel
baseEntry.Data[logrus.ErrorKey] = s.Status.Message
// don't overwrite existing "error" or "errorCode" attributes
if _, ok := data[logrus.ErrorKey]; !ok {
data[logrus.ErrorKey] = s.Status.Message
}
if _, ok := data[_errorCodeKey]; !ok {
data[_errorCodeKey] = codes.Code(s.Status.Code).String()
}
}
baseEntry.Log(level, "Span")
entry.Data = data
entry.Time = s.StartTime
entry.Log(level, spanMessage)
}
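
A small, hypothetical setup sketch (assuming the hcsshim internal/oc import path) showing how the exporter might be registered so completed spans are emitted as logrus entries:

package main

import (
	"go.opencensus.io/trace"

	"github.com/Microsoft/hcsshim/internal/oc"
)

// initTracing registers the logrus-backed exporter and the package's default sampler.
func initTracing() {
	trace.RegisterExporter(&oc.LogrusExporter{})
	trace.ApplyConfig(trace.Config{DefaultSampler: oc.DefaultSampler})
}

func main() { initTracing() }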


@@ -14,8 +14,7 @@ var DefaultSampler = trace.AlwaysSample()
func SetSpanStatus(span *trace.Span, err error) {
status := trace.Status{}
if err != nil {
// TODO: JTERRY75 - Handle errors in a non-generic way
status.Code = trace.StatusCodeUnknown
status.Code = int32(toStatusCode(err))
status.Message = err.Error()
}
span.SetStatus(status)
@@ -46,3 +45,14 @@ func update(ctx context.Context, s *trace.Span) (context.Context, *trace.Span) {
var WithServerSpanKind = trace.WithSpanKind(trace.SpanKindServer)
var WithClientSpanKind = trace.WithSpanKind(trace.SpanKindClient)
func spanKindToString(sk int) string {
switch sk {
case trace.SpanKindClient:
return "client"
case trace.SpanKindServer:
return "server"
default:
return ""
}
}
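
A hypothetical caller sketch tying SetSpanStatus to a span created with one of the span-kind options; the error recorded here is what ExportSpan later surfaces as the "error" field:

package main

import (
	"context"

	"go.opencensus.io/trace"

	"github.com/Microsoft/hcsshim/internal/oc"
)

func doWork(ctx context.Context) (err error) {
	_, span := trace.StartSpan(ctx, "doWork", oc.WithServerSpanKind)
	defer func() {
		oc.SetSpanStatus(span, err) // err (if any) becomes the span status via toStatusCode
		span.End()
	}()
	// ... actual work ...
	return nil
}

func main() { _ = doWork(context.Background()) }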


@@ -23,20 +23,14 @@ type (
)
type explicitAccess struct {
//nolint:structcheck
accessPermissions accessMask
//nolint:structcheck
accessMode accessMode
//nolint:structcheck
inheritance inheritMode
//nolint:structcheck
trustee trustee
accessMode accessMode
inheritance inheritMode
trustee trustee
}
type trustee struct {
//nolint:unused,structcheck
multipleTrustee *trustee
//nolint:unused,structcheck
multipleTrustee *trustee
multipleTrusteeOperation int32
trusteeForm trusteeForm
trusteeType trusteeType

vendor/github.com/containerd/containerd/LICENSE generated vendored Normal file

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright The containerd Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/containerd/containerd/NOTICE generated vendored Normal file

@@ -0,0 +1,16 @@
Docker
Copyright 2012-2015 Docker, Inc.
This product includes software developed at Docker, Inc. (https://www.docker.com).
The following is courtesy of our legal counsel:
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see https://www.bis.doc.gov
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.


@@ -0,0 +1,92 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package errdefs defines the common errors used throughout containerd
// packages.
//
// Use with fmt.Errorf to add context to an error.
//
// To detect an error class, use the IsXXX functions to tell whether an error
// is of a certain type.
//
// The functions ToGRPC and FromGRPC can be used to map server-side and
// client-side errors to the correct types.
package errdefs
import (
"context"
"errors"
)
// Definitions of common error types used throughout containerd. All containerd
// errors returned by most packages will map into one of these errors classes.
// Packages should return errors of these types when they want to instruct a
// client to take a particular action.
//
// For the most part, we just try to provide local grpc errors. Most conditions
// map very well to those defined by grpc.
var (
ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping.
ErrInvalidArgument = errors.New("invalid argument")
ErrNotFound = errors.New("not found")
ErrAlreadyExists = errors.New("already exists")
ErrFailedPrecondition = errors.New("failed precondition")
ErrUnavailable = errors.New("unavailable")
ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented
)
// IsInvalidArgument returns true if the error is due to an invalid argument
func IsInvalidArgument(err error) bool {
return errors.Is(err, ErrInvalidArgument)
}
// IsNotFound returns true if the error is due to a missing object
func IsNotFound(err error) bool {
return errors.Is(err, ErrNotFound)
}
// IsAlreadyExists returns true if the error is due to an already existing
// metadata item
func IsAlreadyExists(err error) bool {
return errors.Is(err, ErrAlreadyExists)
}
// IsFailedPrecondition returns true if an operation could not proceed due to the
// lack of a particular condition
func IsFailedPrecondition(err error) bool {
return errors.Is(err, ErrFailedPrecondition)
}
// IsUnavailable returns true if the error is due to a resource being unavailable
func IsUnavailable(err error) bool {
return errors.Is(err, ErrUnavailable)
}
// IsNotImplemented returns true if the error is due to not being implemented
func IsNotImplemented(err error) bool {
return errors.Is(err, ErrNotImplemented)
}
// IsCanceled returns true if the error is due to `context.Canceled`.
func IsCanceled(err error) bool {
return errors.Is(err, context.Canceled)
}
// IsDeadlineExceeded returns true if the error is due to
// `context.DeadlineExceeded`.
func IsDeadlineExceeded(err error) bool {
return errors.Is(err, context.DeadlineExceeded)
}
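
A minimal usage sketch of the sentinel errors (hypothetical program): wrap with fmt.Errorf and %w so the IsXXX helpers, and ToGRPC below, can still classify the error:

package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
)

func lookup(name string) error {
	// Wrap the sentinel so callers (and ToGRPC) can classify the failure.
	return fmt.Errorf("container %q: %w", name, errdefs.ErrNotFound)
}

func main() {
	err := lookup("web")
	fmt.Println(errdefs.IsNotFound(err)) // true
}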

vendor/github.com/containerd/containerd/errdefs/grpc.go generated vendored Normal file

@@ -0,0 +1,147 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package errdefs
import (
"context"
"fmt"
"strings"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// ToGRPC will attempt to map the backend containerd error into a grpc error,
// using the original error message as a description.
//
// Further information may be extracted from certain errors depending on their
// type.
//
// If the error is unmapped, the original error will be returned to be handled
// by the regular grpc error handling stack.
func ToGRPC(err error) error {
if err == nil {
return nil
}
if isGRPCError(err) {
// error has already been mapped to grpc
return err
}
switch {
case IsInvalidArgument(err):
return status.Errorf(codes.InvalidArgument, err.Error())
case IsNotFound(err):
return status.Errorf(codes.NotFound, err.Error())
case IsAlreadyExists(err):
return status.Errorf(codes.AlreadyExists, err.Error())
case IsFailedPrecondition(err):
return status.Errorf(codes.FailedPrecondition, err.Error())
case IsUnavailable(err):
return status.Errorf(codes.Unavailable, err.Error())
case IsNotImplemented(err):
return status.Errorf(codes.Unimplemented, err.Error())
case IsCanceled(err):
return status.Errorf(codes.Canceled, err.Error())
case IsDeadlineExceeded(err):
return status.Errorf(codes.DeadlineExceeded, err.Error())
}
return err
}
// ToGRPCf maps the error to grpc error codes, assembling the formatting string
// and combining it with the target error string.
//
// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
func ToGRPCf(err error, format string, args ...interface{}) error {
return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
}
// FromGRPC returns the underlying error from a grpc service based on the grpc error code
func FromGRPC(err error) error {
if err == nil {
return nil
}
var cls error // divide these into error classes, becomes the cause
switch code(err) {
case codes.InvalidArgument:
cls = ErrInvalidArgument
case codes.AlreadyExists:
cls = ErrAlreadyExists
case codes.NotFound:
cls = ErrNotFound
case codes.Unavailable:
cls = ErrUnavailable
case codes.FailedPrecondition:
cls = ErrFailedPrecondition
case codes.Unimplemented:
cls = ErrNotImplemented
case codes.Canceled:
cls = context.Canceled
case codes.DeadlineExceeded:
cls = context.DeadlineExceeded
default:
cls = ErrUnknown
}
msg := rebaseMessage(cls, err)
if msg != "" {
err = fmt.Errorf("%s: %w", msg, cls)
} else {
err = cls
}
return err
}
// rebaseMessage removes the repeats for an error at the end of an error
// string. This will happen when taking an error over grpc then remapping it.
//
// Effectively, we just remove the string of cls from the end of err if it
// appears there.
func rebaseMessage(cls error, err error) string {
desc := errDesc(err)
clss := cls.Error()
if desc == clss {
return ""
}
return strings.TrimSuffix(desc, ": "+clss)
}
func isGRPCError(err error) bool {
_, ok := status.FromError(err)
return ok
}
func code(err error) codes.Code {
if s, ok := status.FromError(err); ok {
return s.Code()
}
return codes.Unknown
}
func errDesc(err error) string {
if s, ok := status.FromError(err); ok {
return s.Message()
}
return err.Error()
}
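
A hedged round-trip sketch (hypothetical program): ToGRPC maps the error class to a gRPC status on the server side, and FromGRPC recovers the same class on the client side, rebasing the message:

package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
)

func main() {
	orig := fmt.Errorf("image %q: %w", "busybox", errdefs.ErrNotFound)

	wire := errdefs.ToGRPC(orig)   // becomes a codes.NotFound status error
	back := errdefs.FromGRPC(wire) // re-wraps errdefs.ErrNotFound, message rebased

	fmt.Println(errdefs.IsNotFound(back)) // true
}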

vendor/github.com/containerd/containerd/log/context.go generated vendored Normal file

@@ -0,0 +1,72 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package log
import (
"context"
"github.com/sirupsen/logrus"
)
var (
// G is an alias for GetLogger.
//
// We may want to define this locally to a package to get package tagged log
// messages.
G = GetLogger
// L is an alias for the standard logger.
L = logrus.NewEntry(logrus.StandardLogger())
)
type (
loggerKey struct{}
// Fields type to pass to `WithFields`, alias from `logrus`.
Fields = logrus.Fields
)
const (
// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
// ensure the formatted time is always the same number of characters.
RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
// TextFormat represents the text logging format
TextFormat = "text"
// JSONFormat represents the JSON logging format
JSONFormat = "json"
)
// WithLogger returns a new context with the provided logger. Use in
// combination with logger.WithField(s) for great effect.
func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context {
e := logger.WithContext(ctx)
return context.WithValue(ctx, loggerKey{}, e)
}
// GetLogger retrieves the current logger from the context. If no logger is
// available, the default logger is returned.
func GetLogger(ctx context.Context) *logrus.Entry {
logger := ctx.Value(loggerKey{})
if logger == nil {
return L.WithContext(ctx)
}
return logger.(*logrus.Entry)
}
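
A short, hypothetical example of the context-logger helpers: a request handler stores a field-tagged entry with WithLogger, and a callee retrieves it with G (falling back to the default L when nothing is stored):

package main

import (
	"context"

	"github.com/containerd/containerd/log"
)

func handle(ctx context.Context, id string) {
	// Attach a request-scoped, field-tagged logger to the context.
	ctx = log.WithLogger(ctx, log.G(ctx).WithField("container", id))
	step(ctx)
}

func step(ctx context.Context) {
	log.G(ctx).Info("starting step") // carries the "container" field
}

func main() {
	handle(context.Background(), "web")
}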


@@ -23,19 +23,19 @@ func CompleteCommandFlags(cmd *cobra.Command, flags FlagCompletions) {
/* Autocomplete Functions for cobra ValidArgsFunction */
// AutocompleteNone - Block the default shell completion (no paths)
func AutocompleteNone(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
func AutocompleteNone(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
return nil, cobra.ShellCompDirectiveNoFileComp
}
// AutocompleteDefault - Use the default shell completion,
// allows path completion.
func AutocompleteDefault(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
func AutocompleteDefault(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
return nil, cobra.ShellCompDirectiveDefault
}
// AutocompleteCapabilities - Autocomplete linux capabilities options.
// Used by --cap-add and --cap-drop.
func AutocompleteCapabilities(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
func AutocompleteCapabilities(_ *cobra.Command, _ []string, toComplete string) ([]string, cobra.ShellCompDirective) {
caps := capabilities.AllCapabilities()
// convertCase will convert a string to lowercase only if the user input is lowercase
@@ -83,17 +83,17 @@ func autocompleteSubIDName(filename string) ([]string, cobra.ShellCompDirective)
}
// AutocompleteSubgidName - Autocomplete subgidname based on the names in the /etc/subgid file.
func AutocompleteSubgidName(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
func AutocompleteSubgidName(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
return autocompleteSubIDName("/etc/subgid")
}
// AutocompleteSubuidName - Autocomplete subuidname based on the names in the /etc/subuid file.
func AutocompleteSubuidName(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
func AutocompleteSubuidName(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
return autocompleteSubIDName("/etc/subuid")
}
// AutocompletePlatform - Autocomplete platforms supported by container engines
func AutocompletePlatform(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
func AutocompletePlatform(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
completions := []string{
"linux/386",
"linux/amd64",
@@ -115,7 +115,7 @@ func AutocompletePlatform(cmd *cobra.Command, args []string, toComplete string)
}
// AutocompleteArch - Autocomplete architectures supported by container engines
func AutocompleteArch(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
func AutocompleteArch(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
completions := []string{
"386",
"amd64",
@@ -135,19 +135,19 @@ func AutocompleteArch(cmd *cobra.Command, args []string, toComplete string) ([]s
}
// AutocompleteOS - Autocomplete OS supported by container engines
func AutocompleteOS(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
func AutocompleteOS(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
completions := []string{"linux", "windows"}
return completions, cobra.ShellCompDirectiveNoFileComp
}
// AutocompleteJSONFormat - Autocomplete format flag option.
// -> "json"
func AutocompleteJSONFormat(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
func AutocompleteJSONFormat(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
return []string{"json"}, cobra.ShellCompDirectiveNoFileComp
}
// AutocompleteOneArg - Autocomplete one random arg
func AutocompleteOneArg(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
func AutocompleteOneArg(_ *cobra.Command, args []string, _ string) ([]string, cobra.ShellCompDirective) {
if len(args) == 1 {
return nil, cobra.ShellCompDirectiveDefault
}
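
A hypothetical cobra wiring sketch (assuming the github.com/containers/common/pkg/completion import path) showing how these helpers attach to positional-argument and flag completion:

package main

import (
	"github.com/containers/common/pkg/completion"
	"github.com/spf13/cobra"
)

func newRunCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:               "run IMAGE",
		ValidArgsFunction: completion.AutocompleteDefault, // shell path completion for the image argument
	}
	cmd.Flags().String("os", "", "override the OS of the image")
	// Flag values get their own completion functions.
	_ = cmd.RegisterFlagCompletionFunc("os", completion.AutocompleteOS)
	return cmd
}

func main() { _ = newRunCommand().Execute() }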


@@ -17,8 +17,9 @@ import (
// Options defines the option to retry.
type Options struct {
MaxRetry int // The number of times to possibly retry.
Delay time.Duration // The delay to use between retries, if set.
MaxRetry int // The number of times to possibly retry.
Delay time.Duration // The delay to use between retries, if set.
IsErrorRetryable func(error) bool
}
// RetryOptions is deprecated, use Options.
@@ -31,6 +32,12 @@ func RetryIfNecessary(ctx context.Context, operation func() error, options *Opti
// IfNecessary retries the operation in exponential backoff with the retry Options.
func IfNecessary(ctx context.Context, operation func() error, options *Options) error {
var isRetryable func(error) bool
if options.IsErrorRetryable != nil {
isRetryable = options.IsErrorRetryable
} else {
isRetryable = IsErrorRetryable
}
err := operation()
for attempt := 0; err != nil && isRetryable(err) && attempt < options.MaxRetry; attempt++ {
delay := time.Duration(int(math.Pow(2, float64(attempt)))) * time.Second
@@ -49,7 +56,11 @@ func IfNecessary(ctx context.Context, operation func() error, options *Options)
return err
}
func isRetryable(err error) bool {
// IsErrorRetryable makes a HEURISTIC determination whether it is worth retrying upon encountering an error.
// That heuristic is NOT STABLE and it CAN CHANGE AT ANY TIME.
// Callers that have a hard requirement for specific treatment of a class of errors should make their own check
// instead of relying on this function maintaining its past behavior.
func IsErrorRetryable(err error) bool {
switch err {
case nil:
return false
@@ -72,18 +83,18 @@ func isRetryable(err error) bool {
}
return true
case *net.OpError:
return isRetryable(e.Err)
return IsErrorRetryable(e.Err)
case *url.Error: // This includes errors returned by the net/http client.
if e.Err == io.EOF { // Happens when a server accepts an HTTP connection and sends EOF
return true
}
return isRetryable(e.Err)
return IsErrorRetryable(e.Err)
case syscall.Errno:
return isErrnoRetryable(e)
case errcode.Errors:
// if this error is a group of errors, process them all in turn
for i := range e {
if !isRetryable(e[i]) {
if !IsErrorRetryable(e[i]) {
return false
}
}
@@ -91,7 +102,7 @@ func isRetryable(err error) bool {
case *multierror.Error:
// if this error is a group of errors, process them all in turn
for i := range e.Errors {
if !isRetryable(e.Errors[i]) {
if !IsErrorRetryable(e.Errors[i]) {
return false
}
}
@@ -102,11 +113,11 @@ func isRetryable(err error) bool {
}
if unwrappable, ok := e.(unwrapper); ok {
err = unwrappable.Unwrap()
return isRetryable(err)
return IsErrorRetryable(err)
}
case unwrapper: // Test this last, because various error types might implement .Unwrap()
err = e.Unwrap()
return isRetryable(err)
return IsErrorRetryable(err)
}
return false
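
A hypothetical caller sketch for the renamed API (assuming the github.com/containers/common/pkg/retry import path): IfNecessary with a custom IsErrorRetryable override that otherwise defers to the exported heuristic:

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/containers/common/pkg/retry"
)

func pullImage() error { return nil } // placeholder operation

func main() {
	opts := retry.Options{
		MaxRetry: 3,
		IsErrorRetryable: func(err error) bool {
			// Example policy: never retry cancellation, otherwise defer to the
			// package heuristic (explicitly opting in to its unstable behavior).
			if errors.Is(err, context.Canceled) {
				return false
			}
			return retry.IsErrorRetryable(err)
		},
	}

	err := retry.IfNecessary(context.Background(), pullImage, &opts)
	fmt.Println("final error:", err)
}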