runtime: bump containerd for gogo deprecation

This update includes the changes required by the version bump of
containerd and its dependencies. It is part of a broader initiative to
phase out gogo protobuf, which has been deprecated, and to align with
the currently supported libraries.
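
For illustration, the pattern that recurs throughout the shim changes
below, shown as a minimal sketch rather than code taken from this diff:
the regenerated (non-gogo) task API renames enum constants
(task.StatusCreated becomes task.Status_CREATED), timestamp fields
become *timestamppb.Timestamp (hence the timestamppb.New wrappers around
ExitedAt), and typeurl moves to typeurl/v2, with
protobuf.MarshalAnyToProto used where a protobuf Any is required.

```go
package main

import (
	"fmt"
	"time"

	"github.com/containerd/containerd/api/types/task"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Enum constants come from the regenerated (non-gogo) API module:
	// the gogo-era task.StatusCreated becomes task.Status_CREATED.
	status := task.Status_CREATED

	// Timestamp fields are now *timestamppb.Timestamp instead of time.Time,
	// so values such as ExitedAt are wrapped with timestamppb.New(...).
	exitedAt := timestamppb.New(time.Now())

	fmt.Println(status, exitedAt.AsTime())
}
```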

Fixes #7420.

Signed-off-by: Beraldo Leal <bleal@redhat.com>
Beraldo Leal 2023-11-02 00:28:35 +00:00
parent 16fa2c39e6
commit 7641c19f74
1379 changed files with 175812 additions and 63461 deletions

View File

@ -46,6 +46,7 @@ type cacheServer struct {
rpc *grpc.Server
factory vc.Factory
done chan struct{}
pb.UnimplementedCacheServiceServer
}
var jsonVMConfig *pb.GrpcVMConfig

View File

@ -4,22 +4,22 @@ go 1.19
require (
code.cloudfoundry.org/bytefmt v0.0.0-20211005130812-5bb3c17173e5
github.com/BurntSushi/toml v1.2.0
github.com/BurntSushi/toml v1.2.1
github.com/blang/semver v3.5.1+incompatible
github.com/blang/semver/v4 v4.0.0
github.com/container-orchestrated-devices/container-device-interface v0.6.0
github.com/containerd/cgroups v1.0.5-0.20220625035431-cf7417bca682
github.com/containerd/cgroups v1.1.0
github.com/containerd/console v1.0.3
github.com/containerd/containerd v1.6.8
github.com/containerd/containerd v1.7.2
github.com/containerd/cri-containerd v1.19.0
github.com/containerd/fifo v1.0.0
github.com/containerd/ttrpc v1.1.0
github.com/containerd/typeurl v1.0.2
github.com/containernetworking/plugins v1.1.1
github.com/containerd/fifo v1.1.0
github.com/containerd/ttrpc v1.2.2
github.com/containerd/typeurl/v2 v2.1.1
github.com/containernetworking/plugins v1.2.0
github.com/containers/podman/v4 v4.2.0
github.com/coreos/go-systemd/v22 v22.3.2
github.com/docker/go-units v0.4.0
github.com/fsnotify/fsnotify v1.5.4
github.com/coreos/go-systemd/v22 v22.5.0
github.com/docker/go-units v0.5.0
github.com/fsnotify/fsnotify v1.6.0
github.com/go-ini/ini v1.28.2
github.com/go-openapi/errors v0.20.2
github.com/go-openapi/runtime v0.19.21
@ -27,55 +27,57 @@ require (
github.com/go-openapi/swag v0.21.1
github.com/go-openapi/validate v0.22.0
github.com/godbus/dbus/v5 v5.1.0
github.com/gogo/protobuf v1.3.2
github.com/hashicorp/go-multierror v1.1.1
github.com/intel-go/cpuid v0.0.0-20210602155658-5747e5cec0d9
github.com/mdlayher/vsock v1.1.0
github.com/opencontainers/runc v1.1.9
github.com/opencontainers/runtime-spec v1.1.0-rc.1
github.com/opencontainers/selinux v1.10.1
github.com/opencontainers/selinux v1.11.0
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.12.1
github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.32.1
github.com/prometheus/procfs v0.7.3
github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1
github.com/prometheus/client_golang v1.14.0
github.com/prometheus/client_model v0.3.0
github.com/prometheus/common v0.37.0
github.com/prometheus/procfs v0.8.0
github.com/safchain/ethtool v0.2.0
github.com/sirupsen/logrus v1.9.0
github.com/stretchr/testify v1.8.0
github.com/urfave/cli v1.22.4
github.com/stretchr/testify v1.8.2
github.com/urfave/cli v1.22.12
github.com/vishvananda/netlink v1.2.1-beta.2
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f
gitlab.com/nvidia/cloud-native/go-nvlib v0.0.0-20220601114329-47893b162965
go.opentelemetry.io/otel v1.3.0
go.opentelemetry.io/otel v1.14.0
go.opentelemetry.io/otel/exporters/jaeger v1.0.0
go.opentelemetry.io/otel/sdk v1.3.0
go.opentelemetry.io/otel/trace v1.3.0
golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2
golang.org/x/sys v0.6.0
google.golang.org/grpc v1.47.0
google.golang.org/protobuf v1.28.1
k8s.io/apimachinery v0.22.5
k8s.io/cri-api v0.23.1
go.opentelemetry.io/otel/sdk v1.14.0
go.opentelemetry.io/otel/trace v1.14.0
golang.org/x/oauth2 v0.4.0
golang.org/x/sys v0.7.0
google.golang.org/grpc v1.53.0
google.golang.org/protobuf v1.29.1
k8s.io/apimachinery v0.26.2
k8s.io/cri-api v0.27.1
)
require (
github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/Microsoft/hcsshim v0.9.4 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/Microsoft/hcsshim v0.10.0-rc.8 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cilium/ebpf v0.7.0 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cilium/ebpf v0.9.1 // indirect
github.com/containerd/cgroups/v3 v3.0.2 // indirect
github.com/containerd/continuity v0.4.2-0.20230616210509-1e0d26eb2381 // indirect
github.com/containerd/go-runc v1.0.0 // indirect
github.com/containernetworking/cni v1.1.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
github.com/frankban/quicktest v1.13.1 // indirect
github.com/go-logr/logr v1.2.2 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/analysis v0.21.2 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
@ -83,31 +85,40 @@ require (
github.com/go-openapi/loads v0.21.1 // indirect
github.com/go-openapi/spec v0.20.4 // indirect
github.com/go-stack/stack v1.8.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/klauspost/compress v1.16.0 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mdlayher/socket v0.2.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/moby/sys/mountinfo v0.6.2 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/sys/signal v0.7.0 // indirect
github.com/moby/sys/symlink v0.2.0 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 // indirect
github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect
github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rogpeppe/go-internal v1.8.1-0.20210923151022-86f73c517451 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
go.mongodb.org/mongo-driver v1.7.5 // indirect
go.opencensus.io v0.23.0 // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
go.opencensus.io v0.24.0 // indirect
golang.org/x/mod v0.9.0 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/tools v0.7.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f // indirect
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
@ -116,12 +127,10 @@ require (
replace (
github.com/go-openapi/swag => github.com/go-openapi/swag v0.21.1
github.com/opencontainers/image-spec => github.com/opencontainers/image-spec v1.0.2
github.com/opencontainers/runc => github.com/opencontainers/runc v1.1.9
github.com/stretchr/testify => github.com/stretchr/testify v1.8.0
github.com/uber-go/atomic => go.uber.org/atomic v1.5.1
golang.org/x/text => golang.org/x/text v0.7.0
google.golang.org/genproto => google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8
google.golang.org/grpc => google.golang.org/grpc v1.47.0
gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1
)

File diff suppressed because it is too large

View File

@ -9,9 +9,9 @@ import (
"io"
"time"
taskAPI "github.com/containerd/containerd/api/runtime/task/v2"
"github.com/containerd/containerd/api/types/task"
"github.com/containerd/containerd/errdefs"
taskAPI "github.com/containerd/containerd/runtime/v2/task"
"github.com/opencontainers/runtime-spec/specs-go"
vc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
@ -60,7 +60,7 @@ func newContainer(s *service, r *taskAPI.CreateTaskRequest, containerType vc.Con
terminal: r.Terminal,
cType: containerType,
execs: make(map[string]*exec),
status: task.StatusCreated,
status: task.Status_CREATED,
exitIOch: make(chan struct{}),
exitCh: make(chan uint32, 1),
stdinCloser: make(chan struct{}),

View File

@ -8,7 +8,7 @@ package containerdshim
import (
"testing"
taskAPI "github.com/containerd/containerd/runtime/v2/task"
taskAPI "github.com/containerd/containerd/api/runtime/task/v2"
"github.com/stretchr/testify/assert"
)

View File

@ -20,10 +20,10 @@ import (
"syscall"
"github.com/container-orchestrated-devices/container-device-interface/pkg/cdi"
taskAPI "github.com/containerd/containerd/api/runtime/task/v2"
containerd_types "github.com/containerd/containerd/api/types"
"github.com/containerd/containerd/mount"
taskAPI "github.com/containerd/containerd/runtime/v2/task"
"github.com/containerd/typeurl"
"github.com/containerd/typeurl/v2"
"github.com/kata-containers/kata-containers/src/runtime/pkg/utils"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/annotations"

View File

@ -13,10 +13,10 @@ import (
"path"
"testing"
taskAPI "github.com/containerd/containerd/api/runtime/task/v2"
"github.com/containerd/containerd/namespaces"
taskAPI "github.com/containerd/containerd/runtime/v2/task"
"github.com/containerd/containerd/protobuf"
crioption "github.com/containerd/cri-containerd/pkg/api/runtimeoptions/v1"
"github.com/containerd/typeurl"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/stretchr/testify/assert"
@ -395,7 +395,7 @@ func TestCreateLoadRuntimeConfig(t *testing.T) {
fakeConfig := "foobar"
anno[vcAnnotations.SandboxConfigPathKey] = fakeConfig
option := &crioption.Options{ConfigPath: fakeConfig}
r.Options, err = typeurl.MarshalAny(option)
r.Options, err = protobuf.MarshalAnyToProto(option)
assert.NoError(err)
err = os.Setenv("KATA_CONF_FILE", fakeConfig)
assert.NoError(err)
@ -413,12 +413,12 @@ func TestCreateLoadRuntimeConfig(t *testing.T) {
// 2. shimv2 create task option
option.ConfigPath = config
r.Options, err = typeurl.MarshalAny(option)
r.Options, err = protobuf.MarshalAnyToProto(option)
assert.NoError(err)
_, err = loadRuntimeConfig(s, r, anno)
assert.NoError(err)
option.ConfigPath = ""
r.Options, err = typeurl.MarshalAny(option)
r.Options, err = protobuf.MarshalAnyToProto(option)
assert.NoError(err)
// 3. environment

View File

@ -16,7 +16,7 @@ import (
func deleteContainer(ctx context.Context, s *service, c *container) error {
if !c.cType.IsSandbox() {
if c.status != task.StatusStopped {
if c.status != task.Status_STOPPED {
if _, err := s.sandbox.StopContainer(ctx, c.id, false); err != nil && !isNotFound(err) {
return err
}

View File

@ -9,7 +9,7 @@ package containerdshim
import (
"testing"
taskAPI "github.com/containerd/containerd/runtime/v2/task"
taskAPI "github.com/containerd/containerd/api/runtime/task/v2"
"github.com/stretchr/testify/assert"
ktu "github.com/kata-containers/kata-containers/src/runtime/pkg/katatestutils"

View File

@ -13,10 +13,10 @@ import (
"github.com/containerd/containerd/api/types/task"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/typeurl"
"github.com/containerd/typeurl/v2"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
anypb "google.golang.org/protobuf/types/known/anypb"
specs "github.com/opencontainers/runtime-spec/specs-go"
anypb "google.golang.org/protobuf/types/known/anypb"
)
type exec struct {
@ -126,7 +126,7 @@ func newExec(c *container, stdin, stdout, stderr string, terminal bool, jspec *a
exitIOch: make(chan struct{}),
stdinCloser: make(chan struct{}),
exitCh: make(chan uint32, 1),
status: task.StatusCreated,
status: task.Status_CREATED,
}
return exec, nil

View File

@ -12,7 +12,7 @@ import (
"github.com/containerd/containerd/namespaces"
taskAPI "github.com/containerd/containerd/runtime/v2/task"
taskAPI "github.com/containerd/containerd/api/runtime/task/v2"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/vcmock"
"github.com/stretchr/testify/assert"

View File

@ -10,8 +10,7 @@ import (
cgroupsv1 "github.com/containerd/cgroups/stats/v1"
cgroupsv2 "github.com/containerd/cgroups/v2/stats"
"github.com/containerd/typeurl"
"github.com/containerd/containerd/protobuf"
resCtrl "github.com/kata-containers/kata-containers/src/runtime/pkg/resourcecontrol"
vc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
anypb "google.golang.org/protobuf/types/known/anypb"
@ -36,7 +35,7 @@ func marshalMetrics(ctx context.Context, s *service, containerID string) (*anypb
metrics = statsToMetricsV2(&stats)
}
data, err := typeurl.MarshalAny(metrics)
data, err := protobuf.MarshalAnyToProto(metrics)
if err != nil {
return nil, err
}

View File

@ -10,8 +10,8 @@ import (
"context"
"testing"
taskAPI "github.com/containerd/containerd/api/runtime/task/v2"
"github.com/containerd/containerd/namespaces"
taskAPI "github.com/containerd/containerd/runtime/v2/task"
vc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/vcmock"

View File

@ -17,20 +17,13 @@ import (
"time"
eventstypes "github.com/containerd/containerd/api/events"
taskAPI "github.com/containerd/containerd/api/runtime/task/v2"
"github.com/containerd/containerd/api/types/task"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/namespaces"
cdruntime "github.com/containerd/containerd/runtime"
cdshim "github.com/containerd/containerd/runtime/v2/shim"
taskAPI "github.com/containerd/containerd/runtime/v2/task"
"github.com/containerd/typeurl"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
otelTrace "go.opentelemetry.io/otel/trace"
"golang.org/x/sys/unix"
emptypb "google.golang.org/protobuf/types/known/emptypb"
"github.com/containerd/typeurl/v2"
"github.com/kata-containers/kata-containers/src/runtime/pkg/katautils"
"github.com/kata-containers/kata-containers/src/runtime/pkg/katautils/katatrace"
"github.com/kata-containers/kata-containers/src/runtime/pkg/oci"
@ -38,6 +31,13 @@ import (
vc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/compatoci"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
otelTrace "go.opentelemetry.io/otel/trace"
"golang.org/x/sys/unix"
emptypb "google.golang.org/protobuf/types/known/emptypb"
"google.golang.org/protobuf/types/known/timestamppb"
)
// shimTracingTags defines tags for the trace span
@ -372,7 +372,7 @@ func (s *service) Cleanup(ctx context.Context) (_ *taskAPI.DeleteResponse, err e
}
return &taskAPI.DeleteResponse{
ExitedAt: time.Now(),
ExitedAt: timestamppb.New(time.Now()),
ExitStatus: 128 + uint32(unix.SIGKILL),
}, nil
}
@ -412,7 +412,7 @@ func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ *
return nil, res.err
}
container := res.container
container.status = task.StatusCreated
container.status = task.Status_CREATED
s.containers[r.ID] = container
@ -519,12 +519,12 @@ func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (_ *task
ContainerID: c.id,
Pid: s.hpid,
ExitStatus: c.exit,
ExitedAt: c.exitTime,
ExitedAt: timestamppb.New(c.exitTime),
})
return &taskAPI.DeleteResponse{
ExitStatus: c.exit,
ExitedAt: c.exitTime,
ExitedAt: timestamppb.New(c.exitTime),
Pid: s.hpid,
}, nil
}
@ -538,7 +538,7 @@ func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (_ *task
return &taskAPI.DeleteResponse{
ExitStatus: uint32(execs.exitCode),
ExitedAt: execs.exitTime,
ExitedAt: timestamppb.New(execs.exitTime),
Pid: s.hpid,
}, nil
}
@ -656,7 +656,7 @@ func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (_ *taskAP
Stderr: c.stderr,
Terminal: c.terminal,
ExitStatus: c.exit,
ExitedAt: c.exitTime,
ExitedAt: timestamppb.New(c.exitTime),
}, nil
}
@ -676,7 +676,7 @@ func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (_ *taskAP
Stderr: execs.tty.stderr,
Terminal: execs.tty.terminal,
ExitStatus: uint32(execs.exitCode),
ExitedAt: execs.exitTime,
ExitedAt: timestamppb.New(execs.exitTime),
}, nil
}
@ -701,11 +701,11 @@ func (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (_ *emptyp
return nil, err
}
c.status = task.StatusPausing
c.status = task.Status_PAUSING
err = s.sandbox.PauseContainer(spanCtx, r.ID)
if err == nil {
c.status = task.StatusPaused
c.status = task.Status_PAUSED
s.send(&eventstypes.TaskPaused{
ContainerID: c.id,
})
@ -713,7 +713,7 @@ func (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (_ *emptyp
}
if status, err := s.getContainerStatus(c.id); err != nil {
c.status = task.StatusUnknown
c.status = task.Status_UNKNOWN
} else {
c.status = status
}
@ -744,7 +744,7 @@ func (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (_ *empt
err = s.sandbox.ResumeContainer(spanCtx, c.id)
if err == nil {
c.status = task.StatusRunning
c.status = task.Status_RUNNING
s.send(&eventstypes.TaskResumed{
ContainerID: c.id,
})
@ -752,7 +752,7 @@ func (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (_ *empt
}
if status, err := s.getContainerStatus(c.id); err != nil {
c.status = task.StatusUnknown
c.status = task.Status_UNKNOWN
} else {
c.status = status
}
@ -811,7 +811,7 @@ func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (_ *emptypb.
// send a SIGKILL signal first to try to stop the container, thus
// once the container has terminated, here should ignore this signal
// and return directly.
if (signum == syscall.SIGKILL || signum == syscall.SIGTERM) && processStatus == task.StatusStopped {
if (signum == syscall.SIGKILL || signum == syscall.SIGTERM) && processStatus == task.Status_STOPPED {
shimLog.WithFields(logrus.Fields{
"sandbox": s.sandbox.ID(),
"container": c.id,
@ -1086,7 +1086,7 @@ func (s *service) Wait(ctx context.Context, r *taskAPI.WaitRequest) (_ *taskAPI.
return &taskAPI.WaitResponse{
ExitStatus: ret,
ExitedAt: c.exitTime,
ExitedAt: timestamppb.New(c.exitTime),
}, nil
}
@ -1110,7 +1110,7 @@ func (s *service) checkProcesses(e exit) {
ID: id,
Pid: e.pid,
ExitStatus: uint32(e.status),
ExitedAt: e.timestamp,
ExitedAt: timestamppb.New(e.timestamp),
})
}
@ -1127,19 +1127,19 @@ func (s *service) getContainer(id string) (*container, error) {
func (s *service) getContainerStatus(containerID string) (task.Status, error) {
cStatus, err := s.sandbox.StatusContainer(containerID)
if err != nil {
return task.StatusUnknown, err
return task.Status_UNKNOWN, err
}
var status task.Status
switch cStatus.State.State {
case types.StateReady:
status = task.StatusCreated
status = task.Status_CREATED
case types.StateRunning:
status = task.StatusRunning
status = task.Status_RUNNING
case types.StatePaused:
status = task.StatusPaused
status = task.Status_PAUSED
case types.StateStopped:
status = task.StatusStopped
status = task.Status_STOPPED
}
return status, nil

View File

@ -12,7 +12,7 @@ import (
"strings"
"testing"
taskAPI "github.com/containerd/containerd/runtime/v2/task"
taskAPI "github.com/containerd/containerd/api/runtime/task/v2"
ktu "github.com/kata-containers/kata-containers/src/runtime/pkg/katatestutils"
"github.com/stretchr/testify/assert"
)

View File

@ -66,7 +66,7 @@ func startContainer(ctx context.Context, s *service, c *container) (retErr error
shimLog.WithError(err).Warn("Failed to run post-start hooks")
}
c.status = task.StatusRunning
c.status = task.Status_RUNNING
stdin, stdout, stderr, err := s.sandbox.IOStream(c.id, c.id)
if err != nil {
@ -127,7 +127,7 @@ func startExec(ctx context.Context, s *service, containerID, execID string) (e *
}
execs.id = proc.Token
execs.status = task.StatusRunning
execs.status = task.Status_RUNNING
if execs.tty.height != 0 && execs.tty.width != 0 {
err = s.sandbox.WinsizeProcess(ctx, c.id, execs.id, execs.tty.height, execs.tty.width)
if err != nil {

View File

@ -10,8 +10,8 @@ import (
"context"
"testing"
taskAPI "github.com/containerd/containerd/api/runtime/task/v2"
"github.com/containerd/containerd/namespaces"
taskAPI "github.com/containerd/containerd/runtime/v2/task"
vc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
vcAnnotations "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/annotations"

View File

@ -87,14 +87,14 @@ func wait(ctx context.Context, s *service, c *container, execID string) (int32,
shimLog.WithError(err).WithField("container", c.id).Warn("stop container failed")
}
}
c.status = task.StatusStopped
c.status = task.Status_STOPPED
c.exit = uint32(ret)
c.exitTime = timeStamp
c.exitCh <- uint32(ret)
shimLog.WithField("container", c.id).Debug("The container status is StatusStopped")
} else {
execs.status = task.StatusStopped
execs.status = task.Status_STOPPED
execs.exitCode = ret
execs.exitTime = timeStamp

View File

@ -17,7 +17,7 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
pb "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
pb "k8s.io/cri-api/pkg/apis/runtime/v1"
)
const (

View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,93 @@
# go-fuzz-headers
This repository contains various helper functions for go fuzzing. It is mostly used in combination with [go-fuzz](https://github.com/dvyukov/go-fuzz), but compatibility with fuzzing in the standard library will also be supported. Any coverage guided fuzzing engine that provides an array or slice of bytes can be used with go-fuzz-headers.
## Usage
Using go-fuzz-headers is easy. First create a new consumer with the bytes provided by the fuzzing engine:
```go
import (
fuzz "github.com/AdaLogics/go-fuzz-headers"
)
data := []byte{'R', 'a', 'n', 'd', 'o', 'm'}
f := fuzz.NewConsumer(data)
```
This creates a `Consumer` that consumes the bytes of the input as it uses them to fuzz different types.
After that, `f` can be used to easily create fuzzed instances of different types. Below are some examples:
### Structs
One of the most useful features of go-fuzz-headers is its ability to fill structs with the data provided by the fuzzing engine. This is done with a single line:
```go
type Person struct {
Name string
Age int
}
p := Person{}
// Fill p with values based on the data provided by the fuzzing engine:
err := f.GenerateStruct(&p)
```
This includes nested structs too. In this example, the fuzz Consumer will also insert values in `p.BestFriend`:
```go
type PersonI struct {
Name string
Age int
BestFriend PersonII
}
type PersonII struct {
Name string
Age int
}
p := PersonI{}
err := f.GenerateStruct(&p)
```
If the consumer should insert values for unexported fields as well as exported, this can be enabled with:
```go
f.AllowUnexportedFields()
```
...and disabled with:
```go
f.DisallowUnexportedFields()
```
### Other types:
Other useful APIs:
```go
createdString, err := f.GetString() // Gets a string
createdInt, err := f.GetInt() // Gets an integer
createdByte, err := f.GetByte() // Gets a byte
createdBytes, err := f.GetBytes() // Gets a byte slice
createdBool, err := f.GetBool() // Gets a boolean
err := f.FuzzMap(target_map) // Fills a map
createdTarBytes, err := f.TarBytes() // Gets bytes of a valid tar archive
err := f.CreateFiles(inThisDir) // Fills inThisDir with files
createdString, err := f.GetStringFrom("anyCharInThisString", ofThisLength) // Gets a string that consists of chars from "anyCharInThisString" and has the exact length "ofThisLength"
```
Most APIs are added as they are needed.
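
As a brief illustration of the standard-library compatibility mentioned above, a consumer can be wired into a native Go fuzz target. This is a hedged sketch only: `Config` and `FuzzConfig` are hypothetical names, and it assumes the `testing` package's fuzzing API.

```go
package mypkg_test

import (
	"testing"

	fuzz "github.com/AdaLogics/go-fuzz-headers"
)

// Config is a hypothetical struct used only for this example.
type Config struct {
	Name  string
	Count int
}

func FuzzConfig(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		// Hand the raw fuzz input to a Consumer and let it fill the struct.
		c := fuzz.NewConsumer(data)

		var cfg Config
		if err := c.GenerateStruct(&cfg); err != nil {
			return // not enough input bytes to fill the struct; skip this case
		}

		// Exercise the code under test with cfg here.
		_ = cfg
	})
}
```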
## Projects that use go-fuzz-headers
- [runC](https://github.com/opencontainers/runc)
- [Istio](https://github.com/istio/istio)
- [Vitess](https://github.com/vitessio/vitess)
- [Containerd](https://github.com/containerd/containerd)
Feel free to add your own project to the list, if you use go-fuzz-headers to fuzz it.
## Status
The project is under development and will be updated regularly.
## References
go-fuzz-headers' approach to fuzzing structs is strongly inspired by [gofuzz](https://github.com/google/gofuzz).

View File

@ -0,0 +1,914 @@
// Copyright 2023 The go-fuzz-headers Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gofuzzheaders
import (
"archive/tar"
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
"os"
"path/filepath"
"reflect"
"strconv"
"strings"
"time"
"unsafe"
)
var (
MaxTotalLen uint32 = 2000000
maxDepth = 100
)
func SetMaxTotalLen(newLen uint32) {
MaxTotalLen = newLen
}
type ConsumeFuzzer struct {
data []byte
dataTotal uint32
CommandPart []byte
RestOfArray []byte
NumberOfCalls int
position uint32
fuzzUnexportedFields bool
curDepth int
Funcs map[reflect.Type]reflect.Value
}
func IsDivisibleBy(n int, divisibleby int) bool {
return (n % divisibleby) == 0
}
func NewConsumer(fuzzData []byte) *ConsumeFuzzer {
return &ConsumeFuzzer{
data: fuzzData,
dataTotal: uint32(len(fuzzData)),
Funcs: make(map[reflect.Type]reflect.Value),
curDepth: 0,
}
}
func (f *ConsumeFuzzer) Split(minCalls, maxCalls int) error {
if f.dataTotal == 0 {
return errors.New("could not split")
}
numberOfCalls := int(f.data[0])
if numberOfCalls < minCalls || numberOfCalls > maxCalls {
return errors.New("bad number of calls")
}
if int(f.dataTotal) < numberOfCalls+numberOfCalls+1 {
return errors.New("length of data does not match required parameters")
}
// Define part 2 and 3 of the data array
commandPart := f.data[1 : numberOfCalls+1]
restOfArray := f.data[numberOfCalls+1:]
// Just a small check. It is necessary
if len(commandPart) != numberOfCalls {
return errors.New("length of commandPart does not match number of calls")
}
// Check if restOfArray is divisible by numberOfCalls
if !IsDivisibleBy(len(restOfArray), numberOfCalls) {
return errors.New("length of commandPart does not match number of calls")
}
f.CommandPart = commandPart
f.RestOfArray = restOfArray
f.NumberOfCalls = numberOfCalls
return nil
}
func (f *ConsumeFuzzer) AllowUnexportedFields() {
f.fuzzUnexportedFields = true
}
func (f *ConsumeFuzzer) DisallowUnexportedFields() {
f.fuzzUnexportedFields = false
}
func (f *ConsumeFuzzer) GenerateStruct(targetStruct interface{}) error {
e := reflect.ValueOf(targetStruct).Elem()
return f.fuzzStruct(e, false)
}
func (f *ConsumeFuzzer) setCustom(v reflect.Value) error {
// First: see if we have a fuzz function for it.
doCustom, ok := f.Funcs[v.Type()]
if !ok {
return fmt.Errorf("could not find a custom function")
}
switch v.Kind() {
case reflect.Ptr:
if v.IsNil() {
if !v.CanSet() {
return fmt.Errorf("could not use a custom function")
}
v.Set(reflect.New(v.Type().Elem()))
}
case reflect.Map:
if v.IsNil() {
if !v.CanSet() {
return fmt.Errorf("could not use a custom function")
}
v.Set(reflect.MakeMap(v.Type()))
}
default:
return fmt.Errorf("could not use a custom function")
}
verr := doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{
F: f,
})})
// check if we return an error
if verr[0].IsNil() {
return nil
}
return fmt.Errorf("could not use a custom function")
}
func (f *ConsumeFuzzer) fuzzStruct(e reflect.Value, customFunctions bool) error {
if f.curDepth >= maxDepth {
// return err or nil here?
return nil
}
f.curDepth++
defer func() { f.curDepth-- }()
// We check if we should check for custom functions
if customFunctions && e.IsValid() && e.CanAddr() {
err := f.setCustom(e.Addr())
if err != nil {
return err
}
}
switch e.Kind() {
case reflect.Struct:
for i := 0; i < e.NumField(); i++ {
var v reflect.Value
if !e.Field(i).CanSet() {
if f.fuzzUnexportedFields {
v = reflect.NewAt(e.Field(i).Type(), unsafe.Pointer(e.Field(i).UnsafeAddr())).Elem()
}
if err := f.fuzzStruct(v, customFunctions); err != nil {
return err
}
} else {
v = e.Field(i)
if err := f.fuzzStruct(v, customFunctions); err != nil {
return err
}
}
}
case reflect.String:
str, err := f.GetString()
if err != nil {
return err
}
if e.CanSet() {
e.SetString(str)
}
case reflect.Slice:
var maxElements uint32
// Byte slices should not be restricted
if e.Type().String() == "[]uint8" {
maxElements = 10000000
} else {
maxElements = 50
}
randQty, err := f.GetUint32()
if err != nil {
return err
}
numOfElements := randQty % maxElements
if (f.dataTotal - f.position) < numOfElements {
numOfElements = f.dataTotal - f.position
}
uu := reflect.MakeSlice(e.Type(), int(numOfElements), int(numOfElements))
for i := 0; i < int(numOfElements); i++ {
// If we have more than 10, then we can proceed with that.
if err := f.fuzzStruct(uu.Index(i), customFunctions); err != nil {
if i >= 10 {
if e.CanSet() {
e.Set(uu)
}
return nil
} else {
return err
}
}
}
if e.CanSet() {
e.Set(uu)
}
case reflect.Uint16:
newInt, err := f.GetUint16()
if err != nil {
return err
}
if e.CanSet() {
e.SetUint(uint64(newInt))
}
case reflect.Uint32:
newInt, err := f.GetUint32()
if err != nil {
return err
}
if e.CanSet() {
e.SetUint(uint64(newInt))
}
case reflect.Uint64:
newInt, err := f.GetInt()
if err != nil {
return err
}
if e.CanSet() {
e.SetUint(uint64(newInt))
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
newInt, err := f.GetInt()
if err != nil {
return err
}
if e.CanSet() {
e.SetInt(int64(newInt))
}
case reflect.Float32:
newFloat, err := f.GetFloat32()
if err != nil {
return err
}
if e.CanSet() {
e.SetFloat(float64(newFloat))
}
case reflect.Float64:
newFloat, err := f.GetFloat64()
if err != nil {
return err
}
if e.CanSet() {
e.SetFloat(float64(newFloat))
}
case reflect.Map:
if e.CanSet() {
e.Set(reflect.MakeMap(e.Type()))
const maxElements = 50
randQty, err := f.GetInt()
if err != nil {
return err
}
numOfElements := randQty % maxElements
for i := 0; i < numOfElements; i++ {
key := reflect.New(e.Type().Key()).Elem()
if err := f.fuzzStruct(key, customFunctions); err != nil {
return err
}
val := reflect.New(e.Type().Elem()).Elem()
if err = f.fuzzStruct(val, customFunctions); err != nil {
return err
}
e.SetMapIndex(key, val)
}
}
case reflect.Ptr:
if e.CanSet() {
e.Set(reflect.New(e.Type().Elem()))
if err := f.fuzzStruct(e.Elem(), customFunctions); err != nil {
return err
}
return nil
}
case reflect.Uint8:
b, err := f.GetByte()
if err != nil {
return err
}
if e.CanSet() {
e.SetUint(uint64(b))
}
}
return nil
}
func (f *ConsumeFuzzer) GetStringArray() (reflect.Value, error) {
// The max size of the array:
const max uint32 = 20
arraySize := f.position
if arraySize > max {
arraySize = max
}
stringArray := reflect.MakeSlice(reflect.SliceOf(reflect.TypeOf("string")), int(arraySize), int(arraySize))
if f.position+arraySize >= f.dataTotal {
return stringArray, errors.New("could not make string array")
}
for i := 0; i < int(arraySize); i++ {
stringSize := uint32(f.data[f.position])
if f.position+stringSize >= f.dataTotal {
return stringArray, nil
}
stringToAppend := string(f.data[f.position : f.position+stringSize])
strVal := reflect.ValueOf(stringToAppend)
stringArray = reflect.Append(stringArray, strVal)
f.position += stringSize
}
return stringArray, nil
}
func (f *ConsumeFuzzer) GetInt() (int, error) {
if f.position >= f.dataTotal {
return 0, errors.New("not enough bytes to create int")
}
returnInt := int(f.data[f.position])
f.position++
return returnInt, nil
}
func (f *ConsumeFuzzer) GetByte() (byte, error) {
if f.position >= f.dataTotal {
return 0x00, errors.New("not enough bytes to get byte")
}
returnByte := f.data[f.position]
f.position++
return returnByte, nil
}
func (f *ConsumeFuzzer) GetNBytes(numberOfBytes int) ([]byte, error) {
if f.position >= f.dataTotal {
return nil, errors.New("not enough bytes to get byte")
}
returnBytes := make([]byte, 0, numberOfBytes)
for i := 0; i < numberOfBytes; i++ {
newByte, err := f.GetByte()
if err != nil {
return nil, err
}
returnBytes = append(returnBytes, newByte)
}
return returnBytes, nil
}
func (f *ConsumeFuzzer) GetUint16() (uint16, error) {
u16, err := f.GetNBytes(2)
if err != nil {
return 0, err
}
littleEndian, err := f.GetBool()
if err != nil {
return 0, err
}
if littleEndian {
return binary.LittleEndian.Uint16(u16), nil
}
return binary.BigEndian.Uint16(u16), nil
}
func (f *ConsumeFuzzer) GetUint32() (uint32, error) {
u32, err := f.GetNBytes(4)
if err != nil {
return 0, err
}
return binary.BigEndian.Uint32(u32), nil
}
func (f *ConsumeFuzzer) GetUint64() (uint64, error) {
u64, err := f.GetNBytes(8)
if err != nil {
return 0, err
}
littleEndian, err := f.GetBool()
if err != nil {
return 0, err
}
if littleEndian {
return binary.LittleEndian.Uint64(u64), nil
}
return binary.BigEndian.Uint64(u64), nil
}
func (f *ConsumeFuzzer) GetBytes() ([]byte, error) {
var length uint32
var err error
length, err = f.GetUint32()
if err != nil {
return nil, errors.New("not enough bytes to create byte array")
}
if length == 0 {
length = 30
}
bytesLeft := f.dataTotal - f.position
if bytesLeft <= 0 {
return nil, errors.New("not enough bytes to create byte array")
}
// If the length is the same as bytes left, we will not overflow
// the remaining bytes.
if length != bytesLeft {
length = length % bytesLeft
}
byteBegin := f.position
if byteBegin+length < byteBegin {
return nil, errors.New("numbers overflow")
}
f.position = byteBegin + length
return f.data[byteBegin:f.position], nil
}
func (f *ConsumeFuzzer) GetString() (string, error) {
if f.position >= f.dataTotal {
return "nil", errors.New("not enough bytes to create string")
}
length, err := f.GetUint32()
if err != nil {
return "nil", errors.New("not enough bytes to create string")
}
if f.position > MaxTotalLen {
return "nil", errors.New("created too large a string")
}
byteBegin := f.position
if byteBegin >= f.dataTotal {
return "nil", errors.New("not enough bytes to create string")
}
if byteBegin+length > f.dataTotal {
return "nil", errors.New("not enough bytes to create string")
}
if byteBegin > byteBegin+length {
return "nil", errors.New("numbers overflow")
}
f.position = byteBegin + length
return string(f.data[byteBegin:f.position]), nil
}
func (f *ConsumeFuzzer) GetBool() (bool, error) {
if f.position >= f.dataTotal {
return false, errors.New("not enough bytes to create bool")
}
if IsDivisibleBy(int(f.data[f.position]), 2) {
f.position++
return true, nil
} else {
f.position++
return false, nil
}
}
func (f *ConsumeFuzzer) FuzzMap(m interface{}) error {
return f.GenerateStruct(m)
}
func returnTarBytes(buf []byte) ([]byte, error) {
return buf, nil
// Count files
var fileCounter int
tr := tar.NewReader(bytes.NewReader(buf))
for {
_, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
fileCounter++
}
if fileCounter >= 1 {
return buf, nil
}
return nil, fmt.Errorf("not enough files were created\n")
}
func setTarHeaderFormat(hdr *tar.Header, f *ConsumeFuzzer) error {
ind, err := f.GetInt()
if err != nil {
hdr.Format = tar.FormatGNU
//return nil
}
switch ind % 4 {
case 0:
hdr.Format = tar.FormatUnknown
case 1:
hdr.Format = tar.FormatUSTAR
case 2:
hdr.Format = tar.FormatPAX
case 3:
hdr.Format = tar.FormatGNU
}
return nil
}
func setTarHeaderTypeflag(hdr *tar.Header, f *ConsumeFuzzer) error {
ind, err := f.GetInt()
if err != nil {
return err
}
switch ind % 13 {
case 0:
hdr.Typeflag = tar.TypeReg
case 1:
hdr.Typeflag = tar.TypeLink
linkname, err := f.GetString()
if err != nil {
return err
}
hdr.Linkname = linkname
case 2:
hdr.Typeflag = tar.TypeSymlink
linkname, err := f.GetString()
if err != nil {
return err
}
hdr.Linkname = linkname
case 3:
hdr.Typeflag = tar.TypeChar
case 4:
hdr.Typeflag = tar.TypeBlock
case 5:
hdr.Typeflag = tar.TypeDir
case 6:
hdr.Typeflag = tar.TypeFifo
case 7:
hdr.Typeflag = tar.TypeCont
case 8:
hdr.Typeflag = tar.TypeXHeader
case 9:
hdr.Typeflag = tar.TypeXGlobalHeader
case 10:
hdr.Typeflag = tar.TypeGNUSparse
case 11:
hdr.Typeflag = tar.TypeGNULongName
case 12:
hdr.Typeflag = tar.TypeGNULongLink
}
return nil
}
func (f *ConsumeFuzzer) createTarFileBody() ([]byte, error) {
return f.GetBytes()
/*length, err := f.GetUint32()
if err != nil {
return nil, errors.New("not enough bytes to create byte array")
}
// A bit of optimization to attempt to create a file body
// when we don't have as many bytes left as "length"
remainingBytes := f.dataTotal - f.position
if remainingBytes <= 0 {
return nil, errors.New("created too large a string")
}
if f.position+length > MaxTotalLen {
return nil, errors.New("created too large a string")
}
byteBegin := f.position
if byteBegin >= f.dataTotal {
return nil, errors.New("not enough bytes to create byte array")
}
if length == 0 {
return nil, errors.New("zero-length is not supported")
}
if byteBegin+length >= f.dataTotal {
return nil, errors.New("not enough bytes to create byte array")
}
if byteBegin+length < byteBegin {
return nil, errors.New("numbers overflow")
}
f.position = byteBegin + length
return f.data[byteBegin:f.position], nil*/
}
// getTarFileName is similar to GetString(), but creates string based
// on the length of f.data to reduce the likelihood of overflowing
// f.data.
func (f *ConsumeFuzzer) getTarFilename() (string, error) {
return f.GetString()
/*length, err := f.GetUint32()
if err != nil {
return "nil", errors.New("not enough bytes to create string")
}
// A bit of optimization to attempt to create a file name
// when we don't have as many bytes left as "length"
remainingBytes := f.dataTotal - f.position
if remainingBytes <= 0 {
return "nil", errors.New("created too large a string")
}
if f.position > MaxTotalLen {
return "nil", errors.New("created too large a string")
}
byteBegin := f.position
if byteBegin >= f.dataTotal {
return "nil", errors.New("not enough bytes to create string")
}
if byteBegin+length > f.dataTotal {
return "nil", errors.New("not enough bytes to create string")
}
if byteBegin > byteBegin+length {
return "nil", errors.New("numbers overflow")
}
f.position = byteBegin + length
return string(f.data[byteBegin:f.position]), nil*/
}
type TarFile struct {
Hdr *tar.Header
Body []byte
}
// TarBytes returns valid bytes for a tar archive
func (f *ConsumeFuzzer) TarBytes() ([]byte, error) {
numberOfFiles, err := f.GetInt()
if err != nil {
return nil, err
}
var tarFiles []*TarFile
tarFiles = make([]*TarFile, 0)
const maxNoOfFiles = 100
for i := 0; i < numberOfFiles%maxNoOfFiles; i++ {
var filename string
var filebody []byte
var sec, nsec int
var err error
filename, err = f.getTarFilename()
if err != nil {
var sb strings.Builder
sb.WriteString("file-")
sb.WriteString(strconv.Itoa(i))
filename = sb.String()
}
filebody, err = f.createTarFileBody()
if err != nil {
var sb strings.Builder
sb.WriteString("filebody-")
sb.WriteString(strconv.Itoa(i))
filebody = []byte(sb.String())
}
sec, err = f.GetInt()
if err != nil {
sec = 1672531200 // beginning of 2023
}
nsec, err = f.GetInt()
if err != nil {
nsec = 1703980800 // end of 2023
}
hdr := &tar.Header{
Name: filename,
Size: int64(len(filebody)),
Mode: 0o600,
ModTime: time.Unix(int64(sec), int64(nsec)),
}
if err := setTarHeaderTypeflag(hdr, f); err != nil {
return []byte(""), err
}
if err := setTarHeaderFormat(hdr, f); err != nil {
return []byte(""), err
}
tf := &TarFile{
Hdr: hdr,
Body: filebody,
}
tarFiles = append(tarFiles, tf)
}
var buf bytes.Buffer
tw := tar.NewWriter(&buf)
defer tw.Close()
for _, tf := range tarFiles {
tw.WriteHeader(tf.Hdr)
tw.Write(tf.Body)
}
return buf.Bytes(), nil
}
// This is similar to TarBytes, but it returns a series of
// files instead of raw tar bytes. The advantage of this
// api is that it is cheaper in terms of cpu power to
// modify or check the files in the fuzzer with TarFiles()
// because it avoids creating a tar reader.
func (f *ConsumeFuzzer) TarFiles() ([]*TarFile, error) {
numberOfFiles, err := f.GetInt()
if err != nil {
return nil, err
}
var tarFiles []*TarFile
tarFiles = make([]*TarFile, 0)
const maxNoOfFiles = 100
for i := 0; i < numberOfFiles%maxNoOfFiles; i++ {
filename, err := f.getTarFilename()
if err != nil {
return tarFiles, err
}
filebody, err := f.createTarFileBody()
if err != nil {
return tarFiles, err
}
sec, err := f.GetInt()
if err != nil {
return tarFiles, err
}
nsec, err := f.GetInt()
if err != nil {
return tarFiles, err
}
hdr := &tar.Header{
Name: filename,
Size: int64(len(filebody)),
Mode: 0o600,
ModTime: time.Unix(int64(sec), int64(nsec)),
}
if err := setTarHeaderTypeflag(hdr, f); err != nil {
hdr.Typeflag = tar.TypeReg
}
if err := setTarHeaderFormat(hdr, f); err != nil {
return tarFiles, err // should not happen
}
tf := &TarFile{
Hdr: hdr,
Body: filebody,
}
tarFiles = append(tarFiles, tf)
}
return tarFiles, nil
}
// CreateFiles creates pseudo-random files in rootDir.
// It creates subdirs and places the files there.
// It is the caller's responsibility to ensure that
// rootDir exists.
func (f *ConsumeFuzzer) CreateFiles(rootDir string) error {
numberOfFiles, err := f.GetInt()
if err != nil {
return err
}
maxNumberOfFiles := numberOfFiles % 4000 // This is completely arbitrary
if maxNumberOfFiles == 0 {
return errors.New("maxNumberOfFiles is nil")
}
var noOfCreatedFiles int
for i := 0; i < maxNumberOfFiles; i++ {
// The file to create:
fileName, err := f.GetString()
if err != nil {
if noOfCreatedFiles > 0 {
// If files have been created, we don't return an error.
break
} else {
return errors.New("could not get fileName")
}
}
if strings.Contains(fileName, "..") || (len(fileName) > 0 && fileName[0] == 47) || strings.Contains(fileName, "\\") {
continue
}
fullFilePath := filepath.Join(rootDir, fileName)
// Find the subdirectory of the file
if subDir := filepath.Dir(fileName); subDir != "" && subDir != "." {
// create the dir first; avoid going outside the root dir
if strings.Contains(subDir, "../") || (len(subDir) > 0 && subDir[0] == 47) || strings.Contains(subDir, "\\") {
continue
}
dirPath := filepath.Join(rootDir, subDir)
if _, err := os.Stat(dirPath); os.IsNotExist(err) {
err2 := os.MkdirAll(dirPath, 0o777)
if err2 != nil {
continue
}
}
fullFilePath = filepath.Join(dirPath, fileName)
} else {
// Create symlink
createSymlink, err := f.GetBool()
if err != nil {
if noOfCreatedFiles > 0 {
break
} else {
return errors.New("could not create the symlink")
}
}
if createSymlink {
symlinkTarget, err := f.GetString()
if err != nil {
return err
}
err = os.Symlink(symlinkTarget, fullFilePath)
if err != nil {
return err
}
// stop loop here, since a symlink needs no further action
noOfCreatedFiles++
continue
}
// We create a normal file
fileContents, err := f.GetBytes()
if err != nil {
if noOfCreatedFiles > 0 {
break
} else {
return errors.New("could not create the file")
}
}
err = os.WriteFile(fullFilePath, fileContents, 0o666)
if err != nil {
continue
}
noOfCreatedFiles++
}
}
return nil
}
// GetStringFrom returns a string that can only consist of characters
// included in possibleChars. It returns an error if the created string
// does not have the specified length.
func (f *ConsumeFuzzer) GetStringFrom(possibleChars string, length int) (string, error) {
if (f.dataTotal - f.position) < uint32(length) {
return "", errors.New("not enough bytes to create a string")
}
output := make([]byte, 0, length)
for i := 0; i < length; i++ {
charIndex, err := f.GetInt()
if err != nil {
return string(output), err
}
output = append(output, possibleChars[charIndex%len(possibleChars)])
}
return string(output), nil
}
func (f *ConsumeFuzzer) GetRune() ([]rune, error) {
stringToConvert, err := f.GetString()
if err != nil {
return []rune("nil"), err
}
return []rune(stringToConvert), nil
}
func (f *ConsumeFuzzer) GetFloat32() (float32, error) {
u32, err := f.GetNBytes(4)
if err != nil {
return 0, err
}
littleEndian, err := f.GetBool()
if err != nil {
return 0, err
}
if littleEndian {
u32LE := binary.LittleEndian.Uint32(u32)
return math.Float32frombits(u32LE), nil
}
u32BE := binary.BigEndian.Uint32(u32)
return math.Float32frombits(u32BE), nil
}
func (f *ConsumeFuzzer) GetFloat64() (float64, error) {
u64, err := f.GetNBytes(8)
if err != nil {
return 0, err
}
littleEndian, err := f.GetBool()
if err != nil {
return 0, err
}
if littleEndian {
u64LE := binary.LittleEndian.Uint64(u64)
return math.Float64frombits(u64LE), nil
}
u64BE := binary.BigEndian.Uint64(u64)
return math.Float64frombits(u64BE), nil
}
func (f *ConsumeFuzzer) CreateSlice(targetSlice interface{}) error {
return f.GenerateStruct(targetSlice)
}

View File

@ -0,0 +1,62 @@
// Copyright 2023 The go-fuzz-headers Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gofuzzheaders
import (
"fmt"
"reflect"
)
type Continue struct {
F *ConsumeFuzzer
}
func (f *ConsumeFuzzer) AddFuncs(fuzzFuncs []interface{}) {
for i := range fuzzFuncs {
v := reflect.ValueOf(fuzzFuncs[i])
if v.Kind() != reflect.Func {
panic("Need only funcs!")
}
t := v.Type()
if t.NumIn() != 2 || t.NumOut() != 1 {
fmt.Println(t.NumIn(), t.NumOut())
panic("Need 2 in and 1 out params. In must be the type. Out must be an error")
}
argT := t.In(0)
switch argT.Kind() {
case reflect.Ptr, reflect.Map:
default:
panic("fuzzFunc must take pointer or map type")
}
if t.In(1) != reflect.TypeOf(Continue{}) {
panic("fuzzFunc's second parameter must be type Continue")
}
f.Funcs[argT] = v
}
}
func (f *ConsumeFuzzer) GenerateWithCustom(targetStruct interface{}) error {
e := reflect.ValueOf(targetStruct).Elem()
return f.fuzzStruct(e, true)
}
func (c Continue) GenerateStruct(targetStruct interface{}) error {
return c.F.GenerateStruct(targetStruct)
}
func (c Continue) GenerateStructWithCustom(targetStruct interface{}) error {
return c.F.GenerateWithCustom(targetStruct)
}
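
To illustrate the contract AddFuncs enforces above (two parameters, the first a pointer or map and the second a Continue, returning an error), here is a minimal sketch; MyConfig and the helper names are made up for the example:

```go
package fuzzdemo

import (
	fuzz "github.com/AdaLogics/go-fuzz-headers"
)

// MyConfig is an illustrative target type, not part of this change.
type MyConfig struct {
	Name  string
	Ports []int
}

// fuzzMyConfig has the shape AddFuncs requires: (pointer, Continue) error.
func fuzzMyConfig(cfg *MyConfig, c fuzz.Continue) error {
	name, err := c.F.GetStringFrom("abcdefgh", 4)
	if err != nil {
		return err
	}
	cfg.Name = name
	// Fall back to the default generator for the remaining fields.
	return c.GenerateStruct(&cfg.Ports)
}

func buildConfig(data []byte) (*MyConfig, error) {
	f := fuzz.NewConsumer(data)
	f.AddFuncs([]interface{}{fuzzMyConfig})
	cfg := &MyConfig{}
	return cfg, f.GenerateWithCustom(cfg)
}
```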

View File

@ -0,0 +1,556 @@
// Copyright 2023 The go-fuzz-headers Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gofuzzheaders
import (
"fmt"
"strings"
)
// getKeyword returns a keyword chosen by an index derived from the fuzzer data.
func getKeyword(f *ConsumeFuzzer) (string, error) {
index, err := f.GetInt()
if err != nil {
return keywords[0], err
}
for i, k := range keywords {
if i == index {
return k, nil
}
}
return keywords[0], fmt.Errorf("could not get a kw")
}
// Simple utility function to check if a string
// slice contains a string.
func containsString(s []string, e string) bool {
for _, a := range s {
if a == e {
return true
}
}
return false
}
// These keywords are used specifically for fuzzing Vitess
var keywords = []string{
"accessible", "action", "add", "after", "against", "algorithm",
"all", "alter", "always", "analyze", "and", "as", "asc", "asensitive",
"auto_increment", "avg_row_length", "before", "begin", "between",
"bigint", "binary", "_binary", "_utf8mb4", "_utf8", "_latin1", "bit",
"blob", "bool", "boolean", "both", "by", "call", "cancel", "cascade",
"cascaded", "case", "cast", "channel", "change", "char", "character",
"charset", "check", "checksum", "coalesce", "code", "collate", "collation",
"column", "columns", "comment", "committed", "commit", "compact", "complete",
"compressed", "compression", "condition", "connection", "constraint", "continue",
"convert", "copy", "cume_dist", "substr", "substring", "create", "cross",
"csv", "current_date", "current_time", "current_timestamp", "current_user",
"cursor", "data", "database", "databases", "day", "day_hour", "day_microsecond",
"day_minute", "day_second", "date", "datetime", "dec", "decimal", "declare",
"default", "definer", "delay_key_write", "delayed", "delete", "dense_rank",
"desc", "describe", "deterministic", "directory", "disable", "discard",
"disk", "distinct", "distinctrow", "div", "double", "do", "drop", "dumpfile",
"duplicate", "dynamic", "each", "else", "elseif", "empty", "enable",
"enclosed", "encryption", "end", "enforced", "engine", "engines", "enum",
"error", "escape", "escaped", "event", "exchange", "exclusive", "exists",
"exit", "explain", "expansion", "export", "extended", "extract", "false",
"fetch", "fields", "first", "first_value", "fixed", "float", "float4",
"float8", "flush", "for", "force", "foreign", "format", "from", "full",
"fulltext", "function", "general", "generated", "geometry", "geometrycollection",
"get", "global", "gtid_executed", "grant", "group", "grouping", "groups",
"group_concat", "having", "header", "high_priority", "hosts", "hour", "hour_microsecond",
"hour_minute", "hour_second", "if", "ignore", "import", "in", "index", "indexes",
"infile", "inout", "inner", "inplace", "insensitive", "insert", "insert_method",
"int", "int1", "int2", "int3", "int4", "int8", "integer", "interval",
"into", "io_after_gtids", "is", "isolation", "iterate", "invoker", "join",
"json", "json_table", "key", "keys", "keyspaces", "key_block_size", "kill", "lag",
"language", "last", "last_value", "last_insert_id", "lateral", "lead", "leading",
"leave", "left", "less", "level", "like", "limit", "linear", "lines",
"linestring", "load", "local", "localtime", "localtimestamp", "lock", "logs",
"long", "longblob", "longtext", "loop", "low_priority", "manifest",
"master_bind", "match", "max_rows", "maxvalue", "mediumblob", "mediumint",
"mediumtext", "memory", "merge", "microsecond", "middleint", "min_rows", "minute",
"minute_microsecond", "minute_second", "mod", "mode", "modify", "modifies",
"multilinestring", "multipoint", "multipolygon", "month", "name",
"names", "natural", "nchar", "next", "no", "none", "not", "no_write_to_binlog",
"nth_value", "ntile", "null", "numeric", "of", "off", "offset", "on",
"only", "open", "optimize", "optimizer_costs", "option", "optionally",
"or", "order", "out", "outer", "outfile", "over", "overwrite", "pack_keys",
"parser", "partition", "partitioning", "password", "percent_rank", "plugins",
"point", "polygon", "precision", "primary", "privileges", "processlist",
"procedure", "query", "quarter", "range", "rank", "read", "reads", "read_write",
"real", "rebuild", "recursive", "redundant", "references", "regexp", "relay",
"release", "remove", "rename", "reorganize", "repair", "repeat", "repeatable",
"replace", "require", "resignal", "restrict", "return", "retry", "revert",
"revoke", "right", "rlike", "rollback", "row", "row_format", "row_number",
"rows", "s3", "savepoint", "schema", "schemas", "second", "second_microsecond",
"security", "select", "sensitive", "separator", "sequence", "serializable",
"session", "set", "share", "shared", "show", "signal", "signed", "slow",
"smallint", "spatial", "specific", "sql", "sqlexception", "sqlstate",
"sqlwarning", "sql_big_result", "sql_cache", "sql_calc_found_rows",
"sql_no_cache", "sql_small_result", "ssl", "start", "starting",
"stats_auto_recalc", "stats_persistent", "stats_sample_pages", "status",
"storage", "stored", "straight_join", "stream", "system", "vstream",
"table", "tables", "tablespace", "temporary", "temptable", "terminated",
"text", "than", "then", "time", "timestamp", "timestampadd", "timestampdiff",
"tinyblob", "tinyint", "tinytext", "to", "trailing", "transaction", "tree",
"traditional", "trigger", "triggers", "true", "truncate", "uncommitted",
"undefined", "undo", "union", "unique", "unlock", "unsigned", "update",
"upgrade", "usage", "use", "user", "user_resources", "using", "utc_date",
"utc_time", "utc_timestamp", "validation", "values", "variables", "varbinary",
"varchar", "varcharacter", "varying", "vgtid_executed", "virtual", "vindex",
"vindexes", "view", "vitess", "vitess_keyspaces", "vitess_metadata",
"vitess_migration", "vitess_migrations", "vitess_replication_status",
"vitess_shards", "vitess_tablets", "vschema", "warnings", "when",
"where", "while", "window", "with", "without", "work", "write", "xor",
"year", "year_month", "zerofill",
}
// Keywords that could get an additional keyword
var needCustomString = []string{
"DISTINCTROW", "FROM", // Select keywords:
"GROUP BY", "HAVING", "WINDOW",
"FOR",
"ORDER BY", "LIMIT",
"INTO", "PARTITION", "AS", // Insert Keywords:
"ON DUPLICATE KEY UPDATE",
"WHERE", "LIMIT", // Delete keywords
"INFILE", "INTO TABLE", "CHARACTER SET", // Load keywords
"TERMINATED BY", "ENCLOSED BY",
"ESCAPED BY", "STARTING BY",
"TERMINATED BY", "STARTING BY",
"IGNORE",
"VALUE", "VALUES", // Replace tokens
"SET", // Update tokens
"ENGINE =", // Drop tokens
"DEFINER =", "ON SCHEDULE", "RENAME TO", // Alter tokens
"COMMENT", "DO", "INITIAL_SIZE = ", "OPTIONS",
}
var alterTableTokens = [][]string{
{"CUSTOM_FUZZ_STRING"},
{"CUSTOM_ALTTER_TABLE_OPTIONS"},
{"PARTITION_OPTIONS_FOR_ALTER_TABLE"},
}
var alterTokens = [][]string{
{
"DATABASE", "SCHEMA", "DEFINER = ", "EVENT", "FUNCTION", "INSTANCE",
"LOGFILE GROUP", "PROCEDURE", "SERVER",
},
{"CUSTOM_FUZZ_STRING"},
{
"ON SCHEDULE", "ON COMPLETION PRESERVE", "ON COMPLETION NOT PRESERVE",
"ADD UNDOFILE", "OPTIONS",
},
{"RENAME TO", "INITIAL_SIZE = "},
{"ENABLE", "DISABLE", "DISABLE ON SLAVE", "ENGINE"},
{"COMMENT"},
{"DO"},
}
var setTokens = [][]string{
{"CHARACTER SET", "CHARSET", "CUSTOM_FUZZ_STRING", "NAMES"},
{"CUSTOM_FUZZ_STRING", "DEFAULT", "="},
{"CUSTOM_FUZZ_STRING"},
}
var dropTokens = [][]string{
{"TEMPORARY", "UNDO"},
{
"DATABASE", "SCHEMA", "EVENT", "INDEX", "LOGFILE GROUP",
"PROCEDURE", "FUNCTION", "SERVER", "SPATIAL REFERENCE SYSTEM",
"TABLE", "TABLESPACE", "TRIGGER", "VIEW",
},
{"IF EXISTS"},
{"CUSTOM_FUZZ_STRING"},
{"ON", "ENGINE = ", "RESTRICT", "CASCADE"},
}
var renameTokens = [][]string{
{"TABLE"},
{"CUSTOM_FUZZ_STRING"},
{"TO"},
{"CUSTOM_FUZZ_STRING"},
}
var truncateTokens = [][]string{
{"TABLE"},
{"CUSTOM_FUZZ_STRING"},
}
var createTokens = [][]string{
{"OR REPLACE", "TEMPORARY", "UNDO"}, // For create spatial reference system
{
"UNIQUE", "FULLTEXT", "SPATIAL", "ALGORITHM = UNDEFINED", "ALGORITHM = MERGE",
"ALGORITHM = TEMPTABLE",
},
{
"DATABASE", "SCHEMA", "EVENT", "FUNCTION", "INDEX", "LOGFILE GROUP",
"PROCEDURE", "SERVER", "SPATIAL REFERENCE SYSTEM", "TABLE", "TABLESPACE",
"TRIGGER", "VIEW",
},
{"IF NOT EXISTS"},
{"CUSTOM_FUZZ_STRING"},
}
/*
// For future use.
var updateTokens = [][]string{
{"LOW_PRIORITY"},
{"IGNORE"},
{"SET"},
{"WHERE"},
{"ORDER BY"},
{"LIMIT"},
}
*/
var replaceTokens = [][]string{
{"LOW_PRIORITY", "DELAYED"},
{"INTO"},
{"PARTITION"},
{"CUSTOM_FUZZ_STRING"},
{"VALUES", "VALUE"},
}
var loadTokens = [][]string{
{"DATA"},
{"LOW_PRIORITY", "CONCURRENT", "LOCAL"},
{"INFILE"},
{"REPLACE", "IGNORE"},
{"INTO TABLE"},
{"PARTITION"},
{"CHARACTER SET"},
{"FIELDS", "COLUMNS"},
{"TERMINATED BY"},
{"OPTIONALLY"},
{"ENCLOSED BY"},
{"ESCAPED BY"},
{"LINES"},
{"STARTING BY"},
{"TERMINATED BY"},
{"IGNORE"},
{"LINES", "ROWS"},
{"CUSTOM_FUZZ_STRING"},
}
// These are all the tokens that can come after "INSERT"
var insertTokens = [][]string{
{"LOW_PRIORITY", "DELAYED", "HIGH_PRIORITY", "IGNORE"},
{"INTO"},
{"PARTITION"},
{"CUSTOM_FUZZ_STRING"},
{"AS"},
{"ON DUPLICATE KEY UPDATE"},
}
// These are everything that comes after "SELECT"
var selectTokens = [][]string{
{"*", "CUSTOM_FUZZ_STRING", "DISTINCTROW"},
{"HIGH_PRIORITY"},
{"STRAIGHT_JOIN"},
{"SQL_SMALL_RESULT", "SQL_BIG_RESULT", "SQL_BUFFER_RESULT"},
{"SQL_NO_CACHE", "SQL_CALC_FOUND_ROWS"},
{"CUSTOM_FUZZ_STRING"},
{"FROM"},
{"WHERE"},
{"GROUP BY"},
{"HAVING"},
{"WINDOW"},
{"ORDER BY"},
{"LIMIT"},
{"CUSTOM_FUZZ_STRING"},
{"FOR"},
}
// These are everything that comes after "DELETE"
var deleteTokens = [][]string{
{"LOW_PRIORITY", "QUICK", "IGNORE", "FROM", "AS"},
{"PARTITION"},
{"WHERE"},
{"ORDER BY"},
{"LIMIT"},
}
var alter_table_options = []string{
"ADD", "COLUMN", "FIRST", "AFTER", "INDEX", "KEY", "FULLTEXT", "SPATIAL",
"CONSTRAINT", "UNIQUE", "FOREIGN KEY", "CHECK", "ENFORCED", "DROP", "ALTER",
"NOT", "INPLACE", "COPY", "SET", "VISIBLE", "INVISIBLE", "DEFAULT", "CHANGE",
"CHARACTER SET", "COLLATE", "DISABLE", "ENABLE", "KEYS", "TABLESPACE", "LOCK",
"FORCE", "MODIFY", "SHARED", "EXCLUSIVE", "NONE", "ORDER BY", "RENAME COLUMN",
"AS", "=", "ASC", "DESC", "WITH", "WITHOUT", "VALIDATION", "ADD PARTITION",
"DROP PARTITION", "DISCARD PARTITION", "IMPORT PARTITION", "TRUNCATE PARTITION",
"COALESCE PARTITION", "REORGANIZE PARTITION", "EXCHANGE PARTITION",
"ANALYZE PARTITION", "CHECK PARTITION", "OPTIMIZE PARTITION", "REBUILD PARTITION",
"REPAIR PARTITION", "REMOVE PARTITIONING", "USING", "BTREE", "HASH", "COMMENT",
"KEY_BLOCK_SIZE", "WITH PARSER", "AUTOEXTEND_SIZE", "AUTO_INCREMENT", "AVG_ROW_LENGTH",
"CHECKSUM", "INSERT_METHOD", "ROW_FORMAT", "DYNAMIC", "FIXED", "COMPRESSED", "REDUNDANT",
"COMPACT", "SECONDARY_ENGINE_ATTRIBUTE", "STATS_AUTO_RECALC", "STATS_PERSISTENT",
"STATS_SAMPLE_PAGES", "ZLIB", "LZ4", "ENGINE_ATTRIBUTE", "KEY_BLOCK_SIZE", "MAX_ROWS",
"MIN_ROWS", "PACK_KEYS", "PASSWORD", "COMPRESSION", "CONNECTION", "DIRECTORY",
"DELAY_KEY_WRITE", "ENCRYPTION", "STORAGE", "DISK", "MEMORY", "UNION",
}
// Creates an 'alter table' statement. 'alter table' is an exception
// in that it has its own function. The majority of statements
// are created by 'createStmt()'.
func createAlterTableStmt(f *ConsumeFuzzer) (string, error) {
maxArgs, err := f.GetInt()
if err != nil {
return "", err
}
maxArgs = maxArgs % 30
if maxArgs == 0 {
return "", fmt.Errorf("could not create alter table stmt")
}
var stmt strings.Builder
stmt.WriteString("ALTER TABLE ")
for i := 0; i < maxArgs; i++ {
// Decide whether to use an existing token or a custom string
tokenType, err := f.GetInt()
if err != nil {
return "", err
}
if tokenType%4 == 1 {
customString, err := f.GetString()
if err != nil {
return "", err
}
stmt.WriteString(" " + customString)
} else {
tokenIndex, err := f.GetInt()
if err != nil {
return "", err
}
stmt.WriteString(" " + alter_table_options[tokenIndex%len(alter_table_options)])
}
}
return stmt.String(), nil
}
func chooseToken(tokens []string, f *ConsumeFuzzer) (string, error) {
index, err := f.GetInt()
if err != nil {
return "", err
}
var token strings.Builder
token.WriteString(tokens[index%len(tokens)])
if token.String() == "CUSTOM_FUZZ_STRING" {
customFuzzString, err := f.GetString()
if err != nil {
return "", err
}
return customFuzzString, nil
}
// Check if token requires an argument
if containsString(needCustomString, token.String()) {
customFuzzString, err := f.GetString()
if err != nil {
return "", err
}
token.WriteString(" " + customFuzzString)
}
return token.String(), nil
}
var stmtTypes = map[string][][]string{
"DELETE": deleteTokens,
"INSERT": insertTokens,
"SELECT": selectTokens,
"LOAD": loadTokens,
"REPLACE": replaceTokens,
"CREATE": createTokens,
"DROP": dropTokens,
"RENAME": renameTokens,
"TRUNCATE": truncateTokens,
"SET": setTokens,
"ALTER": alterTokens,
"ALTER TABLE": alterTableTokens, // ALTER TABLE has its own set of tokens
}
var stmtTypeEnum = map[int]string{
0: "DELETE",
1: "INSERT",
2: "SELECT",
3: "LOAD",
4: "REPLACE",
5: "CREATE",
6: "DROP",
7: "RENAME",
8: "TRUNCATE",
9: "SET",
10: "ALTER",
11: "ALTER TABLE",
}
func createStmt(f *ConsumeFuzzer) (string, error) {
stmtIndex, err := f.GetInt()
if err != nil {
return "", err
}
stmtIndex = stmtIndex % len(stmtTypes)
queryType := stmtTypeEnum[stmtIndex]
tokens := stmtTypes[queryType]
// We have a custom creator for ALTER TABLE
if queryType == "ALTER TABLE" {
query, err := createAlterTableStmt(f)
if err != nil {
return "", err
}
return query, nil
}
// Here we are creating a query that is not
// an 'alter table' query. For available
// queries, see "stmtTypes"
// First specify the first query keyword:
var query strings.Builder
query.WriteString(queryType)
// Next, create the arguments for the query:
queryArgs, err := createStmtArgs(tokens, f)
if err != nil {
return "", err
}
query.WriteString(" " + queryArgs)
return query.String(), nil
}
// createStmtArgs creates the arguments of a statement. In a select statement,
// that would be everything after "select".
func createStmtArgs(tokenslice [][]string, f *ConsumeFuzzer) (string, error) {
var query, token strings.Builder
// We go through the tokens in the tokenslice,
// create the respective token and add it to
// "query"
for _, tokens := range tokenslice {
// For extra randomization, the fuzzer can
// choose to not include this token.
includeThisToken, err := f.GetBool()
if err != nil {
return "", err
}
if !includeThisToken {
continue
}
// There may be several tokens to choose from:
if len(tokens) > 1 {
chosenToken, err := chooseToken(tokens, f)
if err != nil {
return "", err
}
query.WriteString(" " + chosenToken)
} else {
token.WriteString(tokens[0])
// In case the token is "CUSTOM_FUZZ_STRING"
// we will then create a non-structured string
if token.String() == "CUSTOM_FUZZ_STRING" {
customFuzzString, err := f.GetString()
if err != nil {
return "", err
}
query.WriteString(" " + customFuzzString)
continue
}
// Check if token requires an argument.
// Tokens that take an argument can be found
// in 'needCustomString'. If so, we add a
// non-structured string to the token.
if containsString(needCustomString, token.String()) {
customFuzzString, err := f.GetString()
if err != nil {
return "", err
}
token.WriteString(fmt.Sprintf(" %s", customFuzzString))
}
query.WriteString(fmt.Sprintf(" %s", token.String()))
}
}
return query.String(), nil
}
// Creates a semi-structured query. It creates a string
// that is a combination of the keywords and random strings.
func createQuery(f *ConsumeFuzzer) (string, error) {
queryLen, err := f.GetInt()
if err != nil {
return "", err
}
maxLen := queryLen % 60
if maxLen == 0 {
return "", fmt.Errorf("could not create a query")
}
var query strings.Builder
for i := 0; i < maxLen; i++ {
// Get a new token:
useKeyword, err := f.GetBool()
if err != nil {
return "", err
}
if useKeyword {
keyword, err := getKeyword(f)
if err != nil {
return "", err
}
query.WriteString(" " + keyword)
} else {
customString, err := f.GetString()
if err != nil {
return "", err
}
query.WriteString(" " + customString)
}
}
if query.String() == "" {
return "", fmt.Errorf("could not create a query")
}
return query.String(), nil
}
// GetSQLString is the API that users interact with.
//
// Usage:
//
// f := NewConsumer(data)
// sqlString, err := f.GetSQLString()
func (f *ConsumeFuzzer) GetSQLString() (string, error) {
var query string
veryStructured, err := f.GetBool()
if err != nil {
return "", err
}
if veryStructured {
query, err = createStmt(f)
if err != nil {
return "", err
}
} else {
query, err = createQuery(f)
if err != nil {
return "", err
}
}
return query, nil
}
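
A short sketch of the usage pattern described in the GetSQLString doc comment; the harness below is illustrative, and the generated query would be handed to whatever SQL parser is under test:

```go
package fuzzdemo

import (
	fuzz "github.com/AdaLogics/go-fuzz-headers"
)

// FuzzSQL is a hypothetical harness: it turns raw fuzz input into a
// semi-structured SQL string and feeds it to the parser under test.
func FuzzSQL(data []byte) int {
	f := fuzz.NewConsumer(data)
	query, err := f.GetSQLString()
	if err != nil {
		return 0
	}
	// parseSQL(query) would go here; the parser must not panic on any input.
	_ = query
	return 1
}
```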

View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,207 @@
package testing
import (
"fmt"
fuzz "github.com/AdaLogics/go-fuzz-headers"
"os"
"reflect"
)
type F struct {
Data []byte
T *T
FuzzFunc func(*T, any)
}
func (f *F) CleanupTempDirs() {
f.T.CleanupTempDirs()
}
func (f *F) Add(args ...any) {}
func (c *F) Cleanup(f func()) {}
func (c *F) Error(args ...any) {}
func (c *F) Errorf(format string, args ...any) {}
func (f *F) Fail() {}
func (c *F) FailNow() {}
func (c *F) Failed() bool { return false }
func (c *F) Fatal(args ...any) {}
func (c *F) Fatalf(format string, args ...any) {}
func (f *F) Fuzz(ff any) {
// we are assuming that ff is a func.
// TODO: Add a check for UX purposes
fn := reflect.ValueOf(ff)
fnType := fn.Type()
var types []reflect.Type
for i := 1; i < fnType.NumIn(); i++ {
t := fnType.In(i)
types = append(types, t)
}
args := []reflect.Value{reflect.ValueOf(f.T)}
fuzzConsumer := fuzz.NewConsumer(f.Data)
for _, v := range types {
switch v.String() {
case "[]uint8":
b, err := fuzzConsumer.GetBytes()
if err != nil {
return
}
newBytes := reflect.New(v)
newBytes.Elem().SetBytes(b)
args = append(args, newBytes.Elem())
case "string":
s, err := fuzzConsumer.GetString()
if err != nil {
return
}
newString := reflect.New(v)
newString.Elem().SetString(s)
args = append(args, newString.Elem())
case "int":
randInt, err := fuzzConsumer.GetInt()
if err != nil {
return
}
newInt := reflect.New(v)
newInt.Elem().SetInt(int64(randInt))
args = append(args, newInt.Elem())
case "int8":
randInt, err := fuzzConsumer.GetInt()
if err != nil {
return
}
newInt := reflect.New(v)
newInt.Elem().SetInt(int64(randInt))
args = append(args, newInt.Elem())
case "int16":
randInt, err := fuzzConsumer.GetInt()
if err != nil {
return
}
newInt := reflect.New(v)
newInt.Elem().SetInt(int64(randInt))
args = append(args, newInt.Elem())
case "int32":
randInt, err := fuzzConsumer.GetInt()
if err != nil {
return
}
newInt := reflect.New(v)
newInt.Elem().SetInt(int64(randInt))
args = append(args, newInt.Elem())
case "int64":
randInt, err := fuzzConsumer.GetInt()
if err != nil {
return
}
newInt := reflect.New(v)
newInt.Elem().SetInt(int64(randInt))
args = append(args, newInt.Elem())
case "uint":
randInt, err := fuzzConsumer.GetInt()
if err != nil {
return
}
newUint := reflect.New(v)
newUint.Elem().SetUint(uint64(randInt))
args = append(args, newUint.Elem())
case "uint8":
randInt, err := fuzzConsumer.GetInt()
if err != nil {
return
}
newUint := reflect.New(v)
newUint.Elem().SetUint(uint64(randInt))
args = append(args, newUint.Elem())
case "uint16":
randInt, err := fuzzConsumer.GetUint16()
if err != nil {
return
}
newUint16 := reflect.New(v)
newUint16.Elem().SetUint(uint64(randInt))
args = append(args, newUint16.Elem())
case "uint32":
randInt, err := fuzzConsumer.GetUint32()
if err != nil {
return
}
newUint32 := reflect.New(v)
newUint32.Elem().SetUint(uint64(randInt))
args = append(args, newUint32.Elem())
case "uint64":
randInt, err := fuzzConsumer.GetUint64()
if err != nil {
return
}
newUint64 := reflect.New(v)
newUint64.Elem().SetUint(uint64(randInt))
args = append(args, newUint64.Elem())
case "rune":
randRune, err := fuzzConsumer.GetRune()
if err != nil {
return
}
newRune := reflect.New(v)
newRune.Elem().Set(reflect.ValueOf(randRune))
args = append(args, newRune.Elem())
case "float32":
randFloat, err := fuzzConsumer.GetFloat32()
if err != nil {
return
}
newFloat := reflect.New(v)
newFloat.Elem().Set(reflect.ValueOf(randFloat))
args = append(args, newFloat.Elem())
case "float64":
randFloat, err := fuzzConsumer.GetFloat64()
if err != nil {
return
}
newFloat := reflect.New(v)
newFloat.Elem().Set(reflect.ValueOf(randFloat))
args = append(args, newFloat.Elem())
case "bool":
randBool, err := fuzzConsumer.GetBool()
if err != nil {
return
}
newBool := reflect.New(v)
newBool.Elem().Set(reflect.ValueOf(randBool))
args = append(args, newBool.Elem())
default:
fmt.Println(v.String())
}
}
fn.Call(args)
}
func (f *F) Helper() {}
func (c *F) Log(args ...any) {
fmt.Println(args...)
}
func (c *F) Logf(format string, args ...any) {
fmt.Println(format, args)
}
func (c *F) Name() string { return "libFuzzer" }
func (c *F) Setenv(key, value string) {}
func (c *F) Skip(args ...any) {
panic("GO-FUZZ-BUILD-PANIC")
}
func (c *F) SkipNow() {
panic("GO-FUZZ-BUILD-PANIC")
}
func (c *F) Skipf(format string, args ...any) {
panic("GO-FUZZ-BUILD-PANIC")
}
func (f *F) Skipped() bool { return false }
func (f *F) TempDir() string {
dir, err := os.MkdirTemp("", "fuzzdir-")
if err != nil {
panic(err)
}
f.T.TempDirs = append(f.T.TempDirs, dir)
return dir
}
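
For context, a native Go fuzz test of the shape this F.Fuzz shim drives might look like the sketch below (the names are illustrative); when built with go-118-fuzz-build, the string and byte arguments are decoded from the raw libFuzzer input by the type switch above:

```go
package demo

import "testing"

func FuzzSplit(f *testing.F) {
	f.Add("key=value", byte('=')) // seed corpus entry; a no-op under the shim
	f.Fuzz(func(t *testing.T, s string, sep byte) {
		// s ("string") and sep ("uint8") are produced by the type switch above.
		if i := indexByte(s, sep); i > len(s) {
			t.Fatalf("impossible index %d for %q", i, s)
		}
	})
}

// indexByte is a stand-in for the code under test.
func indexByte(s string, b byte) int {
	for i := 0; i < len(s); i++ {
		if s[i] == b {
			return i
		}
	}
	return -1
}
```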

View File

@ -0,0 +1,129 @@
package testing
import (
"fmt"
"os"
"strings"
"time"
)
// T can be used to terminate the current fuzz iteration
// without terminating the whole fuzz run. To do so, simply
// panic with the text "GO-FUZZ-BUILD-PANIC" and the fuzzer
// will recover.
type T struct {
TempDirs []string
}
func NewT() *T {
tempDirs := make([]string, 0)
return &T{TempDirs: tempDirs}
}
func unsupportedApi(name string) string {
plsOpenIss := "Please open an issue https://github.com/AdamKorcz/go-118-fuzz-build if you need this feature."
var b strings.Builder
b.WriteString(fmt.Sprintf("%s is not supported when fuzzing in libFuzzer mode\n.", name))
b.WriteString(plsOpenIss)
return b.String()
}
func (t *T) Cleanup(f func()) {
f()
}
func (t *T) Deadline() (deadline time.Time, ok bool) {
panic(unsupportedApi("t.Deadline()"))
}
func (t *T) Error(args ...any) {
fmt.Println(args...)
panic("error")
}
func (t *T) Errorf(format string, args ...any) {
fmt.Printf(format+"\n", args...)
panic("errorf")
}
func (t *T) Fail() {
panic("Called T.Fail()")
}
func (t *T) FailNow() {
panic("Called T.Fail()")
panic(unsupportedApi("t.FailNow()"))
}
func (t *T) Failed() bool {
panic(unsupportedApi("t.Failed()"))
}
func (t *T) Fatal(args ...any) {
fmt.Println(args...)
panic("fatal")
}
func (t *T) Fatalf(format string, args ...any) {
fmt.Printf(format+"\n", args...)
panic("fatal")
}
func (t *T) Helper() {
// We can't support it, but it also just impacts how failures are reported, so we can ignore it
}
func (t *T) Log(args ...any) {
fmt.Println(args...)
}
func (t *T) Logf(format string, args ...any) {
fmt.Println(format)
fmt.Println(args...)
}
func (t *T) Name() string {
return "libFuzzer"
}
func (t *T) Parallel() {
panic(unsupportedApi("t.Parallel()"))
}
func (t *T) Run(name string, f func(t *T)) bool {
panic(unsupportedApi("t.Run()"))
}
func (t *T) Setenv(key, value string) {
}
func (t *T) Skip(args ...any) {
panic("GO-FUZZ-BUILD-PANIC")
}
func (t *T) SkipNow() {
panic("GO-FUZZ-BUILD-PANIC")
}
// Is not really supported. We just skip instead
// of printing any message. A log message can be
// added if need be.
func (t *T) Skipf(format string, args ...any) {
panic("GO-FUZZ-BUILD-PANIC")
}
func (t *T) Skipped() bool {
panic(unsupportedApi("t.Skipped()"))
}
func (t *T) TempDir() string {
dir, err := os.MkdirTemp("", "fuzzdir-")
if err != nil {
panic(err)
}
t.TempDirs = append(t.TempDirs, dir)
return dir
}
func (t *T) CleanupTempDirs() {
if len(t.TempDirs) > 0 {
for _, tempDir := range t.TempDirs {
os.RemoveAll(tempDir)
}
}
}

View File

@ -0,0 +1,42 @@
package testing
import (
"testing"
)
func AllocsPerRun(runs int, f func()) (avg float64) {
panic(unsupportedApi("testing.AllocsPerRun"))
}
func CoverMode() string {
panic(unsupportedApi("testing.CoverMode"))
}
func Coverage() float64 {
panic(unsupportedApi("testing.Coverage"))
}
func Init() {
panic(unsupportedApi("testing.Init"))
}
func RegisterCover(c testing.Cover) {
panic(unsupportedApi("testing.RegisterCover"))
}
func RunExamples(matchString func(pat, str string) (bool, error), examples []testing.InternalExample) (ok bool) {
panic(unsupportedApi("testing.RunExamples"))
}
func RunTests(matchString func(pat, str string) (bool, error), tests []testing.InternalTest) (ok bool) {
panic(unsupportedApi("testing.RunTests"))
}
func Short() bool {
return false
}
func Verbose() bool {
panic(unsupportedApi("testing.Verbose"))
}
type M struct{}
func (m *M) Run() (code int) {
panic("testing.M is not supported in libFuzzer mode")
}

View File

@ -21,7 +21,9 @@ type Unmarshaler interface {
UnmarshalTOML(interface{}) error
}
// Unmarshal decodes the contents of `data` in TOML format into a pointer `v`.
// Unmarshal decodes the contents of data in TOML format into a pointer v.
//
// See [Decoder] for a description of the decoding process.
func Unmarshal(data []byte, v interface{}) error {
_, err := NewDecoder(bytes.NewReader(data)).Decode(v)
return err
@ -29,13 +31,12 @@ func Unmarshal(data []byte, v interface{}) error {
// Decode the TOML data in to the pointer v.
//
// See the documentation on Decoder for a description of the decoding process.
// See [Decoder] for a description of the decoding process.
func Decode(data string, v interface{}) (MetaData, error) {
return NewDecoder(strings.NewReader(data)).Decode(v)
}
// DecodeFile is just like Decode, except it will automatically read the
// contents of the file at path and decode it for you.
// DecodeFile reads the contents of a file and decodes it with [Decode].
func DecodeFile(path string, v interface{}) (MetaData, error) {
fp, err := os.Open(path)
if err != nil {
@ -48,7 +49,7 @@ func DecodeFile(path string, v interface{}) (MetaData, error) {
// Primitive is a TOML value that hasn't been decoded into a Go value.
//
// This type can be used for any value, which will cause decoding to be delayed.
// You can use the PrimitiveDecode() function to "manually" decode these values.
// You can use [PrimitiveDecode] to "manually" decode these values.
//
// NOTE: The underlying representation of a `Primitive` value is subject to
// change. Do not rely on it.
@ -70,15 +71,15 @@ const (
// Decoder decodes TOML data.
//
// TOML tables correspond to Go structs or maps (dealer's choice they can be
// used interchangeably).
// TOML tables correspond to Go structs or maps; they can be used
// interchangeably, but structs offer better type safety.
//
// TOML table arrays correspond to either a slice of structs or a slice of maps.
//
// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed
// in the local timezone.
// TOML datetimes correspond to [time.Time]. Local datetimes are parsed in the
// local timezone.
//
// time.Duration types are treated as nanoseconds if the TOML value is an
// [time.Duration] types are treated as nanoseconds if the TOML value is an
// integer, or they're parsed with time.ParseDuration() if they're strings.
//
// All other TOML types (float, string, int, bool and array) correspond to the
@ -90,7 +91,7 @@ const (
// UnmarshalText method. See the Unmarshaler example for a demonstration with
// email addresses.
//
// Key mapping
// ### Key mapping
//
// TOML keys can map to either keys in a Go map or field names in a Go struct.
// The special `toml` struct tag can be used to map TOML keys to struct fields
@ -168,17 +169,16 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
return md, md.unify(p.mapping, rv)
}
// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
// including this method. (i.e., `v` may contain more `Primitive`
// values.)
// PrimitiveDecode is just like the other Decode* functions, except it decodes a
// TOML value that has already been parsed. Valid primitive values can *only* be
// obtained from values filled by the decoder functions, including this method.
// (i.e., v may contain more [Primitive] values.)
//
// Meta data for primitive values is included in the meta data returned by
// the `Decode*` functions with one exception: keys returned by the Undecoded
// method will only reflect keys that were decoded. Namely, any keys hidden
// behind a Primitive will be considered undecoded. Executing this method will
// update the undecoded keys in the meta data. (See the example.)
// Meta data for primitive values is included in the meta data returned by the
// Decode* functions with one exception: keys returned by the Undecoded method
// will only reflect keys that were decoded. Namely, any keys hidden behind a
// Primitive will be considered undecoded. Executing this method will update the
// undecoded keys in the meta data. (See the example.)
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
md.context = primValue.context
defer func() { md.context = nil }()

View File

@ -7,8 +7,8 @@ import (
"io/fs"
)
// DecodeFS is just like Decode, except it will automatically read the contents
// of the file at `path` from a fs.FS instance.
// DecodeFS reads the contents of a file from [fs.FS] and decodes it with
// [Decode].
func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
fp, err := fsys.Open(path)
if err != nil {

View File

@ -1,13 +1,11 @@
/*
Package toml implements decoding and encoding of TOML files.
This package supports TOML v1.0.0, as listed on https://toml.io
There is also support for delaying decoding with the Primitive type, and
querying the set of keys in a TOML document with the MetaData type.
The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
and can be used to verify if TOML document is valid. It can also be used to
print the type of each key.
*/
// Package toml implements decoding and encoding of TOML files.
//
// This package supports TOML v1.0.0, as specified at https://toml.io
//
// There is also support for delaying decoding with the Primitive type, and
// querying the set of keys in a TOML document with the MetaData type.
//
// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
// and can be used to verify if a TOML document is valid. It can also be used to
// print the type of each key.
package toml
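
A minimal decoding sketch of the mapping the package documentation above describes (the struct and the TOML content are illustrative):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type config struct {
	Title string            `toml:"title"`
	Ports []int             `toml:"ports"`
	Owner map[string]string `toml:"owner"`
}

func main() {
	const doc = `
title = "example"
ports = [8080, 8081]

[owner]
name = "gopher"
`
	var cfg config
	if _, err := toml.Decode(doc, &cfg); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Println(cfg.Title, cfg.Ports, cfg.Owner["name"])
}
```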

View File

@ -79,12 +79,12 @@ type Marshaler interface {
// Encoder encodes a Go value to a TOML document.
//
// The mapping between Go values and TOML values should be precisely the same as
// for the Decode* functions.
// for [Decode].
//
// time.Time is encoded as a RFC 3339 string, and time.Duration as its string
// representation.
//
// The toml.Marshaler and encoder.TextMarshaler interfaces are supported to
// The [Marshaler] and [encoding.TextMarshaler] interfaces are supported to
// encode the value as custom TOML.
//
// If you want to write arbitrary binary data then you will need to use
@ -130,7 +130,7 @@ func NewEncoder(w io.Writer) *Encoder {
}
}
// Encode writes a TOML representation of the Go value to the Encoder's writer.
// Encode writes a TOML representation of the Go value to the [Encoder]'s writer.
//
// An error is returned if the value given cannot be encoded to a valid TOML
// document.
@ -261,7 +261,7 @@ func (enc *Encoder) eElement(rv reflect.Value) {
enc.eElement(reflect.ValueOf(v))
return
}
encPanic(errors.New(fmt.Sprintf("Unable to convert \"%s\" to neither int64 nor float64", n)))
encPanic(fmt.Errorf("unable to convert %q to int64 or float64", n))
}
switch rv.Kind() {
@ -504,7 +504,8 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
if opts.name != "" {
keyName = opts.name
}
if opts.omitempty && isEmpty(fieldVal) {
if opts.omitempty && enc.isEmpty(fieldVal) {
continue
}
if opts.omitzero && isZero(fieldVal) {
@ -648,12 +649,26 @@ func isZero(rv reflect.Value) bool {
return false
}
func isEmpty(rv reflect.Value) bool {
func (enc *Encoder) isEmpty(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
return rv.Len() == 0
case reflect.Struct:
return reflect.Zero(rv.Type()).Interface() == rv.Interface()
if rv.Type().Comparable() {
return reflect.Zero(rv.Type()).Interface() == rv.Interface()
}
// Need to also check if all the fields are empty, otherwise something
// like this with uncomparable types will always return true:
//
// type a struct{ field b }
// type b struct{ s []string }
// s := a{field: b{s: []string{"AAA"}}}
for i := 0; i < rv.NumField(); i++ {
if !enc.isEmpty(rv.Field(i)) {
return false
}
}
return true
case reflect.Bool:
return !rv.Bool()
}
@ -668,16 +683,15 @@ func (enc *Encoder) newline() {
// Write a key/value pair:
//
// key = <any value>
// key = <any value>
//
// This is also used for "k = v" in inline tables; so something like this will
// be written in three calls:
//
// ┌────────────────────┐
// │ ┌───┐ ┌─────┐│
// v v v v vv
// key = {k = v, k2 = v2}
//
// ┌───────────────────┐
// │ ┌───┐ ┌────┐│
// v v v v vv
// key = {k = 1, k2 = 2}
func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
if len(key) == 0 {
encPanic(errNoKey)

View File
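A matching encoding sketch (the struct and field names are illustrative); the omitempty handling follows the isEmpty logic shown above:

```go
package main

import (
	"fmt"
	"os"

	"github.com/BurntSushi/toml"
)

type server struct {
	Host string   `toml:"host"`
	Tags []string `toml:"tags,omitempty"` // dropped when empty, per isEmpty above
}

func main() {
	cfg := server{Host: "localhost"}
	if err := toml.NewEncoder(os.Stdout).Encode(cfg); err != nil {
		fmt.Fprintln(os.Stderr, "encode error:", err)
	}
}
```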

@ -5,57 +5,60 @@ import (
"strings"
)
// ParseError is returned when there is an error parsing the TOML syntax.
//
// For example invalid syntax, duplicate keys, etc.
// ParseError is returned when there is an error parsing the TOML syntax such as
// invalid syntax, duplicate keys, etc.
//
// In addition to the error message itself, you can also print detailed location
// information with context by using ErrorWithPosition():
// information with context by using [ErrorWithPosition]:
//
// toml: error: Key 'fruit' was already created and cannot be used as an array.
// toml: error: Key 'fruit' was already created and cannot be used as an array.
//
// At line 4, column 2-7:
// At line 4, column 2-7:
//
// 2 | fruit = []
// 3 |
// 4 | [[fruit]] # Not allowed
// ^^^^^
// 2 | fruit = []
// 3 |
// 4 | [[fruit]] # Not allowed
// ^^^^^
//
// Furthermore, the ErrorWithUsage() can be used to print the above with some
// more detailed usage guidance:
// [ErrorWithUsage] can be used to print the above with some more detailed usage
// guidance:
//
// toml: error: newlines not allowed within inline tables
// toml: error: newlines not allowed within inline tables
//
// At line 1, column 18:
// At line 1, column 18:
//
// 1 | x = [{ key = 42 #
// ^
// 1 | x = [{ key = 42 #
// ^
//
// Error help:
// Error help:
//
// Inline tables must always be on a single line:
// Inline tables must always be on a single line:
//
// table = {key = 42, second = 43}
// table = {key = 42, second = 43}
//
// It is invalid to split them over multiple lines like so:
// It is invalid to split them over multiple lines like so:
//
// # INVALID
// table = {
// key = 42,
// second = 43
// }
// # INVALID
// table = {
// key = 42,
// second = 43
// }
//
// Use regular for this:
// Use regular for this:
//
// [table]
// key = 42
// second = 43
// [table]
// key = 42
// second = 43
type ParseError struct {
Message string // Short technical message.
Usage string // Longer message with usage guidance; may be blank.
Position Position // Position of the error
LastKey string // Last parsed key, may be blank.
Line int // Line the error occurred. Deprecated: use Position.
// Line the error occurred.
//
// Deprecated: use [Position].
Line int
err error
input string
@ -83,7 +86,7 @@ func (pe ParseError) Error() string {
// ErrorWithPosition() returns the error with detailed location context.
//
// See the documentation on ParseError.
// See the documentation on [ParseError].
func (pe ParseError) ErrorWithPosition() string {
if pe.input == "" { // Should never happen, but just in case.
return pe.Error()
@ -124,7 +127,7 @@ func (pe ParseError) ErrorWithPosition() string {
// ErrorWithUsage() returns the error with detailed location context and usage
// guidance.
//
// See the documentation on ParseError.
// See the documentation on [ParseError].
func (pe ParseError) ErrorWithUsage() string {
m := pe.ErrorWithPosition()
if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" {

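A short sketch of retrieving the detailed output described above (the malformed document is illustrative):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct{}
	_, err := toml.Decode("fruit = []\n\n[[fruit]] # Not allowed\n", &v)
	var perr toml.ParseError
	if errors.As(err, &perr) {
		fmt.Println(perr.ErrorWithPosition())
	}
}
```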
View File

@ -771,7 +771,7 @@ func lexRawString(lx *lexer) stateFn {
}
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
// a string. It assumes that the beginning "'''" has already been consumed and
// a string. It assumes that the beginning ''' has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
r := lx.next()

View File

@ -71,7 +71,7 @@ func (md *MetaData) Keys() []Key {
// Undecoded returns all keys that have not been decoded in the order in which
// they appear in the original TOML document.
//
// This includes keys that haven't been decoded because of a Primitive value.
// This includes keys that haven't been decoded because of a [Primitive] value.
// Once the Primitive value is decoded, the keys will be considered decoded.
//
// Also note that decoding into an empty interface will result in no decoding,
@ -89,7 +89,7 @@ func (md *MetaData) Undecoded() []Key {
return undecoded
}
// Key represents any TOML key, including key groups. Use (MetaData).Keys to get
// Key represents any TOML key, including key groups. Use [MetaData.Keys] to get
// values of this type.
type Key []string

View File

@ -0,0 +1 @@
* text=auto eol=lf

View File

@ -1 +1,10 @@
.vscode/
*.exe
# testing
testdata
# go workspaces
go.work
go.work.sum

View File

@ -0,0 +1,149 @@
run:
skip-dirs:
- pkg/etw/sample
linters:
enable:
# style
- containedctx # struct contains a context
- dupl # duplicate code
- errname # errors are named correctly
- nolintlint # "//nolint" directives are properly explained
- revive # golint replacement
- unconvert # unnecessary conversions
- wastedassign
# bugs, performance, unused, etc ...
- contextcheck # function uses a non-inherited context
- errorlint # errors not wrapped for 1.13
- exhaustive # check exhaustiveness of enum switch statements
- gofmt # files are gofmt'ed
- gosec # security
- nilerr # returns nil even with non-nil error
- unparam # unused function params
issues:
exclude-rules:
# err is very often shadowed in nested scopes
- linters:
- govet
text: '^shadow: declaration of "err" shadows declaration'
# ignore long lines for skip autogen directives
- linters:
- revive
text: "^line-length-limit: "
source: "^//(go:generate|sys) "
#TODO: remove after upgrading to go1.18
# ignore comment spacing for nolint and sys directives
- linters:
- revive
text: "^comment-spacings: no space between comment delimiter and comment text"
source: "//(cspell:|nolint:|sys |todo)"
# not on go 1.18 yet, so no any
- linters:
- revive
text: "^use-any: since GO 1.18 'interface{}' can be replaced by 'any'"
# allow unjustified ignores of error checks in defer statements
- linters:
- nolintlint
text: "^directive `//nolint:errcheck` should provide explanation"
source: '^\s*defer '
# allow unjustified ignores of error lints for io.EOF
- linters:
- nolintlint
text: "^directive `//nolint:errorlint` should provide explanation"
source: '[=|!]= io.EOF'
linters-settings:
exhaustive:
default-signifies-exhaustive: true
govet:
enable-all: true
disable:
# struct order is often for Win32 compat
# also, ignore pointer bytes/GC issues for now until performance becomes an issue
- fieldalignment
check-shadowing: true
nolintlint:
allow-leading-space: false
require-explanation: true
require-specific: true
revive:
# revive is more configurable than static check, so likely the preferred alternative to static-check
# (once the perf issue is solved: https://github.com/golangci/golangci-lint/issues/2997)
enable-all-rules:
true
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
rules:
# rules with required arguments
- name: argument-limit
disabled: true
- name: banned-characters
disabled: true
- name: cognitive-complexity
disabled: true
- name: cyclomatic
disabled: true
- name: file-header
disabled: true
- name: function-length
disabled: true
- name: function-result-limit
disabled: true
- name: max-public-structs
disabled: true
# generally annoying rules
- name: add-constant # complains about any and all strings and integers
disabled: true
- name: confusing-naming # we frequently use "Foo()" and "foo()" together
disabled: true
- name: flag-parameter # excessive, and a common idiom we use
disabled: true
- name: unhandled-error # warns over common fmt.Print* and io.Close; rely on errcheck instead
disabled: true
# general config
- name: line-length-limit
arguments:
- 140
- name: var-naming
arguments:
- []
- - CID
- CRI
- CTRD
- DACL
- DLL
- DOS
- ETW
- FSCTL
- GCS
- GMSA
- HCS
- HV
- IO
- LCOW
- LDAP
- LPAC
- LTSC
- MMIO
- NT
- OCI
- PMEM
- PWSH
- RX
- SACl
- SID
- SMB
- TX
- VHD
- VHDX
- VMID
- VPCI
- WCOW
- WIM

View File

@ -13,16 +13,60 @@ Please see the LICENSE file for licensing information.
## Contributing
This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA)
declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com.
This project welcomes contributions and suggestions.
Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that
you have the right to, and actually do, grant us the rights to use your contribution.
For details, visit [Microsoft CLA](https://cla.microsoft.com).
When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR
appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA.
When you submit a pull request, a CLA-bot will automatically determine whether you need to
provide a CLA and decorate the PR appropriately (e.g., label, comment).
Simply follow the instructions provided by the bot.
You will only need to do this once across all repos using our CLA.
We also require that contributors sign their commits using git commit -s or git commit --signoff to certify they either authored the work themselves
or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for more info, as well as to make sure that you can
attest to the rules listed. Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off.
Additionally, the pull request pipeline requires the following steps to be performed before
merging.
### Code Sign-Off
We require that contributors sign their commits using [`git commit --signoff`][git-commit-s]
to certify they either authored the work themselves or otherwise have permission to use it in this project.
A range of commits can be signed off using [`git rebase --signoff`][git-rebase-s].
Please see [the developer certificate](https://developercertificate.org) for more info,
as well as to make sure that you can attest to the rules listed.
Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off.
### Linting
Code must pass a linting stage, which uses [`golangci-lint`][lint].
The linting settings are stored in [`.golangci.yaml`](./.golangci.yaml), and can be run
automatically with VSCode by adding the following to your workspace or folder settings:
```json
"go.lintTool": "golangci-lint",
"go.lintOnSave": "package",
```
Additional editor [integrations options are also available][lint-ide].
Alternatively, `golangci-lint` can be [installed locally][lint-install] and run from the repo root:
```shell
# use . or specify a path to only lint a package
# to show all lint errors, use flags "--max-issues-per-linter=0 --max-same-issues=0"
> golangci-lint run ./...
```
### Go Generate
The pipeline checks that auto-generated code, via `go generate`, is up to date.
This can be done for the entire repo:
```shell
> go generate ./...
```
## Code of Conduct
@ -30,8 +74,16 @@ This project has adopted the [Microsoft Open Source Code of Conduct](https://ope
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
## Special Thanks
Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
for another named pipe implementation.
Thanks to [natefinch][natefinch] for the inspiration for this library.
See [npipe](https://github.com/natefinch/npipe) for another named pipe implementation.
[lint]: https://golangci-lint.run/
[lint-ide]: https://golangci-lint.run/usage/integrations/#editor-integration
[lint-install]: https://golangci-lint.run/usage/install/#local-installation
[git-commit-s]: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s
[git-rebase-s]: https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---signoff
[natefinch]: https://github.com/natefinch

View File

@ -0,0 +1,41 @@
<!-- BEGIN MICROSOFT SECURITY.MD V0.0.7 BLOCK -->
## Security
Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
## Reporting Security Issues
**Please do not report security vulnerabilities through public GitHub issues.**
Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
* Full paths of source file(s) related to the manifestation of the issue
* The location of the affected source code (tag/branch/commit or direct URL)
* Any special configuration required to reproduce the issue
* Step-by-step instructions to reproduce the issue
* Proof-of-concept or exploit code (if possible)
* Impact of the issue, including how an attacker might exploit the issue
This information will help us triage your report more quickly.
If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
## Preferred Languages
We prefer all communications to be in English.
## Policy
Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
<!-- END MICROSOFT SECURITY.MD BLOCK -->

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package winio
@ -7,11 +8,12 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"runtime"
"syscall"
"unicode/utf16"
"golang.org/x/sys/windows"
)
//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
@ -24,7 +26,7 @@ const (
BackupAlternateData
BackupLink
BackupPropertyData
BackupObjectId
BackupObjectId //revive:disable-line:var-naming ID, not Id
BackupReparseData
BackupSparseBlock
BackupTxfsData
@ -34,14 +36,16 @@ const (
StreamSparseAttributes = uint32(8)
)
//nolint:revive // var-naming: ALL_CAPS
const (
WRITE_DAC = 0x40000
WRITE_OWNER = 0x80000
ACCESS_SYSTEM_SECURITY = 0x1000000
WRITE_DAC = windows.WRITE_DAC
WRITE_OWNER = windows.WRITE_OWNER
ACCESS_SYSTEM_SECURITY = windows.ACCESS_SYSTEM_SECURITY
)
// BackupHeader represents a backup stream of a file.
type BackupHeader struct {
//revive:disable-next-line:var-naming ID, not Id
Id uint32 // The backup stream ID
Attributes uint32 // Stream attributes
Size int64 // The size of the stream in bytes
@ -49,8 +53,8 @@ type BackupHeader struct {
Offset int64 // The offset of the stream in the file (for BackupSparseBlock only).
}
type win32StreamId struct {
StreamId uint32
type win32StreamID struct {
StreamID uint32
Attributes uint32
Size uint64
NameSize uint32
@ -71,7 +75,7 @@ func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
// it was not completely read.
func (r *BackupStreamReader) Next() (*BackupHeader, error) {
if r.bytesLeft > 0 {
if r.bytesLeft > 0 { //nolint:nestif // todo: flatten this
if s, ok := r.r.(io.Seeker); ok {
// Make sure Seek on io.SeekCurrent sometimes succeeds
// before trying the actual seek.
@ -82,16 +86,16 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) {
r.bytesLeft = 0
}
}
if _, err := io.Copy(ioutil.Discard, r); err != nil {
if _, err := io.Copy(io.Discard, r); err != nil {
return nil, err
}
}
var wsi win32StreamId
var wsi win32StreamID
if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil {
return nil, err
}
hdr := &BackupHeader{
Id: wsi.StreamId,
Id: wsi.StreamID,
Attributes: wsi.Attributes,
Size: int64(wsi.Size),
}
@ -102,7 +106,7 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) {
}
hdr.Name = syscall.UTF16ToString(name)
}
if wsi.StreamId == BackupSparseBlock {
if wsi.StreamID == BackupSparseBlock {
if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
return nil, err
}
@ -147,8 +151,8 @@ func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
return fmt.Errorf("missing %d bytes", w.bytesLeft)
}
name := utf16.Encode([]rune(hdr.Name))
wsi := win32StreamId{
StreamId: hdr.Id,
wsi := win32StreamID{
StreamID: hdr.Id,
Attributes: hdr.Attributes,
Size: uint64(hdr.Size),
NameSize: uint32(len(name) * 2),
@ -203,7 +207,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) {
var bytesRead uint32
err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
if err != nil {
return 0, &os.PathError{"BackupRead", r.f.Name(), err}
return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err}
}
runtime.KeepAlive(r.f)
if bytesRead == 0 {
@ -216,7 +220,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) {
// the underlying file.
func (r *BackupFileReader) Close() error {
if r.ctx != 0 {
backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
_ = backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
runtime.KeepAlive(r.f)
r.ctx = 0
}
@ -242,7 +246,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) {
var bytesWritten uint32
err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
if err != nil {
return 0, &os.PathError{"BackupWrite", w.f.Name(), err}
return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err}
}
runtime.KeepAlive(w.f)
if int(bytesWritten) != len(b) {
@ -255,7 +259,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) {
// close the underlying file.
func (w *BackupFileWriter) Close() error {
if w.ctx != 0 {
backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
_ = backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
runtime.KeepAlive(w.f)
w.ctx = 0
}
@ -271,7 +275,13 @@ func OpenForBackup(path string, access uint32, share uint32, createmode uint32)
if err != nil {
return nil, err
}
h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0)
h, err := syscall.CreateFile(&winPath[0],
access,
share,
nil,
createmode,
syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT,
0)
if err != nil {
err = &os.PathError{Op: "open", Path: path, Err: err}
return nil, err

View File

@ -0,0 +1,3 @@
// This file only exists to allow go get on non-Windows platforms.
package backuptar

View File

@ -0,0 +1,70 @@
//go:build windows
package backuptar
import (
"archive/tar"
"fmt"
"strconv"
"strings"
"time"
)
// Functions copied from https://github.com/golang/go/blob/master/src/archive/tar/strconv.go
// as we need to manage the LIBARCHIVE.creationtime PAXRecord manually.
// Idea taken from containerd which did the same thing.
// parsePAXTime takes a string of the form %d.%d as described in the PAX
// specification. Note that this implementation allows for negative timestamps,
// which is allowed for by the PAX specification, but not always portable.
func parsePAXTime(s string) (time.Time, error) {
const maxNanoSecondDigits = 9
// Split string into seconds and sub-seconds parts.
ss, sn := s, ""
if pos := strings.IndexByte(s, '.'); pos >= 0 {
ss, sn = s[:pos], s[pos+1:]
}
// Parse the seconds.
secs, err := strconv.ParseInt(ss, 10, 64)
if err != nil {
return time.Time{}, tar.ErrHeader
}
if len(sn) == 0 {
return time.Unix(secs, 0), nil // No sub-second values
}
// Parse the nanoseconds.
if strings.Trim(sn, "0123456789") != "" {
return time.Time{}, tar.ErrHeader
}
if len(sn) < maxNanoSecondDigits {
sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
} else {
sn = sn[:maxNanoSecondDigits] // Right truncate
}
nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
if len(ss) > 0 && ss[0] == '-' {
return time.Unix(secs, -1*nsecs), nil // Negative correction
}
return time.Unix(secs, nsecs), nil
}
// formatPAXTime converts ts into a time of the form %d.%d as described in the
// PAX specification. This function is capable of negative timestamps.
func formatPAXTime(ts time.Time) (s string) {
secs, nsecs := ts.Unix(), ts.Nanosecond()
if nsecs == 0 {
return strconv.FormatInt(secs, 10)
}
// If seconds is negative, then perform correction.
sign := ""
if secs < 0 {
sign = "-" // Remember sign
secs = -(secs + 1) // Add a second to secs
nsecs = -(nsecs - 1e9) // Take that second away from nsecs
}
return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0")
}
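// Editorial sketch, not part of the upstream file: the helpers above are
// intended to round-trip, so formatting a timestamp and parsing it back
// yields the same instant (precision is capped at nanoseconds).
func examplePAXTimeRoundTrip() bool {
	t := time.Unix(1700000000, 123456789)
	s := formatPAXTime(t) // "1700000000.123456789"
	got, err := parsePAXTime(s)
	return err == nil && got.Equal(t)
}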

View File

@ -0,0 +1,509 @@
//go:build windows
// +build windows
package backuptar
import (
"archive/tar"
"encoding/base64"
"fmt"
"io"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/Microsoft/go-winio"
"golang.org/x/sys/windows"
)
//nolint:deadcode,varcheck // keep unused constants for potential future use
const (
cISUID = 0004000 // Set uid
cISGID = 0002000 // Set gid
cISVTX = 0001000 // Save text (sticky bit)
cISDIR = 0040000 // Directory
cISFIFO = 0010000 // FIFO
cISREG = 0100000 // Regular file
cISLNK = 0120000 // Symbolic link
cISBLK = 0060000 // Block special file
cISCHR = 0020000 // Character special file
cISSOCK = 0140000 // Socket
)
const (
hdrFileAttributes = "MSWINDOWS.fileattr"
hdrSecurityDescriptor = "MSWINDOWS.sd"
hdrRawSecurityDescriptor = "MSWINDOWS.rawsd"
hdrMountPoint = "MSWINDOWS.mountpoint"
hdrEaPrefix = "MSWINDOWS.xattr."
hdrCreationTime = "LIBARCHIVE.creationtime"
)
// zeroReader is an io.Reader that always returns 0s.
type zeroReader struct{}
func (zeroReader) Read(b []byte) (int, error) {
for i := range b {
b[i] = 0
}
return len(b), nil
}
func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
curOffset := int64(0)
for {
bhdr, err := br.Next()
if err == io.EOF { //nolint:errorlint
err = io.ErrUnexpectedEOF
}
if err != nil {
return err
}
if bhdr.Id != winio.BackupSparseBlock {
return fmt.Errorf("unexpected stream %d", bhdr.Id)
}
// We can't seek backwards, since we have already written that data to the tar.Writer.
if bhdr.Offset < curOffset {
return fmt.Errorf("cannot seek back from %d to %d", curOffset, bhdr.Offset)
}
// archive/tar does not support writing sparse files
// so just write zeroes to catch up to the current offset.
if _, err = io.CopyN(t, zeroReader{}, bhdr.Offset-curOffset); err != nil {
return fmt.Errorf("seek to offset %d: %w", bhdr.Offset, err)
}
if bhdr.Size == 0 {
// A sparse block with size = 0 is used to mark the end of the sparse blocks.
break
}
n, err := io.Copy(t, br)
if err != nil {
return err
}
if n != bhdr.Size {
return fmt.Errorf("copied %d bytes instead of %d at offset %d", n, bhdr.Size, bhdr.Offset)
}
curOffset = bhdr.Offset + n
}
return nil
}
// BasicInfoHeader creates a tar header from basic file information.
func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *tar.Header {
hdr := &tar.Header{
Format: tar.FormatPAX,
Name: filepath.ToSlash(name),
Size: size,
Typeflag: tar.TypeReg,
ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()),
ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()),
AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()),
PAXRecords: make(map[string]string),
}
hdr.PAXRecords[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)
hdr.PAXRecords[hdrCreationTime] = formatPAXTime(time.Unix(0, fileInfo.CreationTime.Nanoseconds()))
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
hdr.Mode |= cISDIR
hdr.Size = 0
hdr.Typeflag = tar.TypeDir
}
return hdr
}
// SecurityDescriptorFromTarHeader reads the SDDL associated with the header of the current file
// from the tar header and returns the security descriptor into a byte slice.
func SecurityDescriptorFromTarHeader(hdr *tar.Header) ([]byte, error) {
if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok {
sd, err := base64.StdEncoding.DecodeString(sdraw)
if err != nil {
// Not returning sd as-is in the error-case, as base64.DecodeString
// may return partially decoded data (not nil or empty slice) in case
// of a failure: https://github.com/golang/go/blob/go1.17.7/src/encoding/base64/base64.go#L382-L387
return nil, err
}
return sd, nil
}
// Maintaining old SDDL-based behavior for backward compatibility. All new
// tar headers written by this library will have raw binary for the security
// descriptor.
if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok {
return winio.SddlToSecurityDescriptor(sddl)
}
return nil, nil
}
// ExtendedAttributesFromTarHeader reads the EAs associated with the header of the
// current file from the tar header and returns it as a byte slice.
func ExtendedAttributesFromTarHeader(hdr *tar.Header) ([]byte, error) {
var eas []winio.ExtendedAttribute //nolint:prealloc // len(eas) <= len(hdr.PAXRecords); prealloc is wasteful
for k, v := range hdr.PAXRecords {
if !strings.HasPrefix(k, hdrEaPrefix) {
continue
}
data, err := base64.StdEncoding.DecodeString(v)
if err != nil {
return nil, err
}
eas = append(eas, winio.ExtendedAttribute{
Name: k[len(hdrEaPrefix):],
Value: data,
})
}
var eaData []byte
var err error
if len(eas) != 0 {
eaData, err = winio.EncodeExtendedAttributes(eas)
if err != nil {
return nil, err
}
}
return eaData, nil
}
// EncodeReparsePointFromTarHeader reads the ReparsePoint structure from the tar header
// and encodes it into a byte slice. The file for which this function is called must be a
// symlink.
func EncodeReparsePointFromTarHeader(hdr *tar.Header) []byte {
_, isMountPoint := hdr.PAXRecords[hdrMountPoint]
rp := winio.ReparsePoint{
Target: filepath.FromSlash(hdr.Linkname),
IsMountPoint: isMountPoint,
}
return winio.EncodeReparsePoint(&rp)
}
// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream.
//
// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS.
//
// The additional Win32 metadata is:
//
// - MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value
// - MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format
// - MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink)
func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error {
name = filepath.ToSlash(name)
hdr := BasicInfoHeader(name, size, fileInfo)
// If r can be seeked, then this function is two-pass: pass 1 collects the
// tar header data, and pass 2 copies the data stream. If r cannot be
// seeked, then some header data (in particular EAs) will be silently lost.
var (
restartPos int64
err error
)
sr, readTwice := r.(io.Seeker)
if readTwice {
if restartPos, err = sr.Seek(0, io.SeekCurrent); err != nil {
readTwice = false
}
}
br := winio.NewBackupStreamReader(r)
var dataHdr *winio.BackupHeader
for dataHdr == nil {
bhdr, err := br.Next()
if err == io.EOF { //nolint:errorlint
break
}
if err != nil {
return err
}
switch bhdr.Id {
case winio.BackupData:
hdr.Mode |= cISREG
if !readTwice {
dataHdr = bhdr
}
case winio.BackupSecurity:
sd, err := io.ReadAll(br)
if err != nil {
return err
}
hdr.PAXRecords[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd)
case winio.BackupReparseData:
hdr.Mode |= cISLNK
hdr.Typeflag = tar.TypeSymlink
reparseBuffer, _ := io.ReadAll(br)
rp, err := winio.DecodeReparsePoint(reparseBuffer)
if err != nil {
return err
}
if rp.IsMountPoint {
hdr.PAXRecords[hdrMountPoint] = "1"
}
hdr.Linkname = rp.Target
case winio.BackupEaData:
eab, err := io.ReadAll(br)
if err != nil {
return err
}
eas, err := winio.DecodeExtendedAttributes(eab)
if err != nil {
return err
}
for _, ea := range eas {
// Use base64 encoding for the binary value. Note that there
// is no way to encode the EA's flags, since their use doesn't
// make any sense for persisted EAs.
hdr.PAXRecords[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value)
}
case winio.BackupAlternateData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
// ignore these streams
default:
return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id)
}
}
err = t.WriteHeader(hdr)
if err != nil {
return err
}
if readTwice {
// Get back to the data stream.
if _, err = sr.Seek(restartPos, io.SeekStart); err != nil {
return err
}
for dataHdr == nil {
bhdr, err := br.Next()
if err == io.EOF { //nolint:errorlint
break
}
if err != nil {
return err
}
if bhdr.Id == winio.BackupData {
dataHdr = bhdr
}
}
}
// The logic for copying file contents is fairly complicated due to the need for handling sparse files,
// and the weird ways they are represented by BackupRead. A normal file will always either have a data stream
// with size and content, or no data stream at all (if empty). However, for a sparse file, the content can also
// be represented using a series of sparse block streams following the data stream. Additionally, the way sparse
// files are handled by BackupRead has changed in the OS recently. The specifics of the representation are described
// in the list at the bottom of this block comment.
//
// Sparse files can be represented in four different ways, based on the specifics of the file.
// - Size = 0:
// Previously: BackupRead yields no data stream and no sparse block streams.
// Recently: BackupRead yields a data stream with size = 0. There are no following sparse block streams.
// - Size > 0, no allocated ranges:
// BackupRead yields a data stream with size = 0. Following is a single sparse block stream with
// size = 0 and offset = <file size>.
// - Size > 0, one allocated range:
// BackupRead yields a data stream with size = <file size> containing the file contents. There are no
// sparse block streams. This is the case if you take a normal file with contents and simply set the
// sparse flag on it.
// - Size > 0, multiple allocated ranges:
// BackupRead yields a data stream with size = 0. Following are sparse block streams for each allocated
// range of the file containing the range contents. Finally there is a sparse block stream with
// size = 0 and offset = <file size>.
if dataHdr != nil { //nolint:nestif // todo: reduce nesting complexity
// A data stream was found. Copy the data.
// We assume that we will either have a data stream size > 0 XOR have sparse block streams.
if dataHdr.Size > 0 || (dataHdr.Attributes&winio.StreamSparseAttributes) == 0 {
if size != dataHdr.Size {
return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size)
}
if _, err = io.Copy(t, br); err != nil {
return fmt.Errorf("%s: copying contents from data stream: %w", name, err)
}
} else if size > 0 {
// As of a recent OS change, BackupRead now returns a data stream for empty sparse files.
// These files have no sparse block streams, so skip the copySparse call if file size = 0.
if err = copySparse(t, br); err != nil {
return fmt.Errorf("%s: copying contents from sparse block stream: %w", name, err)
}
}
}
// Look for streams after the data stream. The only ones we handle are alternate data streams.
// Other streams may have metadata that could be serialized, but the tar header has already
// been written. In practice, this means that we don't get EA or TXF metadata.
for {
bhdr, err := br.Next()
if err == io.EOF { //nolint:errorlint
break
}
if err != nil {
return err
}
switch bhdr.Id {
case winio.BackupAlternateData:
if (bhdr.Attributes & winio.StreamSparseAttributes) != 0 {
// Unsupported for now, since the size of the alternate stream is not present
// in the backup stream until after the data has been read.
return fmt.Errorf("%s: tar of sparse alternate data streams is unsupported", name)
}
altName := strings.TrimSuffix(bhdr.Name, ":$DATA")
hdr = &tar.Header{
Format: hdr.Format,
Name: name + altName,
Mode: hdr.Mode,
Typeflag: tar.TypeReg,
Size: bhdr.Size,
ModTime: hdr.ModTime,
AccessTime: hdr.AccessTime,
ChangeTime: hdr.ChangeTime,
}
err = t.WriteHeader(hdr)
if err != nil {
return err
}
_, err = io.Copy(t, br)
if err != nil {
return err
}
case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
// ignore these streams
default:
return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id)
}
}
return nil
}
// FileInfoFromHeader retrieves basic Win32 file information from a tar header, using the additional metadata written by
// WriteTarFileFromBackupStream.
func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) {
name = hdr.Name
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
size = hdr.Size
}
fileInfo = &winio.FileBasicInfo{
LastAccessTime: windows.NsecToFiletime(hdr.AccessTime.UnixNano()),
LastWriteTime: windows.NsecToFiletime(hdr.ModTime.UnixNano()),
ChangeTime: windows.NsecToFiletime(hdr.ChangeTime.UnixNano()),
// Default to ModTime, we'll pull hdrCreationTime below if present
CreationTime: windows.NsecToFiletime(hdr.ModTime.UnixNano()),
}
if attrStr, ok := hdr.PAXRecords[hdrFileAttributes]; ok {
attr, err := strconv.ParseUint(attrStr, 10, 32)
if err != nil {
return "", 0, nil, err
}
fileInfo.FileAttributes = uint32(attr)
} else {
if hdr.Typeflag == tar.TypeDir {
fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
}
}
if creationTimeStr, ok := hdr.PAXRecords[hdrCreationTime]; ok {
creationTime, err := parsePAXTime(creationTimeStr)
if err != nil {
return "", 0, nil, err
}
fileInfo.CreationTime = windows.NsecToFiletime(creationTime.UnixNano())
}
return name, size, fileInfo, err
}
// WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple
// tar file entries in order to collect all the alternate data streams for the file, it returns the next
// tar file that was not processed, or io.EOF if there are no more.
func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) {
bw := winio.NewBackupStreamWriter(w)
sd, err := SecurityDescriptorFromTarHeader(hdr)
if err != nil {
return nil, err
}
if len(sd) != 0 {
bhdr := winio.BackupHeader{
Id: winio.BackupSecurity,
Size: int64(len(sd)),
}
err := bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = bw.Write(sd)
if err != nil {
return nil, err
}
}
eadata, err := ExtendedAttributesFromTarHeader(hdr)
if err != nil {
return nil, err
}
if len(eadata) != 0 {
bhdr := winio.BackupHeader{
Id: winio.BackupEaData,
Size: int64(len(eadata)),
}
err = bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = bw.Write(eadata)
if err != nil {
return nil, err
}
}
if hdr.Typeflag == tar.TypeSymlink {
reparse := EncodeReparsePointFromTarHeader(hdr)
bhdr := winio.BackupHeader{
Id: winio.BackupReparseData,
Size: int64(len(reparse)),
}
err := bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = bw.Write(reparse)
if err != nil {
return nil, err
}
}
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
bhdr := winio.BackupHeader{
Id: winio.BackupData,
Size: hdr.Size,
}
err := bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = io.Copy(bw, t)
if err != nil {
return nil, err
}
}
// Copy all the alternate data streams and return the next non-ADS header.
for {
ahdr, err := t.Next()
if err != nil {
return nil, err
}
if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") {
return ahdr, nil
}
bhdr := winio.BackupHeader{
Id: winio.BackupAlternateData,
Size: ahdr.Size,
Name: ahdr.Name[len(hdr.Name):] + ":$DATA",
}
err = bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = io.Copy(bw, t)
if err != nil {
return nil, err
}
}
}

View File

@ -0,0 +1,22 @@
// This package provides utilities for efficiently performing Win32 IO operations in Go.
// Currently, this package provides support for general IO and management of
// - named pipes
// - files
// - [Hyper-V sockets]
//
// This code is similar to Go's [net] package, and uses IO completion ports to avoid
// blocking IO on system threads, allowing Go to reuse the thread to schedule other goroutines.
//
// This limits support to Windows Vista and newer operating systems.
//
// Additionally, this package provides support for:
// - creating and managing GUIDs
// - writing to [ETW]
// - opening and managing VHDs
// - parsing [Windows Image files]
// - auto-generating Win32 API code
//
// [Hyper-V sockets]: https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service
// [ETW]: https://docs.microsoft.com/en-us/windows-hardware/drivers/devtest/event-tracing-for-windows--etw-
// [Windows Image files]: https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/work-with-windows-images
package winio

View File

@ -33,7 +33,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
if err != nil {
err = errInvalidEaBuffer
return
return ea, nb, err
}
nameOffset := fileFullEaInformationSize
@ -43,7 +43,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
nextOffset := int(info.NextEntryOffset)
if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
err = errInvalidEaBuffer
return
return ea, nb, err
}
ea.Name = string(b[nameOffset : nameOffset+nameLen])
@ -52,7 +52,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
if info.NextEntryOffset != 0 {
nb = b[info.NextEntryOffset:]
}
return
return ea, nb, err
}
// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
@ -67,7 +67,7 @@ func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
eas = append(eas, ea)
b = nb
}
return
return eas, err
}
func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {

View File

@ -11,6 +11,8 @@ import (
"sync/atomic"
"syscall"
"time"
"golang.org/x/sys/windows"
)
//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx
@ -24,6 +26,8 @@ type atomicBool int32
func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) }
func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }
//revive:disable-next-line:predeclared Keep "new" to maintain consistency with "atomic" pkg
func (b *atomicBool) swap(new bool) bool {
var newInt int32
if new {
@ -32,11 +36,6 @@ func (b *atomicBool) swap(new bool) bool {
return atomic.SwapInt32((*int32)(b), newInt) == 1
}
const (
cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
cFILE_SKIP_SET_EVENT_ON_HANDLE = 2
)
var (
ErrFileClosed = errors.New("file has already been closed")
ErrTimeout = &timeoutError{}
@ -44,28 +43,28 @@ var (
type timeoutError struct{}
func (e *timeoutError) Error() string { return "i/o timeout" }
func (e *timeoutError) Timeout() bool { return true }
func (e *timeoutError) Temporary() bool { return true }
func (*timeoutError) Error() string { return "i/o timeout" }
func (*timeoutError) Timeout() bool { return true }
func (*timeoutError) Temporary() bool { return true }
type timeoutChan chan struct{}
var ioInitOnce sync.Once
var ioCompletionPort syscall.Handle
// ioResult contains the result of an asynchronous IO operation
// ioResult contains the result of an asynchronous IO operation.
type ioResult struct {
bytes uint32
err error
}
// ioOperation represents an outstanding asynchronous Win32 IO
// ioOperation represents an outstanding asynchronous Win32 IO.
type ioOperation struct {
o syscall.Overlapped
ch chan ioResult
}
func initIo() {
func initIO() {
h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff)
if err != nil {
panic(err)
@ -94,15 +93,15 @@ type deadlineHandler struct {
timedout atomicBool
}
// makeWin32File makes a new win32File from an existing file handle
// makeWin32File makes a new win32File from an existing file handle.
func makeWin32File(h syscall.Handle) (*win32File, error) {
f := &win32File{handle: h}
ioInitOnce.Do(initIo)
ioInitOnce.Do(initIO)
_, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff)
if err != nil {
return nil, err
}
err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE)
err = setFileCompletionNotificationModes(h, windows.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS|windows.FILE_SKIP_SET_EVENT_ON_HANDLE)
if err != nil {
return nil, err
}
@ -121,14 +120,14 @@ func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
return f, nil
}
// closeHandle closes the resources associated with a Win32 handle
// closeHandle closes the resources associated with a Win32 handle.
func (f *win32File) closeHandle() {
f.wgLock.Lock()
// Atomically set that we are closing, releasing the resources only once.
if !f.closing.swap(true) {
f.wgLock.Unlock()
// cancel all IO and wait for it to complete
cancelIoEx(f.handle, nil)
_ = cancelIoEx(f.handle, nil)
f.wg.Wait()
// at this point, no new IO can start
syscall.Close(f.handle)
@ -144,14 +143,14 @@ func (f *win32File) Close() error {
return nil
}
// IsClosed checks if the file has been closed
// IsClosed checks if the file has been closed.
func (f *win32File) IsClosed() bool {
return f.closing.isSet()
}
// prepareIo prepares for a new IO operation.
// prepareIO prepares for a new IO operation.
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
func (f *win32File) prepareIo() (*ioOperation, error) {
func (f *win32File) prepareIO() (*ioOperation, error) {
f.wgLock.RLock()
if f.closing.isSet() {
f.wgLock.RUnlock()
@ -164,7 +163,7 @@ func (f *win32File) prepareIo() (*ioOperation, error) {
return c, nil
}
// ioCompletionProcessor processes completed async IOs forever
// ioCompletionProcessor processes completed async IOs forever.
func ioCompletionProcessor(h syscall.Handle) {
for {
var bytes uint32
@ -178,15 +177,17 @@ func ioCompletionProcessor(h syscall.Handle) {
}
}
// asyncIo processes the return value from ReadFile or WriteFile, blocking until
// todo: helsaawy - create an asyncIO version that takes a context
// asyncIO processes the return value from ReadFile or WriteFile, blocking until
// the operation has actually completed.
func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
if err != syscall.ERROR_IO_PENDING {
func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
if err != syscall.ERROR_IO_PENDING { //nolint:errorlint // err is Errno
return int(bytes), err
}
if f.closing.isSet() {
cancelIoEx(f.handle, &c.o)
_ = cancelIoEx(f.handle, &c.o)
}
var timeout timeoutChan
@ -200,7 +201,7 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
select {
case r = <-c.ch:
err = r.err
if err == syscall.ERROR_OPERATION_ABORTED {
if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
if f.closing.isSet() {
err = ErrFileClosed
}
@ -210,10 +211,10 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags)
}
case <-timeout:
cancelIoEx(f.handle, &c.o)
_ = cancelIoEx(f.handle, &c.o)
r = <-c.ch
err = r.err
if err == syscall.ERROR_OPERATION_ABORTED {
if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
err = ErrTimeout
}
}
@ -221,13 +222,14 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
// runtime.KeepAlive is needed, as c is passed via native
// code to ioCompletionProcessor, c must remain alive
// until the channel read is complete.
// todo: (de)allocate *ioOperation via win32 heap functions, instead of needing to KeepAlive?
runtime.KeepAlive(c)
return int(r.bytes), err
}
// Read reads from a file handle.
func (f *win32File) Read(b []byte) (int, error) {
c, err := f.prepareIo()
c, err := f.prepareIO()
if err != nil {
return 0, err
}
@ -239,13 +241,13 @@ func (f *win32File) Read(b []byte) (int, error) {
var bytes uint32
err = syscall.ReadFile(f.handle, b, &bytes, &c.o)
n, err := f.asyncIo(c, &f.readDeadline, bytes, err)
n, err := f.asyncIO(c, &f.readDeadline, bytes, err)
runtime.KeepAlive(b)
// Handle EOF conditions.
if err == nil && n == 0 && len(b) != 0 {
return 0, io.EOF
} else if err == syscall.ERROR_BROKEN_PIPE {
} else if err == syscall.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno
return 0, io.EOF
} else {
return n, err
@ -254,7 +256,7 @@ func (f *win32File) Read(b []byte) (int, error) {
// Write writes to a file handle.
func (f *win32File) Write(b []byte) (int, error) {
c, err := f.prepareIo()
c, err := f.prepareIO()
if err != nil {
return 0, err
}
@ -266,7 +268,7 @@ func (f *win32File) Write(b []byte) (int, error) {
var bytes uint32
err = syscall.WriteFile(f.handle, b, &bytes, &c.o)
n, err := f.asyncIo(c, &f.writeDeadline, bytes, err)
n, err := f.asyncIO(c, &f.writeDeadline, bytes, err)
runtime.KeepAlive(b)
return n, err
}

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package winio
@ -14,13 +15,18 @@ import (
type FileBasicInfo struct {
CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime
FileAttributes uint32
pad uint32 // padding
_ uint32 // padding
}
// GetFileBasicInfo retrieves times and attributes for a file.
func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
bi := &FileBasicInfo{}
if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
if err := windows.GetFileInformationByHandleEx(
windows.Handle(f.Fd()),
windows.FileBasicInfo,
(*byte)(unsafe.Pointer(bi)),
uint32(unsafe.Sizeof(*bi)),
); err != nil {
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
@ -29,7 +35,12 @@ func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
// SetFileBasicInfo sets times and attributes for a file.
func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
if err := windows.SetFileInformationByHandle(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
if err := windows.SetFileInformationByHandle(
windows.Handle(f.Fd()),
windows.FileBasicInfo,
(*byte)(unsafe.Pointer(bi)),
uint32(unsafe.Sizeof(*bi)),
); err != nil {
return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
@ -48,7 +59,10 @@ type FileStandardInfo struct {
// GetFileStandardInfo retrieves extended information for the file.
func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) {
si := &FileStandardInfo{}
if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileStandardInfo, (*byte)(unsafe.Pointer(si)), uint32(unsafe.Sizeof(*si))); err != nil {
if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()),
windows.FileStandardInfo,
(*byte)(unsafe.Pointer(si)),
uint32(unsafe.Sizeof(*si))); err != nil {
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
@ -65,7 +79,12 @@ type FileIDInfo struct {
// GetFileID retrieves the unique (volume, file ID) pair for a file.
func GetFileID(f *os.File) (*FileIDInfo, error) {
fileID := &FileIDInfo{}
if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileIdInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
if err := windows.GetFileInformationByHandleEx(
windows.Handle(f.Fd()),
windows.FileIdInfo,
(*byte)(unsafe.Pointer(fileID)),
uint32(unsafe.Sizeof(*fileID)),
); err != nil {
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)

View File

@ -4,6 +4,8 @@
package winio
import (
"context"
"errors"
"fmt"
"io"
"net"
@ -12,16 +14,87 @@ import (
"time"
"unsafe"
"golang.org/x/sys/windows"
"github.com/Microsoft/go-winio/internal/socket"
"github.com/Microsoft/go-winio/pkg/guid"
)
//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
const afHVSock = 34 // AF_HYPERV
const (
afHvSock = 34 // AF_HYPERV
// Well known Service and VM IDs
// https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards
socketError = ^uintptr(0)
)
// HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions.
func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000
return guid.GUID{}
}
// HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions.
func HvsockGUIDBroadcast() guid.GUID { // ffffffff-ffff-ffff-ffff-ffffffffffff
return guid.GUID{
Data1: 0xffffffff,
Data2: 0xffff,
Data3: 0xffff,
Data4: [8]uint8{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
}
}
// HvsockGUIDLoopback is the Loopback VmId for accepting connections to the same partition as the connector.
func HvsockGUIDLoopback() guid.GUID { // e0e16197-dd56-4a10-9195-5ee7a155a838
return guid.GUID{
Data1: 0xe0e16197,
Data2: 0xdd56,
Data3: 0x4a10,
Data4: [8]uint8{0x91, 0x95, 0x5e, 0xe7, 0xa1, 0x55, 0xa8, 0x38},
}
}
// HvsockGUIDSiloHost is the address of a silo's host partition:
// - The silo host of a hosted silo is the utility VM.
// - The silo host of a silo on a physical host is the physical host.
func HvsockGUIDSiloHost() guid.GUID { // 36bd0c5c-7276-4223-88ba-7d03b654c568
return guid.GUID{
Data1: 0x36bd0c5c,
Data2: 0x7276,
Data3: 0x4223,
Data4: [8]byte{0x88, 0xba, 0x7d, 0x03, 0xb6, 0x54, 0xc5, 0x68},
}
}
// HvsockGUIDChildren is the wildcard VmId for accepting connections from the connector's child partitions.
func HvsockGUIDChildren() guid.GUID { // 90db8b89-0d35-4f79-8ce9-49ea0ac8b7cd
return guid.GUID{
Data1: 0x90db8b89,
Data2: 0xd35,
Data3: 0x4f79,
Data4: [8]uint8{0x8c, 0xe9, 0x49, 0xea, 0xa, 0xc8, 0xb7, 0xcd},
}
}
// HvsockGUIDParent is the wildcard VmId for accepting connections from the connector's parent partition.
// Listening on this VmId accepts connection from:
// - Inside silos: silo host partition.
// - Inside hosted silo: host of the VM.
// - Inside VM: VM host.
// - Physical host: Not supported.
func HvsockGUIDParent() guid.GUID { // a42e7cda-d03f-480c-9cc2-a4de20abb878
return guid.GUID{
Data1: 0xa42e7cda,
Data2: 0xd03f,
Data3: 0x480c,
Data4: [8]uint8{0x9c, 0xc2, 0xa4, 0xde, 0x20, 0xab, 0xb8, 0x78},
}
}
// hvsockVsockServiceTemplate is the Service GUID used for the VSOCK protocol.
func hvsockVsockServiceTemplate() guid.GUID { // 00000000-facb-11e6-bd58-64006a7986d3
return guid.GUID{
Data2: 0xfacb,
Data3: 0x11e6,
Data4: [8]uint8{0xbd, 0x58, 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3},
}
}
// An HvsockAddr is an address for a AF_HYPERV socket.
type HvsockAddr struct {
@ -36,8 +109,10 @@ type rawHvsockAddr struct {
ServiceID guid.GUID
}
var _ socket.RawSockaddr = &rawHvsockAddr{}
// Network returns the address's network name, "hvsock".
func (addr *HvsockAddr) Network() string {
func (*HvsockAddr) Network() string {
return "hvsock"
}
@ -47,14 +122,14 @@ func (addr *HvsockAddr) String() string {
// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port.
func VsockServiceID(port uint32) guid.GUID {
g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3")
g := hvsockVsockServiceTemplate() // make a copy
g.Data1 = port
return g
}
func (addr *HvsockAddr) raw() rawHvsockAddr {
return rawHvsockAddr{
Family: afHvSock,
Family: afHVSock,
VMID: addr.VMID,
ServiceID: addr.ServiceID,
}
@ -65,20 +140,48 @@ func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) {
addr.ServiceID = raw.ServiceID
}
// Sockaddr returns a pointer to and the size of this struct.
//
// Implements the [socket.RawSockaddr] interface, and allows use in
// [socket.Bind] and [socket.ConnectEx].
func (r *rawHvsockAddr) Sockaddr() (unsafe.Pointer, int32, error) {
return unsafe.Pointer(r), int32(unsafe.Sizeof(rawHvsockAddr{})), nil
}
// Sockaddr interface allows use with `sockets.Bind()` and `.ConnectEx()`.
func (r *rawHvsockAddr) FromBytes(b []byte) error {
n := int(unsafe.Sizeof(rawHvsockAddr{}))
if len(b) < n {
return fmt.Errorf("got %d, want %d: %w", len(b), n, socket.ErrBufferSize)
}
copy(unsafe.Slice((*byte)(unsafe.Pointer(r)), n), b[:n])
if r.Family != afHVSock {
return fmt.Errorf("got %d, want %d: %w", r.Family, afHVSock, socket.ErrAddrFamily)
}
return nil
}
// HvsockListener is a socket listener for the AF_HYPERV address family.
type HvsockListener struct {
sock *win32File
addr HvsockAddr
}
var _ net.Listener = &HvsockListener{}
// HvsockConn is a connected socket of the AF_HYPERV address family.
type HvsockConn struct {
sock *win32File
local, remote HvsockAddr
}
func newHvSocket() (*win32File, error) {
fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1)
var _ net.Conn = &HvsockConn{}
func newHVSocket() (*win32File, error) {
fd, err := syscall.Socket(afHVSock, syscall.SOCK_STREAM, 1)
if err != nil {
return nil, os.NewSyscallError("socket", err)
}
@ -94,12 +197,12 @@ func newHvSocket() (*win32File, error) {
// ListenHvsock listens for connections on the specified hvsock address.
func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) {
l := &HvsockListener{addr: *addr}
sock, err := newHvSocket()
sock, err := newHVSocket()
if err != nil {
return nil, l.opErr("listen", err)
}
sa := addr.raw()
err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa)))
err = socket.Bind(windows.Handle(sock.handle), &sa)
if err != nil {
return nil, l.opErr("listen", os.NewSyscallError("socket", err))
}
@ -121,7 +224,7 @@ func (l *HvsockListener) Addr() net.Addr {
// Accept waits for the next connection and returns it.
func (l *HvsockListener) Accept() (_ net.Conn, err error) {
sock, err := newHvSocket()
sock, err := newHVSocket()
if err != nil {
return nil, l.opErr("accept", err)
}
@ -130,27 +233,42 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) {
sock.Close()
}
}()
c, err := l.sock.prepareIo()
c, err := l.sock.prepareIO()
if err != nil {
return nil, l.opErr("accept", err)
}
defer l.sock.wg.Done()
// AcceptEx, per documentation, requires an extra 16 bytes per address.
//
// https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-acceptex
const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{}))
var addrbuf [addrlen * 2]byte
var bytes uint32
err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o)
_, err = l.sock.asyncIo(c, nil, bytes, err)
if err != nil {
err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o)
if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil {
return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
}
conn := &HvsockConn{
sock: sock,
}
// The local address returned in the AcceptEx buffer is the same as the Listener socket's
// address. However, the service GUID reported by GetSockName is different from the Listener's
// socket, and is sometimes the same as the local address of the socket that dialed the
// address, with the service GUID.Data1 incremented, but other times is different.
// todo: does the local address matter? is the listener's address or the actual address appropriate?
conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0])))
conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen])))
// initialize the accepted socket and update its properties with those of the listening socket
if err = windows.Setsockopt(windows.Handle(sock.handle),
windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT,
(*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil {
return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err))
}
sock = nil
return conn, nil
}
@ -160,43 +278,171 @@ func (l *HvsockListener) Close() error {
return l.sock.Close()
}
/* Need to finish ConnectEx handling
func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) {
sock, err := newHvSocket()
// HvsockDialer configures and dials a Hyper-V Socket (i.e., [HvsockConn]).
type HvsockDialer struct {
// Deadline is the time the Dial operation must connect before erroring.
Deadline time.Time
// Retries is the number of additional connects to try if the connection times out, is refused,
// or the host is unreachable
Retries uint
// RetryWait is the time to wait after a connection error to retry
RetryWait time.Duration
rt *time.Timer // redial wait timer
}
// Dial the Hyper-V socket at addr.
//
// See [HvsockDialer.Dial] for more information.
func Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) {
return (&HvsockDialer{}).Dial(ctx, addr)
}
// Dial attempts to connect to the Hyper-V socket at addr, and returns a connection if successful.
// Will attempt (HvsockDialer).Retries if dialing fails, waiting (HvsockDialer).RetryWait between
// retries.
//
// Dialing can be cancelled either by providing (HvsockDialer).Deadline, or cancelling ctx.
func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) {
op := "dial"
// create the conn early to use opErr()
conn = &HvsockConn{
remote: *addr,
}
if !d.Deadline.IsZero() {
var cancel context.CancelFunc
ctx, cancel = context.WithDeadline(ctx, d.Deadline)
defer cancel()
}
// preemptive timeout/cancellation check
if err = ctx.Err(); err != nil {
return nil, conn.opErr(op, err)
}
sock, err := newHVSocket()
if err != nil {
return nil, err
return nil, conn.opErr(op, err)
}
defer func() {
if sock != nil {
sock.Close()
}
}()
c, err := sock.prepareIo()
sa := addr.raw()
err = socket.Bind(windows.Handle(sock.handle), &sa)
if err != nil {
return nil, err
return nil, conn.opErr(op, os.NewSyscallError("bind", err))
}
c, err := sock.prepareIO()
if err != nil {
return nil, conn.opErr(op, err)
}
defer sock.wg.Done()
var bytes uint32
err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o)
_, err = sock.asyncIo(ctx, c, nil, bytes, err)
for i := uint(0); i <= d.Retries; i++ {
err = socket.ConnectEx(
windows.Handle(sock.handle),
&sa,
nil, // sendBuf
0, // sendDataLen
&bytes,
(*windows.Overlapped)(unsafe.Pointer(&c.o)))
_, err = sock.asyncIO(c, nil, bytes, err)
if i < d.Retries && canRedial(err) {
if err = d.redialWait(ctx); err == nil {
continue
}
}
break
}
if err != nil {
return nil, err
return nil, conn.opErr(op, os.NewSyscallError("connectex", err))
}
conn := &HvsockConn{
sock: sock,
remote: *addr,
// update the connection properties, so shutdown can be used
if err = windows.Setsockopt(
windows.Handle(sock.handle),
windows.SOL_SOCKET,
windows.SO_UPDATE_CONNECT_CONTEXT,
nil, // optvalue
0, // optlen
); err != nil {
return nil, conn.opErr(op, os.NewSyscallError("setsockopt", err))
}
// get the local name
var sal rawHvsockAddr
err = socket.GetSockName(windows.Handle(sock.handle), &sal)
if err != nil {
return nil, conn.opErr(op, os.NewSyscallError("getsockname", err))
}
conn.local.fromRaw(&sal)
// one last check for timeout, since asyncIO doesn't check the context
if err = ctx.Err(); err != nil {
return nil, conn.opErr(op, err)
}
conn.sock = sock
sock = nil
return conn, nil
}
*/
// redialWait waits before attempting to redial, resetting the timer as appropriate.
func (d *HvsockDialer) redialWait(ctx context.Context) (err error) {
if d.RetryWait == 0 {
return nil
}
if d.rt == nil {
d.rt = time.NewTimer(d.RetryWait)
} else {
// should already be stopped and drained
d.rt.Reset(d.RetryWait)
}
select {
case <-ctx.Done():
case <-d.rt.C:
return nil
}
// stop and drain the timer
if !d.rt.Stop() {
<-d.rt.C
}
return ctx.Err()
}
// assumes error is a plain, unwrapped syscall.Errno provided by direct syscall.
func canRedial(err error) bool {
//nolint:errorlint // guaranteed to be an Errno
switch err {
case windows.WSAECONNREFUSED, windows.WSAENETUNREACH, windows.WSAETIMEDOUT,
windows.ERROR_CONNECTION_REFUSED, windows.ERROR_CONNECTION_UNAVAIL:
return true
default:
return false
}
}
func (conn *HvsockConn) opErr(op string, err error) error {
// translate from "file closed" to "socket closed"
if errors.Is(err, ErrFileClosed) {
err = socket.ErrSocketClosed
}
return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err}
}
func (conn *HvsockConn) Read(b []byte) (int, error) {
c, err := conn.sock.prepareIo()
c, err := conn.sock.prepareIO()
if err != nil {
return 0, conn.opErr("read", err)
}
@ -204,10 +450,11 @@ func (conn *HvsockConn) Read(b []byte) (int, error) {
buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
var flags, bytes uint32
err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil)
n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err)
n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err)
if err != nil {
if _, ok := err.(syscall.Errno); ok {
err = os.NewSyscallError("wsarecv", err)
var eno windows.Errno
if errors.As(err, &eno) {
err = os.NewSyscallError("wsarecv", eno)
}
return 0, conn.opErr("read", err)
} else if n == 0 {
@ -230,7 +477,7 @@ func (conn *HvsockConn) Write(b []byte) (int, error) {
}
func (conn *HvsockConn) write(b []byte) (int, error) {
c, err := conn.sock.prepareIo()
c, err := conn.sock.prepareIO()
if err != nil {
return 0, conn.opErr("write", err)
}
@ -238,10 +485,11 @@ func (conn *HvsockConn) write(b []byte) (int, error) {
buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
var bytes uint32
err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil)
n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err)
n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err)
if err != nil {
if _, ok := err.(syscall.Errno); ok {
err = os.NewSyscallError("wsasend", err)
var eno windows.Errno
if errors.As(err, &eno) {
err = os.NewSyscallError("wsasend", eno)
}
return 0, conn.opErr("write", err)
}
@ -257,13 +505,19 @@ func (conn *HvsockConn) IsClosed() bool {
return conn.sock.IsClosed()
}
// shutdown disables sending or receiving on a socket.
func (conn *HvsockConn) shutdown(how int) error {
if conn.IsClosed() {
return ErrFileClosed
return socket.ErrSocketClosed
}
err := syscall.Shutdown(conn.sock.handle, how)
if err != nil {
// If the connection was closed, shutdowns fail with "not connected"
if errors.Is(err, windows.WSAENOTCONN) ||
errors.Is(err, windows.WSAESHUTDOWN) {
err = socket.ErrSocketClosed
}
return os.NewSyscallError("shutdown", err)
}
return nil
@ -273,7 +527,7 @@ func (conn *HvsockConn) shutdown(how int) error {
func (conn *HvsockConn) CloseRead() error {
err := conn.shutdown(syscall.SHUT_RD)
if err != nil {
return conn.opErr("close", err)
return conn.opErr("closeread", err)
}
return nil
}
@ -283,7 +537,7 @@ func (conn *HvsockConn) CloseRead() error {
func (conn *HvsockConn) CloseWrite() error {
err := conn.shutdown(syscall.SHUT_WR)
if err != nil {
return conn.opErr("close", err)
return conn.opErr("closewrite", err)
}
return nil
}
@ -300,8 +554,13 @@ func (conn *HvsockConn) RemoteAddr() net.Addr {
// SetDeadline implements the net.Conn SetDeadline method.
func (conn *HvsockConn) SetDeadline(t time.Time) error {
conn.SetReadDeadline(t)
conn.SetWriteDeadline(t)
// todo: implement `SetDeadline` for `win32File`
if err := conn.SetReadDeadline(t); err != nil {
return fmt.Errorf("set read deadline: %w", err)
}
if err := conn.SetWriteDeadline(t); err != nil {
return fmt.Errorf("set write deadline: %w", err)
}
return nil
}
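// Editorial sketch, not part of the upstream file: an illustrative caller of the
// HvsockDialer added above. The VSOCK port (1024) and retry policy are assumptions
// for the example, not values mandated by the library.
func exampleDialLoopbackVsock(ctx context.Context) (*HvsockConn, error) {
	addr := &HvsockAddr{
		VMID:      HvsockGUIDLoopback(), // connect within the same partition
		ServiceID: VsockServiceID(1024), // AF_VSOCK port 1024 (example)
	}
	d := &HvsockDialer{Retries: 3, RetryWait: time.Second}
	return d.Dial(ctx, addr)
}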

View File

@ -0,0 +1,2 @@
// This package contains Win32 filesystem functionality.
package fs

View File

@ -0,0 +1,202 @@
//go:build windows
package fs
import (
"golang.org/x/sys/windows"
"github.com/Microsoft/go-winio/internal/stringbuffer"
)
//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go
// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew
//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW
const NullHandle windows.Handle = 0
// AccessMask defines standard, specific, and generic rights.
//
// Bitmask:
// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
// +---------------+---------------+-------------------------------+
// |G|G|G|G|Resvd|A| StandardRights| SpecificRights |
// |R|W|E|A| |S| | |
// +-+-------------+---------------+-------------------------------+
//
// GR Generic Read
// GW Generic Write
// GE Generic Execute
// GA Generic All
// Resvd Reserved
// AS Access Security System
//
// https://learn.microsoft.com/en-us/windows/win32/secauthz/access-mask
//
// https://learn.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights
//
// https://learn.microsoft.com/en-us/windows/win32/fileio/file-access-rights-constants
type AccessMask = windows.ACCESS_MASK
//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
const (
// Not actually any.
//
// For CreateFile: "query certain metadata such as file, directory, or device attributes without accessing that file or device"
// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters
FILE_ANY_ACCESS AccessMask = 0
// Specific Object Access
// from ntioapi.h
FILE_READ_DATA AccessMask = (0x0001) // file & pipe
FILE_LIST_DIRECTORY AccessMask = (0x0001) // directory
FILE_WRITE_DATA AccessMask = (0x0002) // file & pipe
FILE_ADD_FILE AccessMask = (0x0002) // directory
FILE_APPEND_DATA AccessMask = (0x0004) // file
FILE_ADD_SUBDIRECTORY AccessMask = (0x0004) // directory
FILE_CREATE_PIPE_INSTANCE AccessMask = (0x0004) // named pipe
FILE_READ_EA AccessMask = (0x0008) // file & directory
FILE_READ_PROPERTIES AccessMask = FILE_READ_EA
FILE_WRITE_EA AccessMask = (0x0010) // file & directory
FILE_WRITE_PROPERTIES AccessMask = FILE_WRITE_EA
FILE_EXECUTE AccessMask = (0x0020) // file
FILE_TRAVERSE AccessMask = (0x0020) // directory
FILE_DELETE_CHILD AccessMask = (0x0040) // directory
FILE_READ_ATTRIBUTES AccessMask = (0x0080) // all
FILE_WRITE_ATTRIBUTES AccessMask = (0x0100) // all
FILE_ALL_ACCESS AccessMask = (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1FF)
FILE_GENERIC_READ AccessMask = (STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE)
FILE_GENERIC_WRITE AccessMask = (STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE)
FILE_GENERIC_EXECUTE AccessMask = (STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE)
SPECIFIC_RIGHTS_ALL AccessMask = 0x0000FFFF
// Standard Access
// from ntseapi.h
DELETE AccessMask = 0x0001_0000
READ_CONTROL AccessMask = 0x0002_0000
WRITE_DAC AccessMask = 0x0004_0000
WRITE_OWNER AccessMask = 0x0008_0000
SYNCHRONIZE AccessMask = 0x0010_0000
STANDARD_RIGHTS_REQUIRED AccessMask = 0x000F_0000
STANDARD_RIGHTS_READ AccessMask = READ_CONTROL
STANDARD_RIGHTS_WRITE AccessMask = READ_CONTROL
STANDARD_RIGHTS_EXECUTE AccessMask = READ_CONTROL
STANDARD_RIGHTS_ALL AccessMask = 0x001F_0000
)
type FileShareMode uint32
//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
const (
FILE_SHARE_NONE FileShareMode = 0x00
FILE_SHARE_READ FileShareMode = 0x01
FILE_SHARE_WRITE FileShareMode = 0x02
FILE_SHARE_DELETE FileShareMode = 0x04
FILE_SHARE_VALID_FLAGS FileShareMode = 0x07
)
type FileCreationDisposition uint32
//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
const (
// from winbase.h
CREATE_NEW FileCreationDisposition = 0x01
CREATE_ALWAYS FileCreationDisposition = 0x02
OPEN_EXISTING FileCreationDisposition = 0x03
OPEN_ALWAYS FileCreationDisposition = 0x04
TRUNCATE_EXISTING FileCreationDisposition = 0x05
)
// CreateFile and co. take flags or attributes together as one parameter.
// Define alias until we can use generics to allow both
// https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants
type FileFlagOrAttribute uint32
//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
const ( // from winnt.h
FILE_FLAG_WRITE_THROUGH FileFlagOrAttribute = 0x8000_0000
FILE_FLAG_OVERLAPPED FileFlagOrAttribute = 0x4000_0000
FILE_FLAG_NO_BUFFERING FileFlagOrAttribute = 0x2000_0000
FILE_FLAG_RANDOM_ACCESS FileFlagOrAttribute = 0x1000_0000
FILE_FLAG_SEQUENTIAL_SCAN FileFlagOrAttribute = 0x0800_0000
FILE_FLAG_DELETE_ON_CLOSE FileFlagOrAttribute = 0x0400_0000
FILE_FLAG_BACKUP_SEMANTICS FileFlagOrAttribute = 0x0200_0000
FILE_FLAG_POSIX_SEMANTICS FileFlagOrAttribute = 0x0100_0000
FILE_FLAG_OPEN_REPARSE_POINT FileFlagOrAttribute = 0x0020_0000
FILE_FLAG_OPEN_NO_RECALL FileFlagOrAttribute = 0x0010_0000
FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000
)
type FileSQSFlag = FileFlagOrAttribute
//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
const ( // from winbase.h
SECURITY_ANONYMOUS FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16)
SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16)
SECURITY_IMPERSONATION FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16)
SECURITY_DELEGATION FileSQSFlag = FileSQSFlag(SecurityDelegation << 16)
SECURITY_SQOS_PRESENT FileSQSFlag = 0x00100000
SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F0000
)
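// Illustrative sketch (not part of the upstream file): how the typed constants
// above compose in a call to this package's CreateFile wrapper (declared in the
// generated zsyscall file below). openForOverlappedRead is a hypothetical helper.
func openForOverlappedRead(path string) (windows.Handle, error) {
	return CreateFile(path,
		FILE_GENERIC_READ,    // AccessMask
		FILE_SHARE_READ,      // FileShareMode
		nil,                  // no security attributes
		OPEN_EXISTING,        // FileCreationDisposition
		FILE_FLAG_OVERLAPPED, // FileFlagOrAttribute
		0,                    // no template file handle
	)
}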
// GetFinalPathNameByHandle flags
//
// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew#parameters
type GetFinalPathFlag uint32
//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
const (
GetFinalPathDefaultFlag GetFinalPathFlag = 0x0
FILE_NAME_NORMALIZED GetFinalPathFlag = 0x0
FILE_NAME_OPENED GetFinalPathFlag = 0x8
VOLUME_NAME_DOS GetFinalPathFlag = 0x0
VOLUME_NAME_GUID GetFinalPathFlag = 0x1
VOLUME_NAME_NT GetFinalPathFlag = 0x2
VOLUME_NAME_NONE GetFinalPathFlag = 0x4
)
// getFinalPathNameByHandle facilitates calling the Windows API GetFinalPathNameByHandle
// with the given handle and flags. It transparently takes care of creating a buffer of the
// correct size for the call.
//
// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew
func GetFinalPathNameByHandle(h windows.Handle, flags GetFinalPathFlag) (string, error) {
b := stringbuffer.NewWString()
//TODO: can loop infinitely if Win32 keeps returning the same (or a larger) n?
for {
n, err := windows.GetFinalPathNameByHandle(h, b.Pointer(), b.Cap(), uint32(flags))
if err != nil {
return "", err
}
// If the buffer wasn't large enough, n will be the total size needed (including null terminator).
// Resize and try again.
if n > b.Cap() {
b.ResizeTo(n)
continue
}
// If the buffer is large enough, n will be the size not including the null terminator.
// Convert to a Go string and return.
return b.String(), nil
}
}
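// Usage sketch (illustrative, not an upstream helper): resolve the normalized DOS
// path for an already-opened handle using the default flags
// (FILE_NAME_NORMALIZED | VOLUME_NAME_DOS).
func finalDOSPath(h windows.Handle) (string, error) {
	return GetFinalPathNameByHandle(h, GetFinalPathDefaultFlag)
}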

View File

@ -0,0 +1,12 @@
package fs
// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level
type SecurityImpersonationLevel int32 // C default enums underlying type is `int`, which is Go `int32`
// Impersonation levels
const (
SecurityAnonymous SecurityImpersonationLevel = 0
SecurityIdentification SecurityImpersonationLevel = 1
SecurityImpersonation SecurityImpersonationLevel = 2
SecurityDelegation SecurityImpersonationLevel = 3
)

View File

@ -0,0 +1,64 @@
//go:build windows
// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
package fs
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return errERROR_EINVAL
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values seen on Windows. (perhaps when running
// all.bat?)
return e
}
var (
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
procCreateFileW = modkernel32.NewProc("CreateFileW")
)
func CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile)
}
func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
handle = windows.Handle(r0)
if handle == windows.InvalidHandle {
err = errnoErr(e1)
}
return
}

View File

@ -0,0 +1,20 @@
package socket
import (
"unsafe"
)
// RawSockaddr allows structs to be used with [Bind] and [ConnectEx]. The
// struct must meet the Win32 sockaddr requirements specified here:
// https://docs.microsoft.com/en-us/windows/win32/winsock/sockaddr-2
//
// Specifically, the struct must be at least large enough to hold an int16 (unsigned short)
// for the address family.
type RawSockaddr interface {
// Sockaddr returns a pointer to the RawSockaddr and its struct size, allowing
// for the RawSockaddr's data to be overwritten by syscalls (if necessary).
//
// It is the caller's responsibility to validate that the values are valid; invalid
// pointers or size can cause a panic.
Sockaddr() (unsafe.Pointer, int32, error)
}
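// Illustrative sketch of a type satisfying RawSockaddr; the layout mirrors a
// sockaddr_in and is an assumption for demonstration, not an upstream type.
type rawSockaddrInet4 struct {
	Family uint16  // address family, e.g. AF_INET
	Port   uint16  // port in network byte order
	Addr   [4]byte // IPv4 address
	Zero   [8]byte // padding, as required by sockaddr_in
}

// Sockaddr returns a pointer to the struct and its size so syscalls can fill it in.
func (r *rawSockaddrInet4) Sockaddr() (unsafe.Pointer, int32, error) {
	return unsafe.Pointer(r), int32(unsafe.Sizeof(*r)), nil
}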

View File

@ -0,0 +1,179 @@
//go:build windows
package socket
import (
"errors"
"fmt"
"net"
"sync"
"syscall"
"unsafe"
"github.com/Microsoft/go-winio/pkg/guid"
"golang.org/x/sys/windows"
)
//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go socket.go
//sys getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getsockname
//sys getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getpeername
//sys bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
const socketError = uintptr(^uint32(0))
var (
// todo(helsaawy): create custom error types to store the desired vs actual size and addr family?
ErrBufferSize = errors.New("buffer size")
ErrAddrFamily = errors.New("address family")
ErrInvalidPointer = errors.New("invalid pointer")
ErrSocketClosed = fmt.Errorf("socket closed: %w", net.ErrClosed)
)
// todo(helsaawy): replace these with generics, ie: GetSockName[S RawSockaddr](s windows.Handle) (S, error)
// GetSockName writes the local address of socket s to the [RawSockaddr] rsa.
// If rsa is not large enough, [windows.WSAEFAULT] is returned.
func GetSockName(s windows.Handle, rsa RawSockaddr) error {
ptr, l, err := rsa.Sockaddr()
if err != nil {
return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
}
// although getsockname returns WSAEFAULT if the buffer is too small, it does not set
// &l to the correct size, so--apart from doubling the buffer repeatedly--there is no remedy
return getsockname(s, ptr, &l)
}
// GetPeerName returns the remote address the socket is connected to.
//
// See [GetSockName] for more information.
func GetPeerName(s windows.Handle, rsa RawSockaddr) error {
ptr, l, err := rsa.Sockaddr()
if err != nil {
return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
}
return getpeername(s, ptr, &l)
}
func Bind(s windows.Handle, rsa RawSockaddr) (err error) {
ptr, l, err := rsa.Sockaddr()
if err != nil {
return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
}
return bind(s, ptr, l)
}
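// Usage sketch (illustrative): bind a freshly created TCP socket to an ephemeral
// port and read back the address chosen by the stack. rawSockaddrInet4 is the
// hypothetical RawSockaddr implementation sketched alongside the interface.
func bindAndQuery(s windows.Handle) error {
	local := &rawSockaddrInet4{Family: uint16(windows.AF_INET)} // port 0: stack picks one
	if err := Bind(s, local); err != nil {
		return err
	}
	return GetSockName(s, local) // local now holds the bound address and port
}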
// "golang.org/x/sys/windows".ConnectEx and .Bind only accept internal implementations of the
// their sockaddr interface, so they cannot be used with HvsockAddr
// Replicate functionality here from
// https://cs.opensource.google/go/x/sys/+/master:windows/syscall_windows.go
// The function pointers to `AcceptEx`, `ConnectEx` and `GetAcceptExSockaddrs` must be loaded at
// runtime via a WSAIoctl call:
// https://docs.microsoft.com/en-us/windows/win32/api/Mswsock/nc-mswsock-lpfn_connectex#remarks
type runtimeFunc struct {
id guid.GUID
once sync.Once
addr uintptr
err error
}
func (f *runtimeFunc) Load() error {
f.once.Do(func() {
var s windows.Handle
s, f.err = windows.Socket(windows.AF_INET, windows.SOCK_STREAM, windows.IPPROTO_TCP)
if f.err != nil {
return
}
defer windows.CloseHandle(s) //nolint:errcheck
var n uint32
f.err = windows.WSAIoctl(s,
windows.SIO_GET_EXTENSION_FUNCTION_POINTER,
(*byte)(unsafe.Pointer(&f.id)),
uint32(unsafe.Sizeof(f.id)),
(*byte)(unsafe.Pointer(&f.addr)),
uint32(unsafe.Sizeof(f.addr)),
&n,
nil, // overlapped
0, // completionRoutine
)
})
return f.err
}
var (
// todo: add `AcceptEx` and `GetAcceptExSockaddrs`
WSAID_CONNECTEX = guid.GUID{ //revive:disable-line:var-naming ALL_CAPS
Data1: 0x25a207b9,
Data2: 0xddf3,
Data3: 0x4660,
Data4: [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e},
}
connectExFunc = runtimeFunc{id: WSAID_CONNECTEX}
)
func ConnectEx(
fd windows.Handle,
rsa RawSockaddr,
sendBuf *byte,
sendDataLen uint32,
bytesSent *uint32,
overlapped *windows.Overlapped,
) error {
if err := connectExFunc.Load(); err != nil {
return fmt.Errorf("failed to load ConnectEx function pointer: %w", err)
}
ptr, n, err := rsa.Sockaddr()
if err != nil {
return err
}
return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped)
}
// BOOL LpfnConnectex(
// [in] SOCKET s,
// [in] const sockaddr *name,
// [in] int namelen,
// [in, optional] PVOID lpSendBuffer,
// [in] DWORD dwSendDataLength,
// [out] LPDWORD lpdwBytesSent,
// [in] LPOVERLAPPED lpOverlapped
// )
func connectEx(
s windows.Handle,
name unsafe.Pointer,
namelen int32,
sendBuf *byte,
sendDataLen uint32,
bytesSent *uint32,
overlapped *windows.Overlapped,
) (err error) {
// todo: after upgrading to 1.18, switch from syscall.Syscall9 to syscall.SyscallN
r1, _, e1 := syscall.Syscall9(connectExFunc.addr,
7,
uintptr(s),
uintptr(name),
uintptr(namelen),
uintptr(unsafe.Pointer(sendBuf)),
uintptr(sendDataLen),
uintptr(unsafe.Pointer(bytesSent)),
uintptr(unsafe.Pointer(overlapped)),
0,
0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return err
}

View File

@ -0,0 +1,72 @@
//go:build windows
// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
package socket
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return errERROR_EINVAL
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values seen on Windows. (perhaps when running
// all.bat?)
return e
}
var (
modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")
procbind = modws2_32.NewProc("bind")
procgetpeername = modws2_32.NewProc("getpeername")
procgetsockname = modws2_32.NewProc("getsockname")
)
func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) {
r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
if r1 == socketError {
err = errnoErr(e1)
}
return
}
func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) {
r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
if r1 == socketError {
err = errnoErr(e1)
}
return
}
func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) {
r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
if r1 == socketError {
err = errnoErr(e1)
}
return
}

View File

@ -0,0 +1,132 @@
package stringbuffer
import (
"sync"
"unicode/utf16"
)
// TODO: worth exporting and using in mkwinsyscall?
// MinWStringCap is the minimum buffer capacity (in uint16s) kept in the pool, chosen somewhat arbitrarily to accommodate
// large path strings:
// MAX_PATH (260) + size of volume GUID prefix (49) + null terminator = 310.
const MinWStringCap = 310
// use *[]uint16 since []uint16 creates an extra allocation where the slice header
// is copied to heap and then referenced via pointer in the interface header that sync.Pool
// stores.
var pathPool = sync.Pool{ // if go1.18+ adds Pool[T], use that to store []uint16 directly
New: func() interface{} {
b := make([]uint16, MinWStringCap)
return &b
},
}
func newBuffer() []uint16 { return *(pathPool.Get().(*[]uint16)) }
// freeBuffer copies the slice header data, and puts a pointer to that in the pool.
// This avoids taking a pointer to the slice header in WString, which can be set to nil.
func freeBuffer(b []uint16) { pathPool.Put(&b) }
// WString is a wide string buffer ([]uint16) meant for storing UTF-16 encoded strings
// for interacting with Win32 APIs.
// Sizes are specified as uint32 and not int.
//
// It is not thread safe.
type WString struct {
// type-def allows casting to []uint16 directly, use struct to prevent that and allow adding fields in the future.
// raw buffer
b []uint16
}
// NewWString returns a [WString] allocated from a shared pool with an
// initial capacity of at least [MinWStringCap].
// Since the buffer may have been previously used, its contents are not guaranteed to be empty.
//
// The buffer should be freed via [WString.Free]
func NewWString() *WString {
return &WString{
b: newBuffer(),
}
}
func (b *WString) Free() {
if b.empty() {
return
}
freeBuffer(b.b)
b.b = nil
}
// ResizeTo grows the buffer to at least c and returns the new capacity, freeing the
// previous buffer back into the pool.
func (b *WString) ResizeTo(c uint32) uint32 {
// already sufficient (or c is 0)
if c <= b.Cap() {
return b.Cap()
}
if c <= MinWStringCap {
c = MinWStringCap
}
// allocate at least double the current capacity, as is done in [bytes.Buffer] and other places
if c <= 2*b.Cap() {
c = 2 * b.Cap()
}
b2 := make([]uint16, c)
if !b.empty() {
copy(b2, b.b)
freeBuffer(b.b)
}
b.b = b2
return c
}
// Buffer returns the underlying []uint16 buffer.
func (b *WString) Buffer() []uint16 {
if b.empty() {
return nil
}
return b.b
}
// Pointer returns a pointer to the first uint16 in the buffer.
// If [WString.Free] has already been called, the pointer will be nil.
func (b *WString) Pointer() *uint16 {
if b.empty() {
return nil
}
return &b.b[0]
}
// String returns the UTF-8 encoding of the UTF-16 string in the buffer.
//
// It assumes that the data is null-terminated.
func (b *WString) String() string {
// Using [windows.UTF16ToString] would require importing "golang.org/x/sys/windows"
// and would make this code Windows-only, which makes no sense.
// So copy UTF16ToString code into here.
// If other windows-specific code is added, switch to [windows.UTF16ToString]
s := b.b
for i, v := range s {
if v == 0 {
s = s[:i]
break
}
}
return string(utf16.Decode(s))
}
// Cap returns the underlying buffer capacity.
func (b *WString) Cap() uint32 {
if b.empty() {
return 0
}
return b.cap()
}
func (b *WString) cap() uint32 { return uint32(cap(b.b)) }
func (b *WString) empty() bool { return b == nil || b.cap() == 0 }
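// Usage sketch (illustrative): the intended call pattern around a Win32 API that
// reports the required length when the buffer is too small. fill is a hypothetical
// stand-in for such a call (compare GetFinalPathNameByHandle in internal/fs).
func fillAndDecode(fill func(buf *uint16, bufCap uint32) (uint32, error)) (string, error) {
	b := NewWString()
	defer b.Free()
	for {
		n, err := fill(b.Pointer(), b.Cap())
		if err != nil {
			return "", err
		}
		if n > b.Cap() { // buffer too small: n is the required size, grow and retry
			b.ResizeTo(n)
			continue
		}
		return b.String(), nil // n excludes the terminating NUL
	}
}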

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package winio
@ -13,18 +14,21 @@ import (
"syscall"
"time"
"unsafe"
"golang.org/x/sys/windows"
"github.com/Microsoft/go-winio/internal/fs"
)
//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile
//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U
//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl
//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile
//sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U
//sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl
type ioStatusBlock struct {
Status, Information uintptr
@ -51,45 +55,22 @@ type securityDescriptor struct {
Control uint16
Owner uintptr
Group uintptr
Sacl uintptr
Dacl uintptr
Sacl uintptr //revive:disable-line:var-naming SACL, not Sacl
Dacl uintptr //revive:disable-line:var-naming DACL, not Dacl
}
type ntstatus int32
type ntStatus int32
func (status ntstatus) Err() error {
func (status ntStatus) Err() error {
if status >= 0 {
return nil
}
return rtlNtStatusToDosError(status)
}
const (
cERROR_PIPE_BUSY = syscall.Errno(231)
cERROR_NO_DATA = syscall.Errno(232)
cERROR_PIPE_CONNECTED = syscall.Errno(535)
cERROR_SEM_TIMEOUT = syscall.Errno(121)
cSECURITY_SQOS_PRESENT = 0x100000
cSECURITY_ANONYMOUS = 0
cPIPE_TYPE_MESSAGE = 4
cPIPE_READMODE_MESSAGE = 2
cFILE_OPEN = 1
cFILE_CREATE = 2
cFILE_PIPE_MESSAGE_TYPE = 1
cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2
cSE_DACL_PRESENT = 4
)
var (
// ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed.
// This error should match net.errClosing since docker takes a dependency on its text.
ErrPipeListenerClosed = errors.New("use of closed network connection")
ErrPipeListenerClosed = net.ErrClosed
errPipeWriteClosed = errors.New("pipe has been closed for write")
)
@ -116,9 +97,10 @@ func (f *win32Pipe) RemoteAddr() net.Addr {
}
func (f *win32Pipe) SetDeadline(t time.Time) error {
f.SetReadDeadline(t)
f.SetWriteDeadline(t)
return nil
if err := f.SetReadDeadline(t); err != nil {
return err
}
return f.SetWriteDeadline(t)
}
// CloseWrite closes the write side of a message pipe in byte mode.
@ -157,14 +139,14 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
return 0, io.EOF
}
n, err := f.win32File.Read(b)
if err == io.EOF {
if err == io.EOF { //nolint:errorlint
// If this was the result of a zero-byte read, then
// it is possible that the read was due to a zero-size
// message. Since we are simulating CloseWrite with a
// zero-byte message, ensure that all future Read() calls
// also return EOF.
f.readEOF = true
} else if err == syscall.ERROR_MORE_DATA {
} else if err == syscall.ERROR_MORE_DATA { //nolint:errorlint // err is Errno
// ERROR_MORE_DATA indicates that the pipe's read mode is message mode
// and the message still has more bytes. Treat this as a success, since
// this package presents all named pipes as byte streams.
@ -173,7 +155,7 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
return n, err
}
func (s pipeAddress) Network() string {
func (pipeAddress) Network() string {
return "pipe"
}
@ -182,18 +164,25 @@ func (s pipeAddress) String() string {
}
// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) {
func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask) (syscall.Handle, error) {
for {
select {
case <-ctx.Done():
return syscall.Handle(0), ctx.Err()
default:
h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
wh, err := fs.CreateFile(*path,
access,
0, // mode
nil, // security attributes
fs.OPEN_EXISTING,
fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.SECURITY_ANONYMOUS,
0, // template file handle
)
h := syscall.Handle(wh)
if err == nil {
return h, nil
}
if err != cERROR_PIPE_BUSY {
if err != windows.ERROR_PIPE_BUSY { //nolint:errorlint // err is Errno
return h, &os.PathError{Err: err, Op: "open", Path: *path}
}
// Wait 10 msec and try again. This is a rather simplistic
@ -213,9 +202,10 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
} else {
absTimeout = time.Now().Add(2 * time.Second)
}
ctx, _ := context.WithDeadline(context.Background(), absTimeout)
ctx, cancel := context.WithDeadline(context.Background(), absTimeout)
defer cancel()
conn, err := DialPipeContext(ctx, path)
if err == context.DeadlineExceeded {
if errors.Is(err, context.DeadlineExceeded) {
return nil, ErrTimeout
}
return conn, err
@ -232,7 +222,7 @@ func DialPipeContext(ctx context.Context, path string) (net.Conn, error) {
func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) {
var err error
var h syscall.Handle
h, err = tryDialPipe(ctx, &path, access)
h, err = tryDialPipe(ctx, &path, fs.AccessMask(access))
if err != nil {
return nil, err
}
@ -251,7 +241,7 @@ func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn,
// If the pipe is in message mode, return a message byte pipe, which
// supports CloseWrite().
if flags&cPIPE_TYPE_MESSAGE != 0 {
if flags&windows.PIPE_TYPE_MESSAGE != 0 {
return &win32MessageBytePipe{
win32Pipe: win32Pipe{win32File: f, path: path},
}, nil
@ -283,17 +273,22 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
oa.Length = unsafe.Sizeof(oa)
var ntPath unicodeString
if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil {
if err := rtlDosPathNameToNtPathName(&path16[0],
&ntPath,
0,
0,
).Err(); err != nil {
return 0, &os.PathError{Op: "open", Path: path, Err: err}
}
defer localFree(ntPath.Buffer)
oa.ObjectName = &ntPath
oa.Attributes = windows.OBJ_CASE_INSENSITIVE
// The security descriptor is only needed for the first pipe.
if first {
if sd != nil {
len := uint32(len(sd))
sdb := localAlloc(0, len)
l := uint32(len(sd))
sdb := localAlloc(0, l)
defer localFree(sdb)
copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd)
oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb))
@ -301,28 +296,28 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
// Construct the default named pipe security descriptor.
var dacl uintptr
if err := rtlDefaultNpAcl(&dacl).Err(); err != nil {
return 0, fmt.Errorf("getting default named pipe ACL: %s", err)
return 0, fmt.Errorf("getting default named pipe ACL: %w", err)
}
defer localFree(dacl)
sdb := &securityDescriptor{
Revision: 1,
Control: cSE_DACL_PRESENT,
Control: windows.SE_DACL_PRESENT,
Dacl: dacl,
}
oa.SecurityDescriptor = sdb
}
}
typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS)
typ := uint32(windows.FILE_PIPE_REJECT_REMOTE_CLIENTS)
if c.MessageMode {
typ |= cFILE_PIPE_MESSAGE_TYPE
typ |= windows.FILE_PIPE_MESSAGE_TYPE
}
disposition := uint32(cFILE_OPEN)
disposition := uint32(windows.FILE_OPEN)
access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE)
if first {
disposition = cFILE_CREATE
disposition = windows.FILE_CREATE
// By not asking for read or write access, the named pipe file system
// will put this pipe into an initially disconnected state, blocking
// client connections until the next call with first == false.
@ -335,7 +330,20 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
h syscall.Handle
iosb ioStatusBlock
)
err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err()
err = ntCreateNamedPipeFile(&h,
access,
&oa,
&iosb,
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE,
disposition,
0,
typ,
0,
0,
0xffffffff,
uint32(c.InputBufferSize),
uint32(c.OutputBufferSize),
&timeout).Err()
if err != nil {
return 0, &os.PathError{Op: "open", Path: path, Err: err}
}
@ -380,7 +388,7 @@ func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) {
p.Close()
p = nil
err = <-ch
if err == nil || err == ErrFileClosed {
if err == nil || err == ErrFileClosed { //nolint:errorlint // err is Errno
err = ErrPipeListenerClosed
}
}
@ -402,12 +410,12 @@ func (l *win32PipeListener) listenerRoutine() {
p, err = l.makeConnectedServerPipe()
// If the connection was immediately closed by the client, try
// again.
if err != cERROR_NO_DATA {
if err != windows.ERROR_NO_DATA { //nolint:errorlint // err is Errno
break
}
}
responseCh <- acceptResponse{p, err}
closed = err == ErrPipeListenerClosed
closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno
}
}
syscall.Close(l.firstHandle)
@ -469,15 +477,15 @@ func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
}
func connectPipe(p *win32File) error {
c, err := p.prepareIo()
c, err := p.prepareIO()
if err != nil {
return err
}
defer p.wg.Done()
err = connectNamedPipe(p.handle, &c.o)
_, err = p.asyncIo(c, nil, 0, err)
if err != nil && err != cERROR_PIPE_CONNECTED {
_, err = p.asyncIO(c, nil, 0, err)
if err != nil && err != windows.ERROR_PIPE_CONNECTED { //nolint:errorlint // err is Errno
return err
}
return nil

View File

@ -0,0 +1,308 @@
//go:build windows
// +build windows
package bindfilter
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./bind_filter.go
//sys bfSetupFilter(jobHandle windows.Handle, flags uint32, virtRootPath string, virtTargetPath string, virtExceptions **uint16, virtExceptionPathCount uint32) (hr error) = bindfltapi.BfSetupFilter?
//sys bfRemoveMapping(jobHandle windows.Handle, virtRootPath string) (hr error) = bindfltapi.BfRemoveMapping?
//sys bfGetMappings(flags uint32, jobHandle windows.Handle, virtRootPath *uint16, sid *windows.SID, bufferSize *uint32, outBuffer *byte) (hr error) = bindfltapi.BfGetMappings?
// BfSetupFilter flags. See:
// https://github.com/microsoft/BuildXL/blob/a6dce509f0d4f774255e5fbfb75fa6d5290ed163/Public/Src/Utilities/Native/Processes/Windows/NativeContainerUtilities.cs#L193-L240
//
//nolint:revive // var-naming: ALL_CAPS
const (
BINDFLT_FLAG_READ_ONLY_MAPPING uint32 = 0x00000001
// Tells bindflt to fail mapping with STATUS_INVALID_PARAMETER if a mapping produces
// multiple targets.
BINDFLT_FLAG_NO_MULTIPLE_TARGETS uint32 = 0x00000040
)
//nolint:revive // var-naming: ALL_CAPS
const (
BINDFLT_GET_MAPPINGS_FLAG_VOLUME uint32 = 0x00000001
BINDFLT_GET_MAPPINGS_FLAG_SILO uint32 = 0x00000002
BINDFLT_GET_MAPPINGS_FLAG_USER uint32 = 0x00000004
)
// ApplyFileBinding creates a global mount of the source in root, with an optional
// read only flag.
// The bind filter allows us to create mounts of directories and volumes. By default it allows
// us to mount multiple sources inside a single root, acting as an overlay. Files from the
// second source will supersede the first source that was mounted.
// This function disables this behavior and sets the BINDFLT_FLAG_NO_MULTIPLE_TARGETS flag
// on the mount.
func ApplyFileBinding(root, source string, readOnly bool) error {
// The parent directory needs to exist for the bind to work. MkdirAll stats and
// returns nil if the directory already exists, so it is safe to call it
// every time.
if err := os.MkdirAll(filepath.Dir(root), 0); err != nil {
return err
}
if strings.Contains(source, "Volume{") && !strings.HasSuffix(source, "\\") {
// Add trailing slash to volumes, otherwise we get an error when binding it to
// a folder.
source = source + "\\"
}
flags := BINDFLT_FLAG_NO_MULTIPLE_TARGETS
if readOnly {
flags |= BINDFLT_FLAG_READ_ONLY_MAPPING
}
// Set the job handle to 0 to create a global mount.
if err := bfSetupFilter(
0,
flags,
root,
source,
nil,
0,
); err != nil {
return fmt.Errorf("failed to bind target %q to root %q: %w", source, root, err)
}
return nil
}
// RemoveFileBinding removes a mount from the root path.
func RemoveFileBinding(root string) error {
if err := bfRemoveMapping(0, root); err != nil {
return fmt.Errorf("removing file binding: %w", err)
}
return nil
}
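// Usage sketch (illustrative paths): bind a source directory read-only under a
// mount point and tear the mapping down again when done.
func bindExample() error {
	root := `C:\ProgramData\test\mnt`
	source := `C:\ProgramData\test\layer1`
	if err := ApplyFileBinding(root, source, true); err != nil { // true = read-only
		return err
	}
	defer func() { _ = RemoveFileBinding(root) }()
	// ... read files under root ...
	return nil
}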
// GetBindMappings returns a list of bind mappings that have their root on a
// particular volume. The volumePath parameter can be any path that exists on
// a volume. For example, if a number of mappings are created in C:\ProgramData\test,
// to get a list of those mappings, the volumePath parameter would have to be set to
// C:\ or the VOLUME_NAME_GUID notation of C:\ (\\?\Volume{GUID}\), or any child
// path that exists.
func GetBindMappings(volumePath string) ([]BindMapping, error) {
rootPtr, err := windows.UTF16PtrFromString(volumePath)
if err != nil {
return nil, err
}
flags := BINDFLT_GET_MAPPINGS_FLAG_VOLUME
// allocate a large buffer for results
var outBuffSize uint32 = 256 * 1024
buf := make([]byte, outBuffSize)
if err := bfGetMappings(flags, 0, rootPtr, nil, &outBuffSize, &buf[0]); err != nil {
return nil, err
}
if outBuffSize < 12 {
return nil, fmt.Errorf("invalid buffer returned")
}
result := buf[:outBuffSize]
// The first 12 bytes are the three uint32 fields in getMappingsResponseHeader{}
headerBuffer := result[:12]
// The alternative to using unsafe and casting it to the above defined structures, is to manually
// parse the fields. Not too terrible, but not sure it'd be worth the trouble.
header := *(*getMappingsResponseHeader)(unsafe.Pointer(&headerBuffer[0]))
if header.MappingCount == 0 {
// no mappings
return []BindMapping{}, nil
}
mappingsBuffer := result[12 : int(unsafe.Sizeof(mappingEntry{}))*int(header.MappingCount)]
// Get a pointer to the first mapping in the slice
mappingsPointer := (*mappingEntry)(unsafe.Pointer(&mappingsBuffer[0]))
// Get slice of mappings
mappings := unsafe.Slice(mappingsPointer, header.MappingCount)
mappingEntries := make([]BindMapping, header.MappingCount)
for i := 0; i < int(header.MappingCount); i++ {
bindMapping, err := getBindMappingFromBuffer(result, mappings[i])
if err != nil {
return nil, fmt.Errorf("fetching bind mappings: %w", err)
}
mappingEntries[i] = bindMapping
}
return mappingEntries, nil
}
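// Usage sketch (illustrative): enumerate the mappings rooted on the C: volume
// and print the mount point, targets and flags of each.
func listMappingsExample() error {
	mappings, err := GetBindMappings(`C:\`)
	if err != nil {
		return err
	}
	for _, m := range mappings {
		fmt.Printf("%s -> %v (flags %#x)\n", m.MountPoint, m.Targets, m.Flags)
	}
	return nil
}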
// mappingEntry holds information about where in the response buffer we can
// find information about the virtual root (the mount point) and the targets (sources)
// that get mounted, as well as the flags used to bind the targets to the virtual root.
type mappingEntry struct {
VirtRootLength uint32
VirtRootOffset uint32
Flags uint32
NumberOfTargets uint32
TargetEntriesOffset uint32
}
type mappingTargetEntry struct {
TargetRootLength uint32
TargetRootOffset uint32
}
// getMappingsResponseHeader represents the first 12 bytes of the BfGetMappings() response.
// It gives us the size of the buffer, the status of the call and the number of mappings.
// A response consists of this header, followed by the mapping entries and the
// path data they reference by offset.
type getMappingsResponseHeader struct {
Size uint32
Status uint32
MappingCount uint32
}
type BindMapping struct {
MountPoint string
Flags uint32
Targets []string
}
func decodeEntry(buffer []byte) (string, error) {
name := make([]uint16, len(buffer)/2)
err := binary.Read(bytes.NewReader(buffer), binary.LittleEndian, &name)
if err != nil {
return "", fmt.Errorf("decoding name: %w", err)
}
return windows.UTF16ToString(name), nil
}
func getTargetsFromBuffer(buffer []byte, offset, count int) ([]string, error) {
if len(buffer) < offset+count*6 {
return nil, fmt.Errorf("invalid buffer")
}
targets := make([]string, count)
for i := 0; i < count; i++ {
entryBuf := buffer[offset+i*8 : offset+i*8+8]
tgt := *(*mappingTargetEntry)(unsafe.Pointer(&entryBuf[0]))
if len(buffer) < int(tgt.TargetRootOffset)+int(tgt.TargetRootLength) {
return nil, fmt.Errorf("invalid buffer")
}
decoded, err := decodeEntry(buffer[tgt.TargetRootOffset : tgt.TargetRootOffset+tgt.TargetRootLength])
if err != nil {
return nil, fmt.Errorf("decoding name: %w", err)
}
decoded, err = getFinalPath(decoded)
if err != nil {
return nil, fmt.Errorf("fetching final path: %w", err)
}
targets[i] = decoded
}
return targets, nil
}
func getFinalPath(pth string) (string, error) {
// BfGetMappings returns VOLUME_NAME_NT paths like \Device\HarddiskVolume2\ProgramData.
// These can be accessed by prepending \\.\GLOBALROOT to the path. We use this to get the
// DOS paths for these files.
if strings.HasPrefix(pth, `\Device`) {
pth = `\\.\GLOBALROOT` + pth
}
han, err := openPath(pth)
if err != nil {
return "", fmt.Errorf("fetching file handle: %w", err)
}
defer func() {
_ = windows.CloseHandle(han)
}()
buf := make([]uint16, 100)
var flags uint32 = 0x0
for {
n, err := windows.GetFinalPathNameByHandle(han, &buf[0], uint32(len(buf)), flags)
if err != nil {
// if we mounted a volume that does not also have a drive letter assigned, attempting to
// fetch the VOLUME_NAME_DOS will fail with os.ErrNotExist. Attempt to get the VOLUME_NAME_GUID.
if errors.Is(err, os.ErrNotExist) && flags != 0x1 {
flags = 0x1
continue
}
return "", fmt.Errorf("getting final path name: %w", err)
}
if n < uint32(len(buf)) {
break
}
buf = make([]uint16, n)
}
finalPath := syscall.UTF16ToString(buf)
// We got VOLUME_NAME_DOS, we need to strip away some leading slashes.
// Leave unchanged if we ended up requesting VOLUME_NAME_GUID
if len(finalPath) > 4 && finalPath[:4] == `\\?\` && flags == 0x0 {
finalPath = finalPath[4:]
if len(finalPath) > 3 && finalPath[:3] == `UNC` {
// return path like \\server\share\...
finalPath = `\` + finalPath[3:]
}
}
return finalPath, nil
}
func getBindMappingFromBuffer(buffer []byte, entry mappingEntry) (BindMapping, error) {
if len(buffer) < int(entry.VirtRootOffset)+int(entry.VirtRootLength) {
return BindMapping{}, fmt.Errorf("invalid buffer")
}
src, err := decodeEntry(buffer[entry.VirtRootOffset : entry.VirtRootOffset+entry.VirtRootLength])
if err != nil {
return BindMapping{}, fmt.Errorf("decoding entry: %w", err)
}
targets, err := getTargetsFromBuffer(buffer, int(entry.TargetEntriesOffset), int(entry.NumberOfTargets))
if err != nil {
return BindMapping{}, fmt.Errorf("fetching targets: %w", err)
}
src, err = getFinalPath(src)
if err != nil {
return BindMapping{}, fmt.Errorf("fetching final path: %w", err)
}
return BindMapping{
Flags: entry.Flags,
Targets: targets,
MountPoint: src,
}, nil
}
func openPath(path string) (windows.Handle, error) {
u16, err := windows.UTF16PtrFromString(path)
if err != nil {
return 0, err
}
h, err := windows.CreateFile(
u16,
0,
windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE,
nil,
windows.OPEN_EXISTING,
windows.FILE_FLAG_BACKUP_SEMANTICS, // Needed to open a directory handle.
0)
if err != nil {
return 0, &os.PathError{
Op: "CreateFile",
Path: path,
Err: err,
}
}
return h, nil
}

View File

@ -0,0 +1,116 @@
//go:build windows
// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
package bindfilter
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return errERROR_EINVAL
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values seen on Windows. (perhaps when running
// all.bat?)
return e
}
var (
modbindfltapi = windows.NewLazySystemDLL("bindfltapi.dll")
procBfGetMappings = modbindfltapi.NewProc("BfGetMappings")
procBfRemoveMapping = modbindfltapi.NewProc("BfRemoveMapping")
procBfSetupFilter = modbindfltapi.NewProc("BfSetupFilter")
)
func bfGetMappings(flags uint32, jobHandle windows.Handle, virtRootPath *uint16, sid *windows.SID, bufferSize *uint32, outBuffer *byte) (hr error) {
hr = procBfGetMappings.Find()
if hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procBfGetMappings.Addr(), 6, uintptr(flags), uintptr(jobHandle), uintptr(unsafe.Pointer(virtRootPath)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(bufferSize)), uintptr(unsafe.Pointer(outBuffer)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func bfRemoveMapping(jobHandle windows.Handle, virtRootPath string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(virtRootPath)
if hr != nil {
return
}
return _bfRemoveMapping(jobHandle, _p0)
}
func _bfRemoveMapping(jobHandle windows.Handle, virtRootPath *uint16) (hr error) {
hr = procBfRemoveMapping.Find()
if hr != nil {
return
}
r0, _, _ := syscall.Syscall(procBfRemoveMapping.Addr(), 2, uintptr(jobHandle), uintptr(unsafe.Pointer(virtRootPath)), 0)
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func bfSetupFilter(jobHandle windows.Handle, flags uint32, virtRootPath string, virtTargetPath string, virtExceptions **uint16, virtExceptionPathCount uint32) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(virtRootPath)
if hr != nil {
return
}
var _p1 *uint16
_p1, hr = syscall.UTF16PtrFromString(virtTargetPath)
if hr != nil {
return
}
return _bfSetupFilter(jobHandle, flags, _p0, _p1, virtExceptions, virtExceptionPathCount)
}
func _bfSetupFilter(jobHandle windows.Handle, flags uint32, virtRootPath *uint16, virtTargetPath *uint16, virtExceptions **uint16, virtExceptionPathCount uint32) (hr error) {
hr = procBfSetupFilter.Find()
if hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procBfSetupFilter.Addr(), 6, uintptr(jobHandle), uintptr(flags), uintptr(unsafe.Pointer(virtRootPath)), uintptr(unsafe.Pointer(virtTargetPath)), uintptr(unsafe.Pointer(virtExceptions)), uintptr(virtExceptionPathCount))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}

View File

@ -1,5 +1,3 @@
// +build windows
// Package guid provides a GUID type. The backing structure for a GUID is
// identical to that used by the golang.org/x/sys/windows GUID type.
// There are two main binary encodings used for a GUID, the big-endian encoding,
@ -9,24 +7,26 @@ package guid
import (
"crypto/rand"
"crypto/sha1"
"crypto/sha1" //nolint:gosec // not used for secure application
"encoding"
"encoding/binary"
"fmt"
"strconv"
)
//go:generate go run golang.org/x/tools/cmd/stringer -type=Variant -trimprefix=Variant -linecomment
// Variant specifies which GUID variant (or "type") the GUID is. It determines
// how the entirety of the rest of the GUID is interpreted.
type Variant uint8
// The variants specified by RFC 4122.
// The variants specified by RFC 4122 section 4.1.1.
const (
// VariantUnknown specifies a GUID variant which does not conform to one of
// the variant encodings specified in RFC 4122.
VariantUnknown Variant = iota
VariantNCS
VariantRFC4122
VariantRFC4122 // RFC 4122
VariantMicrosoft
VariantFuture
)
@ -36,6 +36,10 @@ const (
// hash of an input string.
type Version uint8
func (v Version) String() string {
return strconv.FormatUint(uint64(v), 10)
}
var _ = (encoding.TextMarshaler)(GUID{})
var _ = (encoding.TextUnmarshaler)(&GUID{})
@ -61,7 +65,7 @@ func NewV4() (GUID, error) {
// big-endian UTF16 stream of bytes. If that is desired, the string can be
// encoded as such before being passed to this function.
func NewV5(namespace GUID, name []byte) (GUID, error) {
b := sha1.New()
b := sha1.New() //nolint:gosec // not used for secure application
namespaceBytes := namespace.ToArray()
b.Write(namespaceBytes[:])
b.Write(name)

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package guid

View File

@ -1,3 +1,6 @@
//go:build windows
// +build windows
package guid
import "golang.org/x/sys/windows"

View File

@ -0,0 +1,27 @@
// Code generated by "stringer -type=Variant -trimprefix=Variant -linecomment"; DO NOT EDIT.
package guid
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[VariantUnknown-0]
_ = x[VariantNCS-1]
_ = x[VariantRFC4122-2]
_ = x[VariantMicrosoft-3]
_ = x[VariantFuture-4]
}
const _Variant_name = "UnknownNCSRFC 4122MicrosoftFuture"
var _Variant_index = [...]uint8{0, 7, 10, 18, 27, 33}
func (i Variant) String() string {
if i >= Variant(len(_Variant_index)-1) {
return "Variant(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _Variant_name[_Variant_index[i]:_Variant_index[i+1]]
}
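// Usage sketch (illustrative): create a random version 4 GUID and derive a
// name-based version 5 GUID from it, rendering the result with the GUID String
// method provided by this package.
func guidExample() (string, error) {
	ns, err := NewV4()
	if err != nil {
		return "", err
	}
	g, err := NewV5(ns, []byte("example-name"))
	if err != nil {
		return "", err
	}
	return g.String(), nil
}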

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package winio
@ -24,22 +25,17 @@ import (
//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW
const (
SE_PRIVILEGE_ENABLED = 2
//revive:disable-next-line:var-naming ALL_CAPS
SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED
ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300
//revive:disable-next-line:var-naming ALL_CAPS
ERROR_NOT_ALL_ASSIGNED syscall.Errno = windows.ERROR_NOT_ALL_ASSIGNED
SeBackupPrivilege = "SeBackupPrivilege"
SeRestorePrivilege = "SeRestorePrivilege"
SeSecurityPrivilege = "SeSecurityPrivilege"
)
const (
securityAnonymous = iota
securityIdentification
securityImpersonation
securityDelegation
)
var (
privNames = make(map[string]uint64)
privNameMutex sync.Mutex
@ -51,11 +47,9 @@ type PrivilegeError struct {
}
func (e *PrivilegeError) Error() string {
s := ""
s := "Could not enable privilege "
if len(e.privileges) > 1 {
s = "Could not enable privileges "
} else {
s = "Could not enable privilege "
}
for i, p := range e.privileges {
if i != 0 {
@ -94,7 +88,7 @@ func RunWithPrivileges(names []string, fn func() error) error {
}
func mapPrivileges(names []string) ([]uint64, error) {
var privileges []uint64
privileges := make([]uint64, 0, len(names))
privNameMutex.Lock()
defer privNameMutex.Unlock()
for _, name := range names {
@ -127,7 +121,7 @@ func enableDisableProcessPrivilege(names []string, action uint32) error {
return err
}
p, _ := windows.GetCurrentProcess()
p := windows.CurrentProcess()
var token windows.Token
err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
if err != nil {
@ -140,10 +134,10 @@ func enableDisableProcessPrivilege(names []string, action uint32) error {
func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
var b bytes.Buffer
binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
_ = binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
for _, p := range privileges {
binary.Write(&b, binary.LittleEndian, p)
binary.Write(&b, binary.LittleEndian, action)
_ = binary.Write(&b, binary.LittleEndian, p)
_ = binary.Write(&b, binary.LittleEndian, action)
}
prevState := make([]byte, b.Len())
reqSize := uint32(0)
@ -151,7 +145,7 @@ func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) e
if !success {
return err
}
if err == ERROR_NOT_ALL_ASSIGNED {
if err == ERROR_NOT_ALL_ASSIGNED { //nolint:errorlint // err is Errno
return &PrivilegeError{privileges}
}
return nil
@ -177,7 +171,7 @@ func getPrivilegeName(luid uint64) string {
}
func newThreadToken() (windows.Token, error) {
err := impersonateSelf(securityImpersonation)
err := impersonateSelf(windows.SecurityImpersonation)
if err != nil {
return 0, err
}

View File

@ -1,3 +1,6 @@
//go:build windows
// +build windows
package winio
import (
@ -113,16 +116,16 @@ func EncodeReparsePoint(rp *ReparsePoint) []byte {
}
var b bytes.Buffer
binary.Write(&b, binary.LittleEndian, &data)
_ = binary.Write(&b, binary.LittleEndian, &data)
if !rp.IsMountPoint {
flags := uint32(0)
if relative {
flags |= 1
}
binary.Write(&b, binary.LittleEndian, flags)
_ = binary.Write(&b, binary.LittleEndian, flags)
}
binary.Write(&b, binary.LittleEndian, ntTarget16)
binary.Write(&b, binary.LittleEndian, target16)
_ = binary.Write(&b, binary.LittleEndian, ntTarget16)
_ = binary.Write(&b, binary.LittleEndian, target16)
return b.Bytes()
}

View File

@ -1,23 +1,25 @@
//go:build windows
// +build windows
package winio
import (
"errors"
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW
//sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW
//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW
//sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW
//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW
//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW
//sys localFree(mem uintptr) = LocalFree
//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength
const (
cERROR_NONE_MAPPED = syscall.Errno(1332)
)
type AccountLookupError struct {
Name string
Err error
@ -28,8 +30,10 @@ func (e *AccountLookupError) Error() string {
return "lookup account: empty account name specified"
}
var s string
switch e.Err {
case cERROR_NONE_MAPPED:
switch {
case errors.Is(e.Err, windows.ERROR_INVALID_SID):
s = "the security ID structure is invalid"
case errors.Is(e.Err, windows.ERROR_NONE_MAPPED):
s = "not found"
default:
s = e.Err.Error()
@ -37,6 +41,8 @@ func (e *AccountLookupError) Error() string {
return "lookup account " + e.Name + ": " + s
}
func (e *AccountLookupError) Unwrap() error { return e.Err }
type SddlConversionError struct {
Sddl string
Err error
@ -46,15 +52,19 @@ func (e *SddlConversionError) Error() string {
return "convert " + e.Sddl + ": " + e.Err.Error()
}
func (e *SddlConversionError) Unwrap() error { return e.Err }
// LookupSidByName looks up the SID of an account by name
//
//revive:disable-next-line:var-naming SID, not Sid
func LookupSidByName(name string) (sid string, err error) {
if name == "" {
return "", &AccountLookupError{name, cERROR_NONE_MAPPED}
return "", &AccountLookupError{name, windows.ERROR_NONE_MAPPED}
}
var sidSize, sidNameUse, refDomainSize uint32
err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse)
if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER {
if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno
return "", &AccountLookupError{name, err}
}
sidBuffer := make([]byte, sidSize)
@ -73,6 +83,42 @@ func LookupSidByName(name string) (sid string, err error) {
return sid, nil
}
// LookupNameBySid looks up the name of an account by SID
//
//revive:disable-next-line:var-naming SID, not Sid
func LookupNameBySid(sid string) (name string, err error) {
if sid == "" {
return "", &AccountLookupError{sid, windows.ERROR_NONE_MAPPED}
}
sidBuffer, err := windows.UTF16PtrFromString(sid)
if err != nil {
return "", &AccountLookupError{sid, err}
}
var sidPtr *byte
if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil {
return "", &AccountLookupError{sid, err}
}
defer localFree(uintptr(unsafe.Pointer(sidPtr)))
var nameSize, refDomainSize, sidNameUse uint32
err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse)
if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno
return "", &AccountLookupError{sid, err}
}
nameBuffer := make([]uint16, nameSize)
refDomainBuffer := make([]uint16, refDomainSize)
err = lookupAccountSid(nil, sidPtr, &nameBuffer[0], &nameSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
if err != nil {
return "", &AccountLookupError{sid, err}
}
name = windows.UTF16ToString(nameBuffer)
return name, nil
}
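// Usage sketch (illustrative account name): round-trip a well-known account
// through LookupSidByName and LookupNameBySid.
func lookupExample() (string, error) {
	sid, err := LookupSidByName(`NT AUTHORITY\SYSTEM`)
	if err != nil {
		return "", err
	}
	return LookupNameBySid(sid)
}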
func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
var sdBuffer uintptr
err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil)
@ -87,7 +133,7 @@ func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
func SecurityDescriptorToSddl(sd []byte) (string, error) {
var sddl *uint16
// The returned string length seems to including an aribtrary number of terminating NULs.
// The returned string length seems to include an arbitrary number of terminating NULs.
// Don't use it.
err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil)
if err != nil {

View File

@ -1,3 +1,5 @@
//go:build windows
package winio
//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go
//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./*.go

View File

@ -0,0 +1,5 @@
//go:build tools
package winio
import _ "golang.org/x/tools/cmd/stringer"

View File

@ -0,0 +1,57 @@
/*
mkwinsyscall generates windows system call bodies
It parses all files specified on command line containing function
prototypes (like syscall_windows.go) and prints system call bodies
to standard output.
The prototypes are marked by lines beginning with "//sys" and read
like func declarations if //sys is replaced by func, but:
- The parameter lists must give a name for each argument. This
includes return parameters.
- The parameter lists must give a type for each argument:
the (x, y, z int) shorthand is not allowed.
- If the return parameter is an error number, it must be named err.
- If the Go func name needs to be different from its winapi dll name,
the winapi name can be specified at the end, after the "=" sign, like
//sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA
- Each function that returns err needs to supply a failure condition that the
winapi return value is tested against; on failure, err is set to the
Windows "last-error", otherwise it is nil. The condition can be provided at
the end of the //sys declaration, like
//sys LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA
and is [failretval==0] by default.
- If the function name ends in a "?", then the function not existing is non-
fatal, and an error will be returned instead of panicking.
Usage:
mkwinsyscall [flags] [path ...]
Flags
-output string
Output file name (standard output if omitted).
-sort
Sort DLL and function declarations (default true).
Intended to help transition from older versions of mkwinsyscall by making diffs
easier to read and understand.
-systemdll
Whether all DLLs should be loaded from the Windows system directory (default true).
-trace
Generate print statement after every syscall.
-utf16
Encode string arguments as UTF-16 for syscalls not ending in 'A' or 'W' (default true).
-winio
Import this package ("github.com/Microsoft/go-winio").
*/
package main
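// Illustrative prototype (an assumption, not taken from the upstream docs): a
// //sys line for user32.MessageBoxW. The generated wrapper converts the string
// arguments to UTF-16, loads user32.dll lazily, and sets err from the thread's
// last error when the return value matches the failure condition (the default is
// failretval==0).
//
//	//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go syscall.go
//	//sys MessageBox(hwnd uintptr, text string, caption string, flags uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW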

File diff suppressed because it is too large

View File

@ -11,7 +11,7 @@ import (
"golang.org/x/sys/windows"
)
//go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go
//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zvhd_windows.go vhd.go
//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) = virtdisk.CreateVirtualDisk
//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk
@ -62,8 +62,8 @@ type OpenVirtualDiskParameters struct {
Version2 OpenVersion2
}
// The higher level `OpenVersion2` struct uses bools to refer to `GetInfoOnly` and `ReadOnly` for ease of use. However,
// the internal windows structure uses `BOOLS` aka int32s for these types. `openVersion2` is used for translating
// The higher level `OpenVersion2` struct uses `bool`s to refer to `GetInfoOnly` and `ReadOnly` for ease of use. However,
// the internal windows structure uses `BOOL`s aka int32s for these types. `openVersion2` is used for translating
// `OpenVersion2` fields to the correct windows internal field types on the `Open____` methods.
type openVersion2 struct {
getInfoOnly int32
@ -87,9 +87,10 @@ type AttachVirtualDiskParameters struct {
}
const (
//revive:disable-next-line:var-naming ALL_CAPS
VIRTUAL_STORAGE_TYPE_DEVICE_VHDX = 0x3
// Access Mask for opening a VHD
// Access Mask for opening a VHD.
VirtualDiskAccessNone VirtualDiskAccessMask = 0x00000000
VirtualDiskAccessAttachRO VirtualDiskAccessMask = 0x00010000
VirtualDiskAccessAttachRW VirtualDiskAccessMask = 0x00020000
@ -101,7 +102,7 @@ const (
VirtualDiskAccessAll VirtualDiskAccessMask = 0x003f0000
VirtualDiskAccessWritable VirtualDiskAccessMask = 0x00320000
// Flags for creating a VHD
// Flags for creating a VHD.
CreateVirtualDiskFlagNone CreateVirtualDiskFlag = 0x0
CreateVirtualDiskFlagFullPhysicalAllocation CreateVirtualDiskFlag = 0x1
CreateVirtualDiskFlagPreventWritesToSourceDisk CreateVirtualDiskFlag = 0x2
@ -109,12 +110,12 @@ const (
CreateVirtualDiskFlagCreateBackingStorage CreateVirtualDiskFlag = 0x8
CreateVirtualDiskFlagUseChangeTrackingSourceLimit CreateVirtualDiskFlag = 0x10
CreateVirtualDiskFlagPreserveParentChangeTrackingState CreateVirtualDiskFlag = 0x20
CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40
CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40 //revive:disable-line:var-naming VHD, not Vhd
CreateVirtualDiskFlagSparseFile CreateVirtualDiskFlag = 0x80
CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100
CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100 //revive:disable-line:var-naming PMEM, not Pmem
CreateVirtualDiskFlagSupportCompressedVolumes CreateVirtualDiskFlag = 0x200
// Flags for opening a VHD
// Flags for opening a VHD.
OpenVirtualDiskFlagNone VirtualDiskFlag = 0x00000000
OpenVirtualDiskFlagNoParents VirtualDiskFlag = 0x00000001
OpenVirtualDiskFlagBlankFile VirtualDiskFlag = 0x00000002
@ -127,7 +128,7 @@ const (
OpenVirtualDiskFlagNoWriteHardening VirtualDiskFlag = 0x00000100
OpenVirtualDiskFlagSupportCompressedVolumes VirtualDiskFlag = 0x00000200
// Flags for attaching a VHD
// Flags for attaching a VHD.
AttachVirtualDiskFlagNone AttachVirtualDiskFlag = 0x00000000
AttachVirtualDiskFlagReadOnly AttachVirtualDiskFlag = 0x00000001
AttachVirtualDiskFlagNoDriveLetter AttachVirtualDiskFlag = 0x00000002
@ -140,12 +141,14 @@ const (
AttachVirtualDiskFlagSinglePartition AttachVirtualDiskFlag = 0x00000100
AttachVirtualDiskFlagRegisterVolume AttachVirtualDiskFlag = 0x00000200
// Flags for detaching a VHD
// Flags for detaching a VHD.
DetachVirtualDiskFlagNone DetachVirtualDiskFlag = 0x0
)
// CreateVhdx is a helper function to create a simple vhdx file at the given path using
// default values.
//
//revive:disable-next-line:var-naming VHDX, not Vhdx
func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error {
params := CreateVirtualDiskParameters{
Version: 2,
@ -172,6 +175,8 @@ func DetachVirtualDisk(handle syscall.Handle) (err error) {
}
// DetachVhd detaches a vhd found at `path`.
//
//revive:disable-next-line:var-naming VHD, not Vhd
func DetachVhd(path string) error {
handle, err := OpenVirtualDisk(
path,
@ -181,12 +186,16 @@ func DetachVhd(path string) error {
if err != nil {
return err
}
defer syscall.CloseHandle(handle)
defer syscall.CloseHandle(handle) //nolint:errcheck
return DetachVirtualDisk(handle)
}
// AttachVirtualDisk attaches a virtual hard disk for use.
func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtualDiskFlag, parameters *AttachVirtualDiskParameters) (err error) {
func AttachVirtualDisk(
handle syscall.Handle,
attachVirtualDiskFlag AttachVirtualDiskFlag,
parameters *AttachVirtualDiskParameters,
) (err error) {
// Supports both version 1 and 2 of the attach parameters as version 2 wasn't present in RS5.
if err := attachVirtualDisk(
handle,
@ -203,6 +212,8 @@ func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtua
// AttachVhd attaches a virtual hard disk at `path` for use. Attaches using version 2
// of the ATTACH_VIRTUAL_DISK_PARAMETERS.
//
//revive:disable-next-line:var-naming VHD, not Vhd
func AttachVhd(path string) (err error) {
handle, err := OpenVirtualDisk(
path,
@ -213,7 +224,7 @@ func AttachVhd(path string) (err error) {
return err
}
defer syscall.CloseHandle(handle)
defer syscall.CloseHandle(handle) //nolint:errcheck
params := AttachVirtualDiskParameters{Version: 2}
if err := AttachVirtualDisk(
handle,
@ -226,7 +237,11 @@ func AttachVhd(path string) (err error) {
}
// OpenVirtualDisk obtains a handle to a VHD opened with supplied access mask and flags.
func OpenVirtualDisk(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag) (syscall.Handle, error) {
func OpenVirtualDisk(
vhdPath string,
virtualDiskAccessMask VirtualDiskAccessMask,
openVirtualDiskFlags VirtualDiskFlag,
) (syscall.Handle, error) {
parameters := OpenVirtualDiskParameters{Version: 2}
handle, err := OpenVirtualDiskWithParameters(
vhdPath,
@ -241,7 +256,12 @@ func OpenVirtualDisk(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask
}
// OpenVirtualDiskWithParameters obtains a handle to a VHD opened with supplied access mask, flags and parameters.
func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag, parameters *OpenVirtualDiskParameters) (syscall.Handle, error) {
func OpenVirtualDiskWithParameters(
vhdPath string,
virtualDiskAccessMask VirtualDiskAccessMask,
openVirtualDiskFlags VirtualDiskFlag,
parameters *OpenVirtualDiskParameters,
) (syscall.Handle, error) {
var (
handle syscall.Handle
defaultType VirtualStorageType
@ -279,7 +299,12 @@ func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask Virtual
}
// CreateVirtualDisk creates a virtual harddisk and returns a handle to the disk.
func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask, createVirtualDiskFlags CreateVirtualDiskFlag, parameters *CreateVirtualDiskParameters) (syscall.Handle, error) {
func CreateVirtualDisk(
path string,
virtualDiskAccessMask VirtualDiskAccessMask,
createVirtualDiskFlags CreateVirtualDiskFlag,
parameters *CreateVirtualDiskParameters,
) (syscall.Handle, error) {
var (
handle syscall.Handle
defaultType VirtualStorageType
@ -323,6 +348,8 @@ func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) {
}
// CreateDiffVhd is a helper function to create a differencing virtual disk.
//
//revive:disable-next-line:var-naming VHD, not Vhd
func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error {
// Setting `ParentPath` is how to signal to create a differencing disk.
createParams := &CreateVirtualDiskParameters{

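Taken together, the helpers in this file give a small create/attach/detach lifecycle; a hedged usage sketch (the path and sizes are made up, and error handling is abbreviated):

```
//go:build windows

package main

import (
	"log"

	"github.com/Microsoft/go-winio/vhd"
)

func main() {
	path := `C:\layers\scratch.vhdx` // hypothetical location

	// Create a 1 GB VHDX with a 1 MB block size using the package defaults.
	if err := vhd.CreateVhdx(path, 1, 1); err != nil {
		log.Fatal(err)
	}
	// Attach with version 2 attach parameters, then detach when finished.
	if err := vhd.AttachVhd(path); err != nil {
		log.Fatal(err)
	}
	if err := vhd.DetachVhd(path); err != nil {
		log.Fatal(err)
	}
}
```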
View File

@ -1,4 +1,6 @@
// Code generated by 'go generate'; DO NOT EDIT.
//go:build windows
// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
package vhd

View File

@ -1,4 +1,6 @@
// Code generated by 'go generate'; DO NOT EDIT.
//go:build windows
// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
package winio
@ -47,9 +49,11 @@ var (
procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW")
procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength")
procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW")
procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
@ -59,7 +63,6 @@ var (
procBackupWrite = modkernel32.NewProc("BackupWrite")
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
procCreateFileW = modkernel32.NewProc("CreateFileW")
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
@ -74,7 +77,6 @@ var (
procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
procbind = modws2_32.NewProc("bind")
)
func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
@ -123,6 +125,14 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision
return
}
func convertStringSidToSid(str *uint16, sid **byte) (err error) {
r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid)), 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func getSecurityDescriptorLength(sd uintptr) (len uint32) {
r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0)
len = uint32(r0)
@ -154,6 +164,14 @@ func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidS
return
}
func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(systemName)
@ -286,24 +304,6 @@ func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
return
}
func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
}
func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
handle = syscall.Handle(r0)
if handle == syscall.InvalidHandle {
err = errnoErr(e1)
}
return
}
func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0)
newport = syscall.Handle(r0)
@ -380,25 +380,25 @@ func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err erro
return
}
func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) {
func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) {
r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
status = ntstatus(r0)
status = ntStatus(r0)
return
}
func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) {
func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) {
r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0)
status = ntstatus(r0)
status = ntStatus(r0)
return
}
func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) {
func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) {
r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0)
status = ntstatus(r0)
status = ntStatus(r0)
return
}
func rtlNtStatusToDosError(status ntstatus) (winerr error) {
func rtlNtStatusToDosError(status ntStatus) (winerr error) {
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
if r0 != 0 {
winerr = syscall.Errno(r0)
@ -417,11 +417,3 @@ func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint
}
return
}
func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) {
r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
if r1 == socketError {
err = errnoErr(e1)
}
return
}

View File

@ -1 +1,3 @@
* text=auto eol=lf
* text=auto eol=lf
vendor/** -text
test/vendor/** -text

View File

@ -6,6 +6,7 @@
# Ignore vscode setting files
.vscode/
.idea/
# Test binary, build with `go test -c`
*.test
@ -23,16 +24,26 @@ service/pkg/
*.img
*.vhd
*.tar.gz
*.tar
# Make stuff
.rootfs-done
bin/*
rootfs/*
rootfs-conv/*
*.o
/build/
deps/*
out/*
.idea/
.vscode/
# test results
test/results
# go workspace files
go.work
go.work.sum
# keys and related artifacts
*.pem
*.cose

View File

@ -1,23 +1,51 @@
run:
timeout: 8m
tests: true
build-tags:
- admin
- functional
- integration
skip-dirs:
# paths are relative to module root
- cri-containerd/test-images
linters:
enable:
- stylecheck
# defaults:
# - errcheck
# - gosimple
# - govet
# - ineffassign
# - staticcheck
# - typecheck
# - unused
- gofmt # whether code was gofmt-ed
- nolintlint # ill-formed or insufficient nolint directives
- stylecheck # golint replacement
- thelper # test helpers without t.Helper()
linters-settings:
stylecheck:
# https://staticcheck.io/docs/checks
checks: ["all"]
issues:
# This repo has a LOT of generated schema files, operating system bindings, and other things that ST1003 from stylecheck won't like
# (screaming case Windows api constants for example). There's also some structs that we *could* change the initialisms to be Go
# friendly (Id -> ID) but they're exported and it would be a breaking change. This makes it so that most new code, code that isn't
# supposed to be a pretty faithful mapping to an OS call/constants, or non-generated code still checks if we're following idioms,
# while ignoring the things that are just noise or would be more of a hassle than it'd be worth to change.
exclude-rules:
# path is relative to module root, which is ./test/
- path: cri-containerd
linters:
- stylecheck
text: "^ST1003: should not use underscores in package names$"
source: "^package cri_containerd$"
# This repo has a LOT of generated schema files, operating system bindings, and other
# things that ST1003 from stylecheck won't like (screaming case Windows api constants for example).
# There's also some structs that we *could* change the initialisms to be Go friendly
# (Id -> ID) but they're exported and it would be a breaking change.
# This makes it so that most new code, code that isn't supposed to be a pretty faithful
# mapping to an OS call/constants, or non-generated code still checks if we're following idioms,
# while ignoring the things that are just noise or would be more of a hassle than it'd be worth to change.
- path: layer.go
linters:
- stylecheck
@ -28,11 +56,21 @@ issues:
- stylecheck
Text: "ST1003:"
- path: internal\\hcs\\schema2\\
- path: cmd\\ncproxy\\nodenetsvc\\
linters:
- stylecheck
Text: "ST1003:"
- path: cmd\\ncproxy_mock\\
linters:
- stylecheck
Text: "ST1003:"
- path: internal\\hcs\\schema2\\
linters:
- stylecheck
- gofmt
- path: internal\\wclayer\\
linters:
- stylecheck
@ -96,4 +134,4 @@ issues:
- path: internal\\hcserror\\
linters:
- stylecheck
Text: "ST1003:"
Text: "ST1003:"

View File

@ -1,4 +1,5 @@
BASE:=base.tar.gz
DEV_BUILD:=0
GO:=go
GO_FLAGS:=-ldflags "-s -w" # strip Go binaries
@ -12,16 +13,31 @@ GO_FLAGS_EXTRA:=
ifeq "$(GOMODVENDOR)" "1"
GO_FLAGS_EXTRA += -mod=vendor
endif
GO_BUILD_TAGS:=
ifneq ($(strip $(GO_BUILD_TAGS)),)
GO_FLAGS_EXTRA += -tags="$(GO_BUILD_TAGS)"
endif
GO_BUILD:=CGO_ENABLED=$(CGO_ENABLED) $(GO) build $(GO_FLAGS) $(GO_FLAGS_EXTRA)
SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST))))
# additional directories to search for rule prerequisites and targets
VPATH=$(SRCROOT)
DELTA_TARGET=out/delta.tar.gz
ifeq "$(DEV_BUILD)" "1"
DELTA_TARGET=out/delta-dev.tar.gz
endif
# The link aliases for gcstools
GCS_TOOLS=\
generichook
generichook \
install-drivers
.PHONY: all always rootfs test
.DEFAULT_GOAL := all
all: out/initrd.img out/rootfs.tar.gz
clean:
@ -29,21 +45,13 @@ clean:
rm -rf bin deps rootfs out
test:
cd $(SRCROOT) && go test -v ./internal/guest/...
cd $(SRCROOT) && $(GO) test -v ./internal/guest/...
out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools Makefile
@mkdir -p out
rm -rf rootfs
mkdir -p rootfs/bin/
cp bin/init rootfs/
cp bin/vsockexec rootfs/bin/
cp bin/cmd/gcs rootfs/bin/
cp bin/cmd/gcstools rootfs/bin/
for tool in $(GCS_TOOLS); do ln -s gcstools rootfs/bin/$$tool; done
git -C $(SRCROOT) rev-parse HEAD > rootfs/gcs.commit && \
git -C $(SRCROOT) rev-parse --abbrev-ref HEAD > rootfs/gcs.branch
tar -zcf $@ -C rootfs .
rm -rf rootfs
rootfs: out/rootfs.vhd
out/rootfs.vhd: out/rootfs.tar.gz bin/cmd/tar2ext4
gzip -f -d ./out/rootfs.tar.gz
bin/cmd/tar2ext4 -vhd -i ./out/rootfs.tar -o $@
out/rootfs.tar.gz: out/initrd.img
rm -rf rootfs-conv
@ -52,27 +60,43 @@ out/rootfs.tar.gz: out/initrd.img
tar -zcf $@ -C rootfs-conv .
rm -rf rootfs-conv
out/initrd.img: $(BASE) out/delta.tar.gz $(SRCROOT)/hack/catcpio.sh
$(SRCROOT)/hack/catcpio.sh "$(BASE)" out/delta.tar.gz > out/initrd.img.uncompressed
out/initrd.img: $(BASE) $(DELTA_TARGET) $(SRCROOT)/hack/catcpio.sh
$(SRCROOT)/hack/catcpio.sh "$(BASE)" $(DELTA_TARGET) > out/initrd.img.uncompressed
gzip -c out/initrd.img.uncompressed > $@
rm out/initrd.img.uncompressed
-include deps/cmd/gcs.gomake
-include deps/cmd/gcstools.gomake
# This target includes utilities which may be useful for testing purposes.
out/delta-dev.tar.gz: out/delta.tar.gz bin/internal/tools/snp-report
rm -rf rootfs-dev
mkdir rootfs-dev
tar -xzf out/delta.tar.gz -C rootfs-dev
cp bin/internal/tools/snp-report rootfs-dev/bin/
tar -zcf $@ -C rootfs-dev .
rm -rf rootfs-dev
# Implicit rule for includes that define Go targets.
%.gomake: $(SRCROOT)/Makefile
out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths Makefile
@mkdir -p out
rm -rf rootfs
mkdir -p rootfs/bin/
mkdir -p rootfs/info/
cp bin/init rootfs/
cp bin/vsockexec rootfs/bin/
cp bin/cmd/gcs rootfs/bin/
cp bin/cmd/gcstools rootfs/bin/
cp bin/cmd/hooks/wait-paths rootfs/bin/
for tool in $(GCS_TOOLS); do ln -s gcstools rootfs/bin/$$tool; done
git -C $(SRCROOT) rev-parse HEAD > rootfs/info/gcs.commit && \
git -C $(SRCROOT) rev-parse --abbrev-ref HEAD > rootfs/info/gcs.branch && \
date --iso-8601=minute --utc > rootfs/info/tar.date
$(if $(and $(realpath $(subst .tar,.testdata.json,$(BASE))), $(shell which jq)), \
jq -r '.IMAGE_NAME' $(subst .tar,.testdata.json,$(BASE)) 2>/dev/null > rootfs/info/image.name && \
jq -r '.DATETIME' $(subst .tar,.testdata.json,$(BASE)) 2>/dev/null > rootfs/info/build.date)
tar -zcf $@ -C rootfs .
rm -rf rootfs
bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report:
@mkdir -p $(dir $@)
@/bin/echo $(@:deps/%.gomake=bin/%): $(SRCROOT)/hack/gomakedeps.sh > $@.new
@/bin/echo -e '\t@mkdir -p $$(dir $$@) $(dir $@)' >> $@.new
@/bin/echo -e '\t$$(GO_BUILD) -o $$@.new $$(SRCROOT)/$$(@:bin/%=%)' >> $@.new
@/bin/echo -e '\tGO="$(GO)" $$(SRCROOT)/hack/gomakedeps.sh $$@ $$(SRCROOT)/$$(@:bin/%=%) $$(GO_FLAGS) $$(GO_FLAGS_EXTRA) > $(@:%.gomake=%.godeps).new' >> $@.new
@/bin/echo -e '\tmv $(@:%.gomake=%.godeps).new $(@:%.gomake=%.godeps)' >> $@.new
@/bin/echo -e '\tmv $$@.new $$@' >> $@.new
@/bin/echo -e '-include $(@:%.gomake=%.godeps)' >> $@.new
mv $@.new $@
VPATH=$(SRCROOT)
GOOS=linux $(GO_BUILD) -o $@ $(SRCROOT)/$(@:bin/%=%)
bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o
@mkdir -p bin

View File

@ -1,4 +1,4 @@
version = "unstable"
version = "1"
generator = "gogoctrd"
plugins = ["grpc", "fieldpath"]
@ -14,11 +14,6 @@ plugins = ["grpc", "fieldpath"]
# target package.
packages = ["github.com/gogo/protobuf"]
# Paths that will be added untouched to the end of the includes. We use
# `/usr/local/include` to pickup the common install location of protobuf.
# This is the default.
after = ["/usr/local/include"]
# This section maps protobuf imports to Go packages. These will become
# `-M` directives in the call to the go protobuf generator.
[packages]
@ -36,6 +31,10 @@ plugins = ["grpc", "fieldpath"]
prefixes = ["github.com/Microsoft/hcsshim/internal/shimdiag"]
plugins = ["ttrpc"]
[[overrides]]
prefixes = ["github.com/Microsoft/hcsshim/internal/extendedtask"]
plugins = ["ttrpc"]
[[overrides]]
prefixes = ["github.com/Microsoft/hcsshim/internal/computeagent"]
plugins = ["ttrpc"]

View File

@ -75,24 +75,6 @@ certify they either authored the work themselves or otherwise have permission to
more info, as well as to make sure that you can attest to the rules listed. Our CI uses the [DCO Github app](https://github.com/apps/dco) to ensure
that all commits in a given PR are signed-off.
### Test Directory (Important to note)
This project has tried to trim some dependencies from the root Go modules file that would be cumbersome to get transitively included if this
project is being vendored/used as a library. Some of these dependencies were only being used for tests, so the /test directory in this project also has
its own go.mod file where these are now included to get around this issue. Our tests rely on the code in this project to run, so the test Go modules file
has a relative path replace directive to pull in the latest hcsshim code that the tests actually touch from this project
(which is the repo itself on your disk).
```
replace (
github.com/Microsoft/hcsshim => ../
)
```
Because of this, for most code changes you may need to run `go mod vendor` + `go mod tidy` in the /test directory in this repository, as the
CI in this project will check if the files are out of date and will fail if this is true.
## Code of Conduct
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
@ -101,7 +83,7 @@ contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additio
## Dependencies
This project requires Golang 1.9 or newer to build.
This project requires Golang 1.17 or newer to build.
For system requirements to run this project, see the Microsoft docs on [Windows Container requirements](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/system-requirements).

View File

@ -0,0 +1,41 @@
<!-- BEGIN MICROSOFT SECURITY.MD V0.0.7 BLOCK -->
## Security
Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
## Reporting Security Issues
**Please do not report security vulnerabilities through public GitHub issues.**
Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
* Full paths of source file(s) related to the manifestation of the issue
* The location of the affected source code (tag/branch/commit or direct URL)
* Any special configuration required to reproduce the issue
* Step-by-step instructions to reproduce the issue
* Proof-of-concept or exploit code (if possible)
* Impact of the issue, including how an attacker might exploit the issue
This information will help us triage your report more quickly.
If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
## Preferred Languages
We prefer all communications to be in English.
## Policy
Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
<!-- END MICROSOFT SECURITY.MD BLOCK -->

View File

@ -1,3 +1,5 @@
//go:build windows
package computestorage
import (
@ -17,8 +19,8 @@ import (
//
// `layerData` is the parent read-only layer data.
func AttachLayerStorageFilter(ctx context.Context, layerPath string, layerData LayerData) (err error) {
title := "hcsshim.AttachLayerStorageFilter"
ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
title := "hcsshim::AttachLayerStorageFilter"
ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(

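The pattern this and the following computestorage files converge on is an opencensus span named `hcsshim::<API>`, ended on return, with the returned error recorded on the span. The `oc` helpers are internal to hcsshim, so the sketch below uses go.opencensus.io/trace directly as a stand-in:

```
package main

import (
	"context"
	"errors"

	"go.opencensus.io/trace"
)

// doAttach mirrors the span pattern used above: open a span, end it on
// return, and record the returned error as the span status. The body is a
// placeholder, not the real AttachLayerStorageFilter implementation.
func doAttach(ctx context.Context) (err error) {
	_, span := trace.StartSpan(ctx, "hcsshim::AttachLayerStorageFilter")
	defer span.End()
	defer func() {
		if err != nil {
			span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
		}
	}()
	return errors.New("placeholder error")
}

func main() { _ = doAttach(context.Background()) }
```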
View File

@ -1,3 +1,5 @@
//go:build windows
package computestorage
import (
@ -12,8 +14,8 @@ import (
//
// `layerPath` is a path to a directory containing the layer to export.
func DestroyLayer(ctx context.Context, layerPath string) (err error) {
title := "hcsshim.DestroyLayer"
ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
title := "hcsshim::DestroyLayer"
ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("layerPath", layerPath))

View File

@ -1,3 +1,5 @@
//go:build windows
package computestorage
import (
@ -12,8 +14,8 @@ import (
//
// `layerPath` is a path to a directory containing the layer to export.
func DetachLayerStorageFilter(ctx context.Context, layerPath string) (err error) {
title := "hcsshim.DetachLayerStorageFilter"
ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
title := "hcsshim::DetachLayerStorageFilter"
ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("layerPath", layerPath))

View File

@ -1,3 +1,5 @@
//go:build windows
package computestorage
import (
@ -19,8 +21,8 @@ import (
//
// `options` are the export options applied to the exported layer.
func ExportLayer(ctx context.Context, layerPath, exportFolderPath string, layerData LayerData, options ExportLayerOptions) (err error) {
title := "hcsshim.ExportLayer"
ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
title := "hcsshim::ExportLayer"
ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(
@ -28,17 +30,17 @@ func ExportLayer(ctx context.Context, layerPath, exportFolderPath string, layerD
trace.StringAttribute("exportFolderPath", exportFolderPath),
)
ldbytes, err := json.Marshal(layerData)
ldBytes, err := json.Marshal(layerData)
if err != nil {
return err
}
obytes, err := json.Marshal(options)
oBytes, err := json.Marshal(options)
if err != nil {
return err
}
err = hcsExportLayer(layerPath, exportFolderPath, string(ldbytes), string(obytes))
err = hcsExportLayer(layerPath, exportFolderPath, string(ldBytes), string(oBytes))
if err != nil {
return errors.Wrap(err, "failed to export layer")
}

View File

@ -1,3 +1,5 @@
//go:build windows
package computestorage
import (
@ -5,16 +7,20 @@ import (
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/pkg/errors"
"go.opencensus.io/trace"
"golang.org/x/sys/windows"
)
// FormatWritableLayerVhd formats a virtual disk for use as a writable container layer.
//
// If the VHD is not mounted it will be temporarily mounted.
//
// NOTE: This API had a breaking change in the operating system after Windows Server 2019.
// On ws2019 the API expects to get passed a file handle from CreateFile for the vhd that
// the caller wants to format. On > ws2019, it's expected that the caller passes a vhd handle
// that can be obtained from the virtdisk APIs.
func FormatWritableLayerVhd(ctx context.Context, vhdHandle windows.Handle) (err error) {
title := "hcsshim.FormatWritableLayerVhd"
ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
title := "hcsshim::FormatWritableLayerVhd"
ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()

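On hosts newer than Windows Server 2019 the handle mentioned in the note is obtained from the virtdisk APIs; a hedged sketch of that flow (hypothetical path, abbreviated error handling):

```
//go:build windows

package main

import (
	"context"
	"log"
	"syscall"

	"github.com/Microsoft/go-winio/vhd"
	"github.com/Microsoft/hcsshim/computestorage"
	"golang.org/x/sys/windows"
)

func main() {
	// Open the writable layer VHD via the virtdisk APIs and pass that handle
	// to FormatWritableLayerVhd, as the note above describes for > ws2019.
	h, err := vhd.OpenVirtualDisk(`C:\layers\sandbox.vhdx`, vhd.VirtualDiskAccessNone, vhd.OpenVirtualDiskFlagNone)
	if err != nil {
		log.Fatal(err)
	}
	defer syscall.CloseHandle(h) //nolint:errcheck

	if err := computestorage.FormatWritableLayerVhd(context.Background(), windows.Handle(h)); err != nil {
		log.Fatal(err)
	}
}
```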
View File

@ -1,3 +1,5 @@
//go:build windows
package computestorage
import (
@ -6,10 +8,12 @@ import (
"path/filepath"
"syscall"
"github.com/Microsoft/go-winio/pkg/security"
"github.com/Microsoft/go-winio/vhd"
"github.com/Microsoft/hcsshim/internal/memory"
"github.com/pkg/errors"
"golang.org/x/sys/windows"
"github.com/Microsoft/hcsshim/internal/security"
)
const defaultVHDXBlockSizeInMB = 1
@ -59,8 +63,8 @@ func SetupContainerBaseLayer(ctx context.Context, layerPath, baseVhdPath, diffVh
createParams := &vhd.CreateVirtualDiskParameters{
Version: 2,
Version2: vhd.CreateVersion2{
MaximumSize: sizeInGB * 1024 * 1024 * 1024,
BlockSizeInBytes: defaultVHDXBlockSizeInMB * 1024 * 1024,
MaximumSize: sizeInGB * memory.GiB,
BlockSizeInBytes: defaultVHDXBlockSizeInMB * memory.MiB,
},
}
handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams)
@ -135,8 +139,8 @@ func SetupUtilityVMBaseLayer(ctx context.Context, uvmPath, baseVhdPath, diffVhdP
createParams := &vhd.CreateVirtualDiskParameters{
Version: 2,
Version2: vhd.CreateVersion2{
MaximumSize: sizeInGB * 1024 * 1024 * 1024,
BlockSizeInBytes: defaultVHDXBlockSizeInMB * 1024 * 1024,
MaximumSize: sizeInGB * memory.GiB,
BlockSizeInBytes: defaultVHDXBlockSizeInMB * memory.MiB,
},
}
handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams)

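The sizes above now come from hcsshim's internal memory package rather than spelled-out multiplications; presumably it exposes the usual binary byte-size constants, roughly along these lines (an assumption for illustration, not the vendored source):

```
package main

import "fmt"

// Binary byte-size constants in the spirit of internal/memory: 1 KiB is 2^10
// bytes, and each step up shifts by another 10 bits. These definitions are an
// illustrative assumption, not the vendored package.
const (
	KiB uint64 = 1 << (10 * (iota + 1))
	MiB
	GiB
	TiB
)

func main() {
	sizeInGB := uint64(20)
	fmt.Println(sizeInGB*GiB, 1*MiB) // 21474836480 1048576
}
```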
View File

@ -1,3 +1,5 @@
//go:build windows
package computestorage
import (
@ -19,8 +21,8 @@ import (
//
// `layerData` is the parent layer data.
func ImportLayer(ctx context.Context, layerPath, sourceFolderPath string, layerData LayerData) (err error) {
title := "hcsshim.ImportLayer"
ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
title := "hcsshim::ImportLayer"
ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(

View File

@ -1,3 +1,5 @@
//go:build windows
package computestorage
import (
@ -16,8 +18,8 @@ import (
//
// `layerData` is the parent read-only layer data.
func InitializeWritableLayer(ctx context.Context, layerPath string, layerData LayerData) (err error) {
title := "hcsshim.InitializeWritableLayer"
ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
title := "hcsshim::InitializeWritableLayer"
ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(

View File

@ -1,3 +1,5 @@
//go:build windows
package computestorage
import (
@ -6,14 +8,13 @@ import (
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/pkg/errors"
"go.opencensus.io/trace"
"golang.org/x/sys/windows"
)
// GetLayerVhdMountPath returns the volume path for a virtual disk of a writable container layer.
func GetLayerVhdMountPath(ctx context.Context, vhdHandle windows.Handle) (path string, err error) {
title := "hcsshim.GetLayerVhdMountPath"
ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
title := "hcsshim::GetLayerVhdMountPath"
ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()

View File

@ -1,3 +1,5 @@
//go:build windows
package computestorage
import (
@ -21,8 +23,8 @@ import (
//
// `options` are the options applied while processing the layer.
func SetupBaseOSLayer(ctx context.Context, layerPath string, vhdHandle windows.Handle, options OsLayerOptions) (err error) {
title := "hcsshim.SetupBaseOSLayer"
ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
title := "hcsshim::SetupBaseOSLayer"
ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(
@ -48,12 +50,16 @@ func SetupBaseOSLayer(ctx context.Context, layerPath string, vhdHandle windows.H
// `volumePath` is the path to the volume to be used for setup.
//
// `options` are the options applied while processing the layer.
//
// NOTE: This API is only available on builds of Windows greater than 19645. Inside we
// check if the hosts build has the API available by using 'GetVersion' which requires
// the calling application to be manifested. https://docs.microsoft.com/en-us/windows/win32/sbscs/manifests
func SetupBaseOSVolume(ctx context.Context, layerPath, volumePath string, options OsLayerOptions) (err error) {
if osversion.Build() < 19645 {
return errors.New("SetupBaseOSVolume is not present on builds older than 19645")
}
title := "hcsshim.SetupBaseOSVolume"
ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
title := "hcsshim::SetupBaseOSVolume"
ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(

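Because SetupBaseOSVolume bails out on hosts older than build 19645, callers generally branch on the host build and fall back to the handle-based SetupBaseOSLayer path; a hedged sketch of that guard (the build value and callbacks are hypothetical stand-ins):

```
package main

import (
	"errors"
	"fmt"
)

// setupBaseOS picks between the volume-based and handle-based setup paths
// depending on the host build, mirroring the 19645 check above. Both
// callbacks stand in for computestorage.SetupBaseOSVolume / SetupBaseOSLayer.
func setupBaseOS(hostBuild uint16, setupVolume, setupLayer func() error) error {
	if hostBuild >= 19645 {
		return setupVolume()
	}
	return setupLayer()
}

func main() {
	err := setupBaseOS(17763, // ws2019
		func() error { return errors.New("volume path not available") },
		func() error { return nil },
	)
	fmt.Println(err) // <nil>: ws2019 takes the handle-based path
}
```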
View File

@ -7,7 +7,7 @@ import (
hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
)
//go:generate go run ../mksyscall_windows.go -output zsyscall_windows.go storage.go
//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go storage.go
//sys hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) = computestorage.HcsImportLayer?
//sys hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) = computestorage.HcsExportLayer?
@ -20,10 +20,13 @@ import (
//sys hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) = computestorage.HcsGetLayerVhdMountPath?
//sys hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (hr error) = computestorage.HcsSetupBaseOSVolume?
type Version = hcsschema.Version
type Layer = hcsschema.Layer
// LayerData is the data used to describe parent layer information.
type LayerData struct {
SchemaVersion hcsschema.Version `json:"SchemaVersion,omitempty"`
Layers []hcsschema.Layer `json:"Layers,omitempty"`
SchemaVersion Version `json:"SchemaVersion,omitempty"`
Layers []Layer `json:"Layers,omitempty"`
}
// ExportLayerOptions are the set of options that are used with the `computestorage.HcsExportLayer` syscall.

View File

@ -1,4 +1,6 @@
// Code generated mksyscall_windows.exe DO NOT EDIT
//go:build windows
// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
package computestorage
@ -19,6 +21,7 @@ const (
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
@ -26,7 +29,7 @@ var (
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return nil
return errERROR_EINVAL
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
@ -39,42 +42,86 @@ func errnoErr(e syscall.Errno) error {
var (
modcomputestorage = windows.NewLazySystemDLL("computestorage.dll")
procHcsImportLayer = modcomputestorage.NewProc("HcsImportLayer")
procHcsExportLayer = modcomputestorage.NewProc("HcsExportLayer")
procHcsDestoryLayer = modcomputestorage.NewProc("HcsDestoryLayer")
procHcsSetupBaseOSLayer = modcomputestorage.NewProc("HcsSetupBaseOSLayer")
procHcsInitializeWritableLayer = modcomputestorage.NewProc("HcsInitializeWritableLayer")
procHcsAttachLayerStorageFilter = modcomputestorage.NewProc("HcsAttachLayerStorageFilter")
procHcsDestoryLayer = modcomputestorage.NewProc("HcsDestoryLayer")
procHcsDetachLayerStorageFilter = modcomputestorage.NewProc("HcsDetachLayerStorageFilter")
procHcsExportLayer = modcomputestorage.NewProc("HcsExportLayer")
procHcsFormatWritableLayerVhd = modcomputestorage.NewProc("HcsFormatWritableLayerVhd")
procHcsGetLayerVhdMountPath = modcomputestorage.NewProc("HcsGetLayerVhdMountPath")
procHcsImportLayer = modcomputestorage.NewProc("HcsImportLayer")
procHcsInitializeWritableLayer = modcomputestorage.NewProc("HcsInitializeWritableLayer")
procHcsSetupBaseOSLayer = modcomputestorage.NewProc("HcsSetupBaseOSLayer")
procHcsSetupBaseOSVolume = modcomputestorage.NewProc("HcsSetupBaseOSVolume")
)
func hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) {
func hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(layerPath)
if hr != nil {
return
}
var _p1 *uint16
_p1, hr = syscall.UTF16PtrFromString(sourceFolderPath)
_p1, hr = syscall.UTF16PtrFromString(layerData)
if hr != nil {
return
}
var _p2 *uint16
_p2, hr = syscall.UTF16PtrFromString(layerData)
if hr != nil {
return
}
return _hcsImportLayer(_p0, _p1, _p2)
return _hcsAttachLayerStorageFilter(_p0, _p1)
}
func _hcsImportLayer(layerPath *uint16, sourceFolderPath *uint16, layerData *uint16) (hr error) {
if hr = procHcsImportLayer.Find(); hr != nil {
func _hcsAttachLayerStorageFilter(layerPath *uint16, layerData *uint16) (hr error) {
hr = procHcsAttachLayerStorageFilter.Find()
if hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsImportLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(sourceFolderPath)), uintptr(unsafe.Pointer(layerData)))
r0, _, _ := syscall.Syscall(procHcsAttachLayerStorageFilter.Addr(), 2, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(layerData)), 0)
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func hcsDestroyLayer(layerPath string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(layerPath)
if hr != nil {
return
}
return _hcsDestroyLayer(_p0)
}
func _hcsDestroyLayer(layerPath *uint16) (hr error) {
hr = procHcsDestoryLayer.Find()
if hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsDestoryLayer.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0)
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func hcsDetachLayerStorageFilter(layerPath string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(layerPath)
if hr != nil {
return
}
return _hcsDetachLayerStorageFilter(_p0)
}
func _hcsDetachLayerStorageFilter(layerPath *uint16) (hr error) {
hr = procHcsDetachLayerStorageFilter.Find()
if hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsDetachLayerStorageFilter.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0)
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@ -109,7 +156,8 @@ func hcsExportLayer(layerPath string, exportFolderPath string, layerData string,
}
func _hcsExportLayer(layerPath *uint16, exportFolderPath *uint16, layerData *uint16, options *uint16) (hr error) {
if hr = procHcsExportLayer.Find(); hr != nil {
hr = procHcsExportLayer.Find()
if hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procHcsExportLayer.Addr(), 4, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(exportFolderPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)), 0, 0)
@ -122,20 +170,12 @@ func _hcsExportLayer(layerPath *uint16, exportFolderPath *uint16, layerData *uin
return
}
func hcsDestroyLayer(layerPath string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(layerPath)
func hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) {
hr = procHcsFormatWritableLayerVhd.Find()
if hr != nil {
return
}
return _hcsDestroyLayer(_p0)
}
func _hcsDestroyLayer(layerPath *uint16) (hr error) {
if hr = procHcsDestoryLayer.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsDestoryLayer.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0)
r0, _, _ := syscall.Syscall(procHcsFormatWritableLayerVhd.Addr(), 1, uintptr(handle), 0, 0)
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@ -145,25 +185,46 @@ func _hcsDestroyLayer(layerPath *uint16) (hr error) {
return
}
func hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) {
func hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) {
hr = procHcsGetLayerVhdMountPath.Find()
if hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsGetLayerVhdMountPath.Addr(), 2, uintptr(vhdHandle), uintptr(unsafe.Pointer(mountPath)), 0)
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(layerPath)
if hr != nil {
return
}
var _p1 *uint16
_p1, hr = syscall.UTF16PtrFromString(options)
_p1, hr = syscall.UTF16PtrFromString(sourceFolderPath)
if hr != nil {
return
}
return _hcsSetupBaseOSLayer(_p0, handle, _p1)
}
func _hcsSetupBaseOSLayer(layerPath *uint16, handle windows.Handle, options *uint16) (hr error) {
if hr = procHcsSetupBaseOSLayer.Find(); hr != nil {
var _p2 *uint16
_p2, hr = syscall.UTF16PtrFromString(layerData)
if hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsSetupBaseOSLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(handle), uintptr(unsafe.Pointer(options)))
return _hcsImportLayer(_p0, _p1, _p2)
}
func _hcsImportLayer(layerPath *uint16, sourceFolderPath *uint16, layerData *uint16) (hr error) {
hr = procHcsImportLayer.Find()
if hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsImportLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(sourceFolderPath)), uintptr(unsafe.Pointer(layerData)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@ -193,7 +254,8 @@ func hcsInitializeWritableLayer(writableLayerPath string, layerData string, opti
}
func _hcsInitializeWritableLayer(writableLayerPath *uint16, layerData *uint16, options *uint16) (hr error) {
if hr = procHcsInitializeWritableLayer.Find(); hr != nil {
hr = procHcsInitializeWritableLayer.Find()
if hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsInitializeWritableLayer.Addr(), 3, uintptr(unsafe.Pointer(writableLayerPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)))
@ -206,76 +268,26 @@ func _hcsInitializeWritableLayer(writableLayerPath *uint16, layerData *uint16, o
return
}
func hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) {
func hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(layerPath)
if hr != nil {
return
}
var _p1 *uint16
_p1, hr = syscall.UTF16PtrFromString(layerData)
_p1, hr = syscall.UTF16PtrFromString(options)
if hr != nil {
return
}
return _hcsAttachLayerStorageFilter(_p0, _p1)
return _hcsSetupBaseOSLayer(_p0, handle, _p1)
}
func _hcsAttachLayerStorageFilter(layerPath *uint16, layerData *uint16) (hr error) {
if hr = procHcsAttachLayerStorageFilter.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsAttachLayerStorageFilter.Addr(), 2, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(layerData)), 0)
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func hcsDetachLayerStorageFilter(layerPath string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(layerPath)
func _hcsSetupBaseOSLayer(layerPath *uint16, handle windows.Handle, options *uint16) (hr error) {
hr = procHcsSetupBaseOSLayer.Find()
if hr != nil {
return
}
return _hcsDetachLayerStorageFilter(_p0)
}
func _hcsDetachLayerStorageFilter(layerPath *uint16) (hr error) {
if hr = procHcsDetachLayerStorageFilter.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsDetachLayerStorageFilter.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0)
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) {
if hr = procHcsFormatWritableLayerVhd.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsFormatWritableLayerVhd.Addr(), 1, uintptr(handle), 0, 0)
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) {
if hr = procHcsGetLayerVhdMountPath.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsGetLayerVhdMountPath.Addr(), 2, uintptr(vhdHandle), uintptr(unsafe.Pointer(mountPath)), 0)
r0, _, _ := syscall.Syscall(procHcsSetupBaseOSLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(handle), uintptr(unsafe.Pointer(options)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
@ -305,7 +317,8 @@ func hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (
}
func _hcsSetupBaseOSVolume(layerPath *uint16, volumePath *uint16, options *uint16) (hr error) {
if hr = procHcsSetupBaseOSVolume.Find(); hr != nil {
hr = procHcsSetupBaseOSVolume.Find()
if hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsSetupBaseOSVolume.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(options)))

View File

@ -1,3 +1,5 @@
//go:build windows
package hcsshim
import (
@ -60,7 +62,7 @@ type container struct {
waitCh chan struct{}
}
// createComputeSystemAdditionalJSON is read from the environment at initialisation
// createContainerAdditionalJSON is read from the environment at initialization
// time. It allows an environment variable to define additional JSON which
// is merged in the CreateComputeSystem call to HCS.
var createContainerAdditionalJSON []byte

View File

@ -1,3 +1,5 @@
//go:build windows
package hcsshim
import (
@ -50,6 +52,9 @@ var (
// ErrUnexpectedValue is an error encountered when hcs returns an invalid value
ErrUnexpectedValue = hcs.ErrUnexpectedValue
// ErrOperationDenied is an error when hcs attempts an operation that is explicitly denied
ErrOperationDenied = hcs.ErrOperationDenied
// ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container
ErrVmcomputeAlreadyStopped = hcs.ErrVmcomputeAlreadyStopped

View File

@ -1,12 +0,0 @@
# Requirements so far:
# dockerd running
# - image microsoft/nanoserver (matching host base image) docker load -i c:\baseimages\nanoserver.tar
# - image alpine (linux) docker pull --platform=linux alpine
# TODO: Add this a parameter for debugging. ie "functional-tests -debug=$true"
#$env:HCSSHIM_FUNCTIONAL_TESTS_DEBUG="yes please"
#pushd uvm
go test -v -tags "functional uvmcreate uvmscratch uvmscsi uvmvpmem uvmvsmb uvmp9" ./...
#popd

View File

@ -1,15 +1,17 @@
//go:build windows
// Shim for the Host Compute Service (HCS) to manage Windows Server
// containers and Hyper-V containers.
package hcsshim
import (
"syscall"
"golang.org/x/sys/windows"
"github.com/Microsoft/hcsshim/internal/hcserror"
)
//go:generate go run mksyscall_windows.go -output zsyscall_windows.go hcsshim.go
//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go hcsshim.go
//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId
@ -17,9 +19,9 @@ const (
// Specific user-visible exit codes
WaitErrExecFailed = 32767
ERROR_GEN_FAILURE = hcserror.ERROR_GEN_FAILURE
ERROR_SHUTDOWN_IN_PROGRESS = syscall.Errno(1115)
WSAEINVAL = syscall.Errno(10022)
ERROR_GEN_FAILURE = windows.ERROR_GEN_FAILURE
ERROR_SHUTDOWN_IN_PROGRESS = windows.ERROR_SHUTDOWN_IN_PROGRESS
WSAEINVAL = windows.WSAEINVAL
// Timeout on wait calls
TimeoutInfinite = 0xFFFFFFFF

View File

@ -1,3 +1,5 @@
//go:build windows
package hcsshim
import (
@ -13,7 +15,7 @@ type HNSEndpointStats = hns.EndpointStats
// Namespace represents a Compartment.
type Namespace = hns.Namespace
//SystemType represents the type of the system on which actions are done
// SystemType represents the type of the system on which actions are done
type SystemType string
// SystemType const

View File

@ -1,3 +1,5 @@
//go:build windows
package hcsshim
import (

View File

@ -1,14 +1,16 @@
//go:build windows
package hcsshim
import (
"github.com/Microsoft/hcsshim/internal/hns"
)
// Subnet is assoicated with a network and represents a list
// Subnet is associated with a network and represents a list
// of subnets available to the network
type Subnet = hns.Subnet
// MacPool is assoicated with a network and represents a list
// MacPool is associated with a network and represents a list
// of macaddresses available to the network
type MacPool = hns.MacPool
