fix(deps): update module github.com/containers/storage to v1.58.0

Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Author: renovate[bot] 2025-04-16 15:55:29 +00:00, committed by GitHub
Parent: 32b8827d78
Commit: ed34be71c6
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
118 changed files with 1561 additions and 1252 deletions

go.mod

@ -10,7 +10,7 @@ require (
github.com/containers/common v0.62.3
github.com/containers/image/v5 v5.34.3
github.com/containers/ocicrypt v1.2.1
github.com/containers/storage v1.57.2
github.com/containers/storage v1.58.0
github.com/docker/distribution v2.8.3+incompatible
github.com/moby/sys/capability v0.4.0
github.com/opencontainers/go-digest v1.0.0
@ -26,13 +26,13 @@ require (
require (
dario.cat/mergo v1.0.1 // indirect
github.com/BurntSushi/toml v1.4.0 // indirect
github.com/BurntSushi/toml v1.5.0 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/Microsoft/hcsshim v0.12.9 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/containerd/cgroups/v3 v3.0.3 // indirect
github.com/containerd/cgroups/v3 v3.0.5 // indirect
github.com/containerd/errdefs v0.3.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
@ -40,7 +40,7 @@ require (
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
github.com/coreos/go-oidc/v3 v3.12.0 // indirect
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
github.com/cyphar/filepath-securejoin v0.3.6 // indirect
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/docker v27.5.1+incompatible // indirect
@ -64,7 +64,7 @@ require (
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-openapi/validate v0.24.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/go-containerregistry v0.20.2 // indirect
github.com/google/go-intervals v0.0.2 // indirect
@ -76,7 +76,7 @@ require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect
github.com/mailru/easyjson v0.7.7 // indirect
@ -87,12 +87,12 @@ require (
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/sys/mountinfo v0.7.2 // indirect
github.com/moby/sys/user v0.3.0 // indirect
github.com/moby/sys/user v0.4.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/opencontainers/runtime-spec v1.2.0 // indirect
github.com/opencontainers/selinux v1.11.1 // indirect
github.com/opencontainers/runtime-spec v1.2.1 // indirect
github.com/opencontainers/selinux v1.12.0 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
github.com/pkg/errors v0.9.1 // indirect
@ -114,7 +114,7 @@ require (
github.com/tchap/go-patricia/v2 v2.3.2 // indirect
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
github.com/ulikunitz/xz v0.5.12 // indirect
github.com/vbatts/tar-split v0.11.7 // indirect
github.com/vbatts/tar-split v0.12.1 // indirect
github.com/vbauerster/mpb/v8 v8.9.1 // indirect
go.mongodb.org/mongo-driver v1.14.0 // indirect
go.opencensus.io v0.24.0 // indirect
@ -127,7 +127,7 @@ require (
golang.org/x/mod v0.22.0 // indirect
golang.org/x/net v0.36.0 // indirect
golang.org/x/oauth2 v0.25.0 // indirect
golang.org/x/sync v0.11.0 // indirect
golang.org/x/sync v0.13.0 // indirect
golang.org/x/sys v0.32.0 // indirect
golang.org/x/text v0.22.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d // indirect

go.sum

@ -6,8 +6,8 @@ github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dY
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4=
github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
@ -29,8 +29,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo=
github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins=
github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
@ -49,15 +49,15 @@ github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYgle
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM=
github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ=
github.com/containers/storage v1.57.2 h1:2roCtTyE9pzIaBDHibK72DTnYkPmwWaq5uXxZdaWK4U=
github.com/containers/storage v1.57.2/go.mod h1:i/Hb4lu7YgFr9G0K6BMjqW0BLJO1sFsnWQwj2UoWCUM=
github.com/containers/storage v1.58.0 h1:Q7SyyCCjqgT3wYNgRNIL8o/wUS92heIj2/cc8Sewvcc=
github.com/containers/storage v1.58.0/go.mod h1:w7Jl6oG+OpeLGLzlLyOZPkmUso40kjpzgrHUk5tyBlo=
github.com/coreos/go-oidc/v3 v3.12.0 h1:sJk+8G2qq94rDI6ehZ71Bol3oUHy63qNYmkiSjrc/Jo=
github.com/coreos/go-oidc/v3 v3.12.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f h1:eHnXnuK47UlSTOQexbzxAZfekVz6i+LKRdj1CU5DPaM=
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
github.com/cyphar/filepath-securejoin v0.3.6 h1:4d9N5ykBnSp5Xn2JkhocYDkOpURL/18CYMpo6xB9uWM=
github.com/cyphar/filepath-securejoin v0.3.6/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@ -128,8 +128,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@ -150,8 +150,9 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo=
github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8=
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
@ -186,8 +187,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@ -219,8 +220,8 @@ github.com/moby/sys/capability v0.4.0 h1:4D4mI6KlNtWMCM1Z/K0i7RV1FkX+DBDHKVJpCnd
github.com/moby/sys/capability v0.4.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I=
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo=
github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@ -244,10 +245,10 @@ github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJw
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/opencontainers/image-tools v1.0.0-rc3 h1:ZR837lBIxq6mmwEqfYrbLMuf75eBSHhccVHy6lsBeM4=
github.com/opencontainers/image-tools v1.0.0-rc3/go.mod h1:A9btVpZLzttF4iFaKNychhPyrhfOjJ1OF5KrA8GcLj4=
github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8=
github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww=
github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.12.0 h1:6n5JV4Cf+4y0KNXW48TLj5DwfXpvWlxXplUkdTrmPb8=
github.com/opencontainers/selinux v1.12.0/go.mod h1:BTPX+bjVbWGXw7ZZWUbdENt8w0htPSrlgOOysQaU62U=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M=
@ -325,8 +326,8 @@ github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHT
github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/vbatts/tar-split v0.11.7 h1:ixZ93pO/GmvaZw4Vq9OwmfZK/kc2zKdPfu0B+gYqs3U=
github.com/vbatts/tar-split v0.11.7/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo=
github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
github.com/vbauerster/mpb/v8 v8.9.1 h1:LH5R3lXPfE2e3lIGxN7WNWv3Hl5nWO6LRi2B0L0ERHw=
github.com/vbauerster/mpb/v8 v8.9.1/go.mod h1:4XMvznPh8nfe2NpnDo1QTPvW9MVkUhbG90mPWvmOzcQ=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
@ -429,8 +430,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -524,9 +525,7 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=


@ -3,7 +3,7 @@ reflection interface similar to Go's standard library `json` and `xml` packages.
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
Documentation: https://godocs.io/github.com/BurntSushi/toml
Documentation: https://pkg.go.dev/github.com/BurntSushi/toml
See the [releases page](https://github.com/BurntSushi/toml/releases) for a
changelog; this information is also in the git tag annotations (e.g. `git show


@ -196,6 +196,19 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error {
return md.unify(primValue.undecoded, rvalue(v))
}
// markDecodedRecursive is a helper to mark any key under the given tmap as
// decoded, recursing as needed
func markDecodedRecursive(md *MetaData, tmap map[string]any) {
for key := range tmap {
md.decoded[md.context.add(key).String()] = struct{}{}
if tmap, ok := tmap[key].(map[string]any); ok {
md.context = append(md.context, key)
markDecodedRecursive(md, tmap)
md.context = md.context[0 : len(md.context)-1]
}
}
}
// unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation.
//
@ -222,6 +235,16 @@ func (md *MetaData) unify(data any, rv reflect.Value) error {
if err != nil {
return md.parseErr(err)
}
// Assume the Unmarshaler decoded everything, so mark all keys under
// this table as decoded.
if tmap, ok := data.(map[string]any); ok {
markDecodedRecursive(md, tmap)
}
if aot, ok := data.([]map[string]any); ok {
for _, tmap := range aot {
markDecodedRecursive(md, tmap)
}
}
return nil
}
if v, ok := rvi.(encoding.TextUnmarshaler); ok {
@ -540,12 +563,14 @@ func (md *MetaData) badtype(dst string, data any) error {
func (md *MetaData) parseErr(err error) error {
k := md.context.String()
d := string(md.data)
return ParseError{
LastKey: k,
Position: md.keyInfo[k].pos,
Line: md.keyInfo[k].pos.Line,
Message: err.Error(),
err: err,
input: string(md.data),
LastKey: k,
Position: md.keyInfo[k].pos.withCol(d),
Line: md.keyInfo[k].pos.Line,
input: d,
}
}
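
To make the effect of the new markDecodedRecursive concrete, here is a minimal sketch (the raw type and the TOML snippet are my own illustration) of a custom toml.Unmarshaler consuming a whole table; with this change, MetaData.Undecoded should no longer flag the keys the Unmarshaler absorbed:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// raw soaks up an entire table via the toml.Unmarshaler interface.
type raw struct{ data any }

func (r *raw) UnmarshalTOML(v any) error {
	r.data = v
	return nil
}

func main() {
	var cfg struct {
		Server raw
	}
	doc := "[server]\nhost = \"localhost\"\nport = 8080\n"
	md, err := toml.Decode(doc, &cfg)
	if err != nil {
		panic(err)
	}
	// Without the recursive marking, this reported server.host and
	// server.port as undecoded even though UnmarshalTOML consumed them;
	// with it, the list is empty.
	fmt.Println(md.Undecoded())
}
```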


@ -402,31 +402,30 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
// Sort keys so that we have deterministic output. And write keys directly
// underneath this key first, before writing sub-structs or sub-maps.
var mapKeysDirect, mapKeysSub []string
var mapKeysDirect, mapKeysSub []reflect.Value
for _, mapKey := range rv.MapKeys() {
k := mapKey.String()
if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) {
mapKeysSub = append(mapKeysSub, k)
mapKeysSub = append(mapKeysSub, mapKey)
} else {
mapKeysDirect = append(mapKeysDirect, k)
mapKeysDirect = append(mapKeysDirect, mapKey)
}
}
var writeMapKeys = func(mapKeys []string, trailC bool) {
sort.Strings(mapKeys)
writeMapKeys := func(mapKeys []reflect.Value, trailC bool) {
sort.Slice(mapKeys, func(i, j int) bool { return mapKeys[i].String() < mapKeys[j].String() })
for i, mapKey := range mapKeys {
val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey)))
val := eindirect(rv.MapIndex(mapKey))
if isNil(val) {
continue
}
if inline {
enc.writeKeyValue(Key{mapKey}, val, true)
enc.writeKeyValue(Key{mapKey.String()}, val, true)
if trailC || i != len(mapKeys)-1 {
enc.wf(", ")
}
} else {
enc.encode(key.add(mapKey), val)
enc.encode(key.add(mapKey.String()), val)
}
}
}
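
The eMap rewrite keeps map keys as reflect.Value instead of round-tripping through .String() and reflect.ValueOf. A plausible motivation (my reading, not stated in the diff) is maps keyed by a defined string type, where the round-trip yields a plain string that cannot index the map. A minimal sketch:

```go
package main

import (
	"os"

	"github.com/BurntSushi/toml"
)

type level string // a defined string type used as a map key

func main() {
	weights := map[level]int{"info": 1, "debug": 2}
	// Keeping the keys as reflect.Value means rv.MapIndex(mapKey) uses the
	// real key type; reflect.ValueOf(k.String()) would produce a plain
	// string, which cannot index a map[level]int.
	if err := toml.NewEncoder(os.Stdout).Encode(map[string]any{"weights": weights}); err != nil {
		panic(err)
	}
}
```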
@ -441,8 +440,6 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
}
}
const is32Bit = (32 << (^uint(0) >> 63)) == 32
func pointerTo(t reflect.Type) reflect.Type {
if t.Kind() == reflect.Ptr {
return pointerTo(t.Elem())
@ -477,15 +474,14 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
frv := eindirect(rv.Field(i))
if is32Bit {
// Copy so it works correct on 32bit archs; not clear why this
// is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
// This also works fine on 64bit, but 32bit archs are somewhat
// rare and this is a wee bit faster.
// Need to make a copy because ... ehm, I don't know why... I guess
// allocating a new array can cause it to fail(?)
//
// Done for: https://github.com/BurntSushi/toml/issues/430
// Previously only on 32bit for: https://github.com/BurntSushi/toml/issues/314
copyStart := make([]int, len(start))
copy(copyStart, start)
start = copyStart
}
// Treat anonymous struct fields with tag names as though they are
// not anonymous, like encoding/json does.
@ -507,7 +503,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
}
addFields(rt, rv, nil)
writeFields := func(fields [][]int) {
writeFields := func(fields [][]int, totalFields int) {
for _, fieldIndex := range fields {
fieldType := rt.FieldByIndex(fieldIndex)
fieldVal := rv.FieldByIndex(fieldIndex)
@ -537,7 +533,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
if inline {
enc.writeKeyValue(Key{keyName}, fieldVal, true)
if fieldIndex[0] != len(fields)-1 {
if fieldIndex[0] != totalFields-1 {
enc.wf(", ")
}
} else {
@ -549,8 +545,10 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
if inline {
enc.wf("{")
}
writeFields(fieldsDirect)
writeFields(fieldsSub)
l := len(fieldsDirect) + len(fieldsSub)
writeFields(fieldsDirect, l)
writeFields(fieldsSub, l)
if inline {
enc.wf("}")
}


@ -67,21 +67,36 @@ type ParseError struct {
// Position of an error.
type Position struct {
Line int // Line number, starting at 1.
Col int // Error column, starting at 1.
Start int // Start of error, as byte offset starting at 0.
Len int // Lenght in bytes.
Len int // Length of the error in bytes.
}
func (p Position) withCol(tomlFile string) Position {
var (
pos int
lines = strings.Split(tomlFile, "\n")
)
for i := range lines {
ll := len(lines[i]) + 1 // +1 for the removed newline
if pos+ll >= p.Start {
p.Col = p.Start - pos + 1
if p.Col < 1 { // Should never happen, but just in case.
p.Col = 1
}
break
}
pos += ll
}
return p
}
func (pe ParseError) Error() string {
msg := pe.Message
if msg == "" { // Error from errorf()
msg = pe.err.Error()
}
if pe.LastKey == "" {
return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, pe.Message)
}
return fmt.Sprintf("toml: line %d (last key %q): %s",
pe.Position.Line, pe.LastKey, msg)
pe.Position.Line, pe.LastKey, pe.Message)
}
// ErrorWithPosition returns the error with detailed location context.
@ -92,26 +107,19 @@ func (pe ParseError) ErrorWithPosition() string {
return pe.Error()
}
var (
lines = strings.Split(pe.input, "\n")
col = pe.column(lines)
b = new(strings.Builder)
)
msg := pe.Message
if msg == "" {
msg = pe.err.Error()
}
// TODO: don't show control characters as literals? This may not show up
// well everywhere.
var (
lines = strings.Split(pe.input, "\n")
b = new(strings.Builder)
)
if pe.Position.Len == 1 {
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n",
msg, pe.Position.Line, col+1)
pe.Message, pe.Position.Line, pe.Position.Col)
} else {
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n",
msg, pe.Position.Line, col, col+pe.Position.Len)
pe.Message, pe.Position.Line, pe.Position.Col, pe.Position.Col+pe.Position.Len-1)
}
if pe.Position.Line > 2 {
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3]))
@ -129,7 +137,7 @@ func (pe ParseError) ErrorWithPosition() string {
diff := len(expanded) - len(lines[pe.Position.Line-1])
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded)
fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col+diff), strings.Repeat("^", pe.Position.Len))
fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", pe.Position.Col-1+diff), strings.Repeat("^", pe.Position.Len))
return b.String()
}
@ -151,23 +159,6 @@ func (pe ParseError) ErrorWithUsage() string {
return m
}
func (pe ParseError) column(lines []string) int {
var pos, col int
for i := range lines {
ll := len(lines[i]) + 1 // +1 for the removed newline
if pos+ll >= pe.Position.Start {
col = pe.Position.Start - pos
if col < 0 { // Should never happen, but just in case.
col = 0
}
break
}
pos += ll
}
return col
}
func expandTab(s string) string {
var (
b strings.Builder
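
The column logic moved from ParseError.column into Position.withCol is small enough to restate standalone; a minimal sketch of the same offset-to-column conversion (the function name is mine):

```go
package main

import (
	"fmt"
	"strings"
)

// colFromOffset mirrors Position.withCol: map a 0-based byte offset in the
// input to a 1-based column within its line.
func colFromOffset(input string, start int) int {
	pos := 0
	for _, line := range strings.Split(input, "\n") {
		ll := len(line) + 1 // +1 for the newline removed by Split
		if pos+ll >= start {
			if col := start - pos + 1; col >= 1 {
				return col
			}
			return 1 // should never happen, but mirrors the guard above
		}
		pos += ll
	}
	return 1
}

func main() {
	doc := "a = 1\nbb = 2\n"
	fmt.Println(colFromOffset(doc, 7)) // byte 7 is the second 'b' on line 2 -> column 2
}
```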


@ -275,7 +275,9 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn {
func (lx *lexer) errorf(format string, values ...any) stateFn {
if lx.atEOF {
pos := lx.getPos()
if lx.pos >= 1 && lx.input[lx.pos-1] == '\n' {
pos.Line--
}
pos.Len = 1
pos.Start = lx.pos - 1
lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)}
@ -492,6 +494,9 @@ func lexKeyEnd(lx *lexer) stateFn {
lx.emit(itemKeyEnd)
return lexSkip(lx, lexValue)
default:
if r == '\n' {
return lx.errorPrevLine(fmt.Errorf("expected '.' or '=', but got %q instead", r))
}
return lx.errorf("expected '.' or '=', but got %q instead", r)
}
}
@ -560,6 +565,9 @@ func lexValue(lx *lexer) stateFn {
if r == eof {
return lx.errorf("unexpected EOF; expected value")
}
if r == '\n' {
return lx.errorPrevLine(fmt.Errorf("expected value but found %q instead", r))
}
return lx.errorf("expected value but found %q instead", r)
}
@ -1111,7 +1119,7 @@ func lexBaseNumberOrDate(lx *lexer) stateFn {
case 'x':
r = lx.peek()
if !isHex(r) {
lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r)
}
return lexHexInteger
}
@ -1259,23 +1267,6 @@ func isBinary(r rune) bool { return r == '0' || r == '1' }
func isOctal(r rune) bool { return r >= '0' && r <= '7' }
func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') }
func isBareKeyChar(r rune, tomlNext bool) bool {
if tomlNext {
return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') ||
r == '_' || r == '-' ||
r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) ||
(r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) ||
(r >= 0x037f && r <= 0x1fff) ||
(r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) ||
(r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) ||
(r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) ||
(r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) ||
(r >= 0x10000 && r <= 0xeffff)
}
return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') ||
r == '_' || r == '-'
return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') || r == '_' || r == '-'
}


@ -135,9 +135,6 @@ func (k Key) maybeQuoted(i int) string {
// Like append(), but only increase the cap by 1.
func (k Key) add(piece string) Key {
if cap(k) > len(k) {
return append(k, piece)
}
newKey := make(Key, len(k)+1)
copy(newKey, k)
newKey[len(k)] = piece
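
The removed fast path in Key.add reused spare capacity in the shared backing array; dropping it reads like a defensive fix for slice aliasing (an inference, the diff gives no rationale). A minimal sketch of the hazard:

```go
package main

import "fmt"

func main() {
	k := make([]string, 2, 3) // len 2, cap 3: one slot of spare capacity
	k[0], k[1] = "a", "b"

	x := append(k, "x")     // writes into the shared backing array
	y := append(k, "y")     // overwrites the same slot
	fmt.Println(x[2], y[2]) // "y" "y": x silently changed
}
```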


@ -50,7 +50,6 @@ func parse(data string) (p *parser, err error) {
// it anyway.
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16
data = data[2:]
//lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447
} else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8
data = data[3:]
}
@ -65,7 +64,7 @@ func parse(data string) (p *parser, err error) {
if i := strings.IndexRune(data[:ex], 0); i > -1 {
return nil, ParseError{
Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8",
Position: Position{Line: 1, Start: i, Len: 1},
Position: Position{Line: 1, Col: 1, Start: i, Len: 1},
Line: 1,
input: data,
}
@ -92,8 +91,9 @@ func parse(data string) (p *parser, err error) {
func (p *parser) panicErr(it item, err error) {
panic(ParseError{
Message: err.Error(),
err: err,
Position: it.pos,
Position: it.pos.withCol(p.lx.input),
Line: it.pos.Len,
LastKey: p.current(),
})
@ -102,7 +102,7 @@ func (p *parser) panicErr(it item, err error) {
func (p *parser) panicItemf(it item, format string, v ...any) {
panic(ParseError{
Message: fmt.Sprintf(format, v...),
Position: it.pos,
Position: it.pos.withCol(p.lx.input),
Line: it.pos.Len,
LastKey: p.current(),
})
@ -111,7 +111,7 @@ func (p *parser) panicItemf(it item, format string, v ...any) {
func (p *parser) panicf(format string, v ...any) {
panic(ParseError{
Message: fmt.Sprintf(format, v...),
Position: p.pos,
Position: p.pos.withCol(p.lx.input),
Line: p.pos.Line,
LastKey: p.current(),
})
@ -123,10 +123,11 @@ func (p *parser) next() item {
if it.typ == itemError {
if it.err != nil {
panic(ParseError{
Position: it.pos,
Message: it.err.Error(),
err: it.err,
Position: it.pos.withCol(p.lx.input),
Line: it.pos.Line,
LastKey: p.current(),
err: it.err,
})
}
@ -527,7 +528,7 @@ func numUnderscoresOK(s string) bool {
}
}
// isHexis a superset of all the permissable characters surrounding an
// isHex is a superset of all the permissible characters surrounding an
// underscore.
accept = isHex(r)
}


@ -23,7 +23,7 @@ env:
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
# VM Image built in containers/automation_images
IMAGE_SUFFIX: "c20250107t132430z-f41f40d13"
IMAGE_SUFFIX: "c20250324t111922z-f41f40d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
@ -126,7 +126,7 @@ lint_task:
folder: $GOPATH/pkg/mod
build_script: |
apt-get update
apt-get install -y libbtrfs-dev
apt-get install -y libbtrfs-dev libsubid-dev
test_script: |
make TAGS=regex_precompile local-validate
make lint
@ -170,13 +170,13 @@ vendor_task:
cross_task:
alias: cross
container:
image: golang:1.22
image: golang:1.23
build_script: make cross
gofix_task:
alias: gofix
container:
image: golang:1.22
image: golang:1.23
build_script: go fix ./...
test_script: git diff --exit-code


@ -1,7 +1,20 @@
---
run:
concurrency: 6
timeout: 5m
linters:
version: "2"
formatters:
enable:
- gofumpt
linters:
enable:
- nolintlint
- unconvert
exclusions:
presets:
- comments
- std-error-handling
settings:
staticcheck:
checks:
- all
- -ST1003 # https://staticcheck.dev/docs/checks/#ST1003 Poorly chosen identifier.
- -QF1008 # https://staticcheck.dev/docs/checks/#QF1008 Omit embedded fields from selector expression.


@ -1,119 +1,10 @@
# Contributing to Containers/Storage
We'd love to have you join the community! Below summarizes the processes
that we follow.
We'd love to have you join the community! [Learn how to contribute](https://github.com/containers/common/blob/main/CONTRIBUTING.md) to the Containers Group Projects.
## Topics
Please note that the following information is specific to this project:
* [Reporting Issues](#reporting-issues)
* [Submitting Pull Requests](#submitting-pull-requests)
* [Communications](#communications)
<!--
* [Becoming a Maintainer](#becoming-a-maintainer)
-->
## Reporting Issues
Before reporting an issue, check our backlog of
[open issues](https://github.com/containers/storage/issues)
to see if someone else has already reported it. If so, feel free to add
your scenario, or additional information, to the discussion. Or simply
"subscribe" to it to be notified when it is updated.
If you find a new issue with the project we'd love to hear about it! The most
important aspect of a bug report is that it includes enough information for
us to reproduce it. So, please include as much detail as possible and try
to remove the extra stuff that doesn't really relate to the issue itself.
The easier it is for us to reproduce it, the faster it'll be fixed!
Please don't include any private/sensitive information in your issue!
## Submitting Pull Requests
No Pull Request (PR) is too small! Typos, additional comments in the code,
new testcases, bug fixes, new features, more documentation, ... it's all
welcome!
While bug fixes can first be identified via an "issue", that is not required.
It's ok to just open up a PR with the fix, but make sure you include the same
information you would have included in an issue - like how to reproduce it.
PRs for new features should include some background on what use cases the
new code is trying to address. When possible and when it makes sense, try to break-up
larger PRs into smaller ones - it's easier to review smaller
code changes. But only if those smaller ones make sense as stand-alone PRs.
Regardless of the type of PR, all PRs should include:
* well documented code changes
* additional testcases. Ideally, they should fail w/o your code change applied
* documentation changes
Squash your commits into logical pieces of work that might want to be reviewed
separate from the rest of the PRs. But, squashing down to just one commit is ok
too since in the end the entire PR will be reviewed anyway. When in doubt,
squash.
PRs that fix issues should include a reference like `Closes #XXXX` in the
commit message so that github will automatically close the referenced issue
when the PR is merged.
<!--
All PRs require at least two LGTMs (Looks Good To Me) from maintainers.
-->
### Sign your PRs
The sign-off is a line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
Then you just add a line to every git commit message:
Signed-off-by: Joe Smith <joe.smith@email.com>
Use your real name (sorry, no pseudonyms or anonymous contributions.)
If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.
* We dont typically require 2 LGTMs for this repository.
## Communications


@ -35,7 +35,7 @@ TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /de
# N/B: This value is managed by Renovate, manual changes are
# possible, as long as they don't disturb the formatting
# (i.e. DO NOT ADD A 'v' prefix!)
GOLANGCI_LINT_VERSION := 1.64.5
GOLANGCI_LINT_VERSION := 2.0.2
default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs


@ -1 +1 @@
1.57.2
1.58.0


@ -82,7 +82,7 @@ type Container struct {
UIDMap []idtools.IDMap `json:"uidmap,omitempty"`
GIDMap []idtools.IDMap `json:"gidmap,omitempty"`
Flags map[string]interface{} `json:"flags,omitempty"`
Flags map[string]any `json:"flags,omitempty"`
// volatileStore is true if the container is from the volatile json file
volatileStore bool `json:"-"`
@ -196,7 +196,7 @@ func (c *Container) MountOpts() []string {
switch value := c.Flags[mountOptsFlag].(type) {
case []string:
return value
case []interface{}:
case []any:
var mountOpts []string
for _, v := range value {
if flag, ok := v.(string); ok {
@ -458,7 +458,7 @@ func (r *containerStore) load(lockedForWriting bool) (bool, error) {
ids := make(map[string]*Container)
for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ {
for locationIndex := range numContainerLocationIndex {
location := containerLocationFromIndex(locationIndex)
rpath := r.jsonPath[locationIndex]
@ -531,7 +531,7 @@ func (r *containerStore) save(saveLocations containerLocations) error {
return err
}
r.lastWrite = lw
for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ {
for locationIndex := range numContainerLocationIndex {
location := containerLocationFromIndex(locationIndex)
if location&saveLocations == 0 {
continue
@ -641,13 +641,13 @@ func (r *containerStore) ClearFlag(id string, flag string) error {
}
// Requires startWriting.
func (r *containerStore) SetFlag(id string, flag string, value interface{}) error {
func (r *containerStore) SetFlag(id string, flag string, value any) error {
container, ok := r.lookup(id)
if !ok {
return ErrContainerUnknown
}
if container.Flags == nil {
container.Flags = make(map[string]interface{})
container.Flags = make(map[string]any)
}
container.Flags[flag] = value
return r.saveFor(container)
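
The loop rewrites here rely on ranging over an integer, added in Go 1.22; the two forms below are equivalent (a minimal sketch):

```go
package main

import "fmt"

func main() {
	for i := 0; i < 3; i++ { // classic counted loop
		fmt.Println("classic", i)
	}
	for i := range 3 { // Go 1.22+: iterates 0, 1, 2
		fmt.Println("range  ", i)
	}
}
```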


@ -111,7 +111,7 @@ type LayerBigDataStore interface {
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type FlaggableStore interface {
ClearFlag(id string, flag string) error
SetFlag(id string, flag string, value interface{}) error
SetFlag(id string, flag string, value any) error
}
// ContainerStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
@ -195,8 +195,8 @@ type LayerStore interface {
FlaggableStore
RWLayerBigDataStore
Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (*Layer, error)
CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error)
Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error)
CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]any) (layer *Layer, err error)
Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]any, diff io.Reader) (*Layer, int64, error)
SetNames(id string, names []string) error
AddNames(id string, names []string) error
RemoveNames(id string, names []string) error

View File

@ -517,7 +517,7 @@ func (a *Driver) isParent(id, parent string) bool {
if parent == "" && len(parents) > 0 {
return false
}
return !(len(parents) > 0 && parent != parents[0])
return len(parents) == 0 || parent == parents[0]
}
// Diff produces an archive of the changes between the specified
@ -778,6 +778,6 @@ func (a *Driver) SupportsShifting() bool {
}
// Dedup performs deduplication of the driver's storage.
func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
func (a *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
return graphdriver.DedupResult{}, nil
}


@ -1,4 +1,4 @@
//go:build linux && !btrfs_noversion && cgo
//go:build linux && cgo
package btrfs


@ -1,14 +0,0 @@
//go:build linux && btrfs_noversion && cgo
package btrfs
// TODO(vbatts) remove this work-around once supported linux distros are on
// btrfs utilities of >= 3.16.1
func btrfsBuildVersion() string {
return "-"
}
func btrfsLibVersion() int {
return -1
}


@ -36,8 +36,8 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai
}
i := inode{
Dev: uint64(st.Dev),
Ino: uint64(st.Ino),
Dev: uint64(st.Dev), //nolint:unconvert
Ino: st.Ino,
}
c.mutex.Lock()


@ -40,7 +40,7 @@ const (
)
// CopyRegularToFile copies the content of a file to another
func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { // nolint: revive,golint
func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint: revive
srcFile, err := os.Open(srcPath)
if err != nil {
return err
@ -72,7 +72,7 @@ func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, c
}
// CopyRegular copies the content of a file to another
func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { // nolint: revive,golint
func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint: revive
// If the destination file already exists, we shouldn't blow it away
dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, fileinfo.Mode())
if err != nil {
@ -160,7 +160,10 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
switch mode := f.Mode(); {
case mode.IsRegular():
id := fileID{dev: uint64(stat.Dev), ino: stat.Ino}
id := fileID{
dev: uint64(stat.Dev), //nolint:unconvert
ino: stat.Ino,
}
if copyMode == Hardlink {
isHardlink = true
if err2 := os.Link(srcPath, dstPath); err2 != nil {
@ -242,12 +245,11 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
}
// system.Chtimes doesn't support a NOFOLLOW flag atm
// nolint: unconvert
if f.IsDir() {
dirsToSetMtimes.PushFront(&dirMtimeInfo{dstPath: &dstPath, stat: stat})
} else if !isSymlink {
aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec))
aTime := time.Unix(stat.Atim.Unix())
mTime := time.Unix(stat.Mtim.Unix())
if err := system.Chtimes(dstPath, aTime, mTime); err != nil {
return err
}


@ -24,7 +24,7 @@ func DirCopy(srcDir, dstDir string, _ Mode, _ bool) error {
}
// CopyRegularToFile copies the content of a file to another
func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint: revive,golint // "func name will be used as copy.CopyRegularToFile by other packages, and that stutters"
func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint: revive // "func name will be used as copy.CopyRegularToFile by other packages, and that stutters"
f, err := os.Open(srcPath)
if err != nil {
return err
@ -35,6 +35,6 @@ func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, c
}
// CopyRegular copies the content of a file to another
func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint:revive,golint // "func name will be used as copy.CopyRegular by other packages, and that stutters"
func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint:revive // "func name will be used as copy.CopyRegular by other packages, and that stutters"
return chrootarchive.NewArchiver(nil).CopyWithTar(srcPath, dstPath)
}


@ -54,8 +54,8 @@ type MountOpts struct {
// Mount label is the MAC Labels to assign to mount point (SELINUX)
MountLabel string
// UidMaps & GidMaps are the User Namespace mappings to be assigned to content in the mount point
UidMaps []idtools.IDMap //nolint: revive,golint
GidMaps []idtools.IDMap //nolint: revive,golint
UidMaps []idtools.IDMap //nolint: revive
GidMaps []idtools.IDMap //nolint: revive
Options []string
// Volatile specifies whether the container storage can be optimized
@ -79,7 +79,7 @@ type ApplyDiffOpts struct {
type ApplyDiffWithDifferOpts struct {
ApplyDiffOpts
Flags map[string]interface{}
Flags map[string]any
}
// DedupArgs contains the information to perform storage deduplication.
@ -222,7 +222,7 @@ type DriverWithDifferOutput struct {
RootDirMode *os.FileMode
// Artifacts is a collection of additional artifacts
// generated by the differ that the storage driver can use.
Artifacts map[string]interface{}
Artifacts map[string]any
}
type DifferOutputFormat int


@ -259,7 +259,7 @@ func supportsIdmappedLowerLayers(home string) (bool, error) {
}
defer cleanupFunc()
if err := idmap.CreateIDMappedMount(lowerDir, lowerMappedDir, int(pid)); err != nil {
if err := idmap.CreateIDMappedMount(lowerDir, lowerMappedDir, pid); err != nil {
return false, fmt.Errorf("create mapped mount: %w", err)
}
defer func() {


@ -42,7 +42,7 @@ func getComposefsBlob(dataDir string) string {
return filepath.Join(dataDir, "composefs.blob")
}
func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error {
func generateComposeFsBlob(verityDigests map[string]string, toc any, composefsDir string) error {
if err := os.MkdirAll(composefsDir, 0o700); err != nil {
return err
}
@ -53,7 +53,7 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, com
}
destFile := getComposefsBlob(composefsDir)
writerJson, err := getComposeFsHelper()
writerJSON, err := getComposeFsHelper()
if err != nil {
return fmt.Errorf("failed to find mkcomposefs: %w", err)
}
@ -74,7 +74,7 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, com
defer outFile.Close()
errBuf := &bytes.Buffer{}
cmd := exec.Command(writerJson, "--from-file", "-", "-")
cmd := exec.Command(writerJSON, "--from-file", "-", "-")
cmd.Stderr = errBuf
cmd.Stdin = dumpReader
cmd.Stdout = outFile


@ -36,7 +36,6 @@ import (
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/unshare"
units "github.com/docker/go-units"
"github.com/hashicorp/go-multierror"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
@ -131,6 +130,9 @@ type Driver struct {
usingMetacopy bool
usingComposefs bool
stagingDirsLocksMutex sync.Mutex
// stagingDirsLocks access is not thread safe, it is required that callers take
// stagingDirsLocksMutex on each access to guard against concurrent map writes.
stagingDirsLocks map[string]*lockfile.LockFile
supportsIDMappedMounts *bool
@ -419,7 +421,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
if !opts.skipMountHome {
if err := mount.MakePrivate(home); err != nil {
return nil, err
return nil, fmt.Errorf("overlay: failed to make mount private: %w", err)
}
}
@ -439,6 +441,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
supportsVolatile: supportsVolatile,
usingComposefs: opts.useComposefs,
options: *opts,
stagingDirsLocksMutex: sync.Mutex{},
stagingDirsLocks: make(map[string]*lockfile.LockFile),
}
@ -489,7 +492,7 @@ func parseOptions(options []string) (*overlayOptions, error) {
if err != nil {
return nil, err
}
o.quota.Inodes = uint64(inodes)
o.quota.Inodes = inodes
case "imagestore", "additionalimagestore":
logrus.Debugf("overlay: imagestore=%s", val)
// Additional read only image stores to use for lower paths
@ -640,6 +643,8 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) {
case "true":
logrus.Debugf("overlay: storage already configured with a mount-program")
return false, nil
case "false":
// Do nothing.
default:
needsMountProgram, err := scanForMountProgramIndicators(home)
if err != nil && !os.IsNotExist(err) {
@ -653,7 +658,6 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) {
}
// fall through to check if we find ourselves needing to use a
// mount program now
case "false":
}
for _, dir := range []string{home, runhome} {
@ -868,10 +872,12 @@ func (d *Driver) Cleanup() error {
// pruneStagingDirectories cleans up any staging directory that was leaked.
// It returns whether any staging directory is still present.
func (d *Driver) pruneStagingDirectories() bool {
d.stagingDirsLocksMutex.Lock()
for _, lock := range d.stagingDirsLocks {
lock.Unlock()
}
d.stagingDirsLocks = make(map[string]*lockfile.LockFile)
clear(d.stagingDirsLocks)
d.stagingDirsLocksMutex.Unlock()
anyPresent := false
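
clear() is the Go 1.21 builtin; unlike re-making the map, it empties it in place, so any other references to the same map observe the reset. A minimal sketch:

```go
package main

import "fmt"

func main() {
	locks := map[string]int{"a": 1, "b": 2}
	alias := locks // second reference to the same map

	clear(locks)                        // Go 1.21+: deletes every entry in place
	fmt.Println(len(locks), len(alias)) // 0 0: the alias sees the reset too
}
```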
@ -1157,7 +1163,7 @@ func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) e
if err != nil {
return err
}
driver.options.quota.Inodes = uint64(inodes)
driver.options.quota.Inodes = inodes
default:
return fmt.Errorf("unknown option %s", key)
}
@ -1343,7 +1349,7 @@ func (d *Driver) recreateSymlinks() error {
return err
}
// Keep looping as long as we take some corrective action in each iteration
var errs *multierror.Error
var errs error
madeProgress := true
iterations := 0
for madeProgress {
@ -1359,7 +1365,7 @@ func (d *Driver) recreateSymlinks() error {
// Read the "link" file under each layer to get the name of the symlink
data, err := os.ReadFile(path.Join(d.dir(dir.Name()), "link"))
if err != nil {
errs = multierror.Append(errs, fmt.Errorf("reading name of symlink for %q: %w", dir.Name(), err))
errs = errors.Join(errs, fmt.Errorf("reading name of symlink for %q: %w", dir.Name(), err))
continue
}
linkPath := path.Join(d.home, linkDir, strings.Trim(string(data), "\n"))
@ -1368,12 +1374,12 @@ func (d *Driver) recreateSymlinks() error {
err = fileutils.Lexists(linkPath)
if err != nil && os.IsNotExist(err) {
if err := os.Symlink(path.Join("..", dir.Name(), "diff"), linkPath); err != nil {
errs = multierror.Append(errs, err)
errs = errors.Join(errs, err)
continue
}
madeProgress = true
} else if err != nil {
errs = multierror.Append(errs, err)
errs = errors.Join(errs, err)
continue
}
}
@ -1384,7 +1390,7 @@ func (d *Driver) recreateSymlinks() error {
// that each symlink we have corresponds to one.
links, err := os.ReadDir(linkDirFullPath)
if err != nil {
errs = multierror.Append(errs, err)
errs = errors.Join(errs, err)
continue
}
// Go through all of the symlinks in the "l" directory
@ -1392,16 +1398,16 @@ func (d *Driver) recreateSymlinks() error {
// Read the symlink's target, which should be "../$layer/diff"
target, err := os.Readlink(filepath.Join(linkDirFullPath, link.Name()))
if err != nil {
errs = multierror.Append(errs, err)
errs = errors.Join(errs, err)
continue
}
targetComponents := strings.Split(target, string(os.PathSeparator))
if len(targetComponents) != 3 || targetComponents[0] != ".." || targetComponents[2] != "diff" {
errs = multierror.Append(errs, fmt.Errorf("link target of %q looks weird: %q", link, target))
errs = errors.Join(errs, fmt.Errorf("link target of %q looks weird: %q", link, target))
// force the link to be recreated on the next pass
if err := os.Remove(filepath.Join(linkDirFullPath, link.Name())); err != nil {
if !os.IsNotExist(err) {
errs = multierror.Append(errs, fmt.Errorf("removing link %q: %w", link, err))
errs = errors.Join(errs, fmt.Errorf("removing link %q: %w", link, err))
} // else dont report any error, but also dont set madeProgress.
continue
}
@ -1417,7 +1423,7 @@ func (d *Driver) recreateSymlinks() error {
// NOTE: If two or more links point to the same target, we will update linkFile
// with every value of link.Name(), and set madeProgress = true every time.
if err := os.WriteFile(linkFile, []byte(link.Name()), 0o644); err != nil {
errs = multierror.Append(errs, fmt.Errorf("correcting link for layer %s: %w", targetID, err))
errs = errors.Join(errs, fmt.Errorf("correcting link for layer %s: %w", targetID, err))
continue
}
madeProgress = true
@ -1425,14 +1431,11 @@ func (d *Driver) recreateSymlinks() error {
}
iterations++
if iterations >= maxIterations {
errs = multierror.Append(errs, fmt.Errorf("reached %d iterations in overlay graph drivers recreateSymlink, giving up", iterations))
errs = errors.Join(errs, fmt.Errorf("reached %d iterations in overlay graph drivers recreateSymlink, giving up", iterations))
break
}
}
if errs != nil {
return errs.ErrorOrNil()
}
return nil
return errs
}
// Get creates and mounts the required file system for the given id and returns the mount path.
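
The hashicorp/go-multierror replacement throughout this file works because errors.Join (Go 1.20) treats nil operands as no-ops and returns nil when nothing was joined, which removes the ErrorOrNil() step. A minimal sketch (process is a stand-in of mine):

```go
package main

import (
	"errors"
	"fmt"
)

func process(name string) error {
	if name == "" {
		return errors.New("empty name")
	}
	return nil
}

func processAll(names []string) error {
	var errs error // stays nil while every call succeeds
	for _, n := range names {
		if err := process(n); err != nil {
			errs = errors.Join(errs, fmt.Errorf("processing %q: %w", n, err))
		}
	}
	return errs // no ErrorOrNil() needed: nil in, nil out
}

func main() {
	fmt.Println(processAll([]string{"a", "", ""}))
	fmt.Println(processAll([]string{"a", "b"})) // <nil>
}
```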
@ -1548,7 +1551,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
permsKnown := false
st, err := os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN)))
if err == nil {
perms = os.FileMode(st.Mode())
perms = st.Mode()
permsKnown = true
}
for err == nil {
@ -1563,7 +1566,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
if err != nil {
return "", err
}
idmappedMountProcessPid = int(pid)
idmappedMountProcessPid = pid
defer cleanupFunc()
}
@ -1635,7 +1638,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
lower = path.Join(p, d.name, l)
if st2, err2 := os.Stat(lower); err2 == nil {
if !permsKnown {
perms = os.FileMode(st2.Mode())
perms = st2.Mode()
permsKnown = true
}
break
@ -1656,7 +1659,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
} else {
if !permsKnown {
perms = os.FileMode(st.Mode())
perms = st.Mode()
permsKnown = true
}
lower = newpath
@ -2103,17 +2106,16 @@ func (g *overlayFileGetter) Get(path string) (io.ReadCloser, error) {
return nil, fmt.Errorf("%s: %w", path, os.ErrNotExist)
}
func (g *overlayFileGetter) Close() error {
var errs *multierror.Error
func (g *overlayFileGetter) Close() (errs error) {
for _, f := range g.composefsMounts {
if err := f.Close(); err != nil {
errs = multierror.Append(errs, err)
errs = errors.Join(errs, err)
}
if err := unix.Rmdir(f.Name()); err != nil {
errs = multierror.Append(errs, err)
errs = errors.Join(errs, err)
}
}
return errs.ErrorOrNil()
return errs
}
// newStagingDir creates a new staging directory and returns the path to it.
@ -2173,10 +2175,12 @@ func (d *Driver) DiffGetter(id string) (_ graphdriver.FileGetCloser, Err error)
func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error {
parentStagingDir := filepath.Dir(stagingDirectory)
d.stagingDirsLocksMutex.Lock()
if lock, ok := d.stagingDirsLocks[parentStagingDir]; ok {
delete(d.stagingDirsLocks, parentStagingDir)
lock.Unlock()
}
d.stagingDirsLocksMutex.Unlock()
return os.RemoveAll(parentStagingDir)
}
@ -2235,11 +2239,15 @@ func (d *Driver) ApplyDiffWithDiffer(options *graphdriver.ApplyDiffWithDifferOpt
}
defer func() {
if errRet != nil {
d.stagingDirsLocksMutex.Lock()
delete(d.stagingDirsLocks, layerDir)
d.stagingDirsLocksMutex.Unlock()
lock.Unlock()
}
}()
d.stagingDirsLocksMutex.Lock()
d.stagingDirsLocks[layerDir] = lock
d.stagingDirsLocksMutex.Unlock()
lock.Lock()
logrus.Debugf("Applying differ in %s", applyDir)
@ -2271,10 +2279,12 @@ func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *gr
parentStagingDir := filepath.Dir(stagingDirectory)
defer func() {
d.stagingDirsLocksMutex.Lock()
if lock, ok := d.stagingDirsLocks[parentStagingDir]; ok {
delete(d.stagingDirsLocks, parentStagingDir)
lock.Unlock()
}
d.stagingDirsLocksMutex.Unlock()
}()
diffPath, err := d.getDiffPath(id)
@ -2495,7 +2505,7 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
perms = *d.options.forceMask
} else {
if err == nil {
perms = os.FileMode(st.Mode())
perms = st.Mode()
}
}
for err == nil {


@ -190,7 +190,8 @@ func NewControl(basePath string) (*Control, error) {
}
// SetQuota - assign a unique project id to directory and set the quota limits
// for that project id
// for that project id.
// targetPath must exist, must be a directory, and must be empty.
func (q *Control) SetQuota(targetPath string, quota Quota) error {
var projectID uint32
value, ok := q.quotas.Load(targetPath)
@ -200,10 +201,20 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error {
if !ok {
projectID = q.nextProjectID
// The directory we are setting an ID on must be empty, as
// the ID will not be propagated to pre-existing subdirectories.
dents, err := os.ReadDir(targetPath)
if err != nil {
return fmt.Errorf("reading directory %s: %w", targetPath, err)
}
if len(dents) > 0 {
return fmt.Errorf("can only set project ID on empty directories, %s is not empty", targetPath)
}
//
// assign project id to new container directory
//
err := setProjectID(targetPath, projectID)
err = setProjectID(targetPath, projectID)
if err != nil {
return err
}
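
The new emptiness check exists because XFS/ext4 project IDs apply only to entries created after the ID is assigned. A hedged sketch of the intended call order, assuming the drivers/quota import path; the Size field and all paths are illustrative assumptions:

package quotademo

import (
	"os"

	"github.com/containers/storage/drivers/quota"
)

// setupQuotaDir assigns the project quota while the directory is still
// empty; anything created beforehand would escape the project ID.
func setupQuotaDir(base, dir string, bytes uint64) error {
	ctrl, err := quota.NewControl(base) // base must live on a project-quota filesystem
	if err != nil {
		return err
	}
	if err := os.Mkdir(dir, 0o700); err != nil {
		return err
	}
	return ctrl.SetQuota(dir, quota.Quota{Size: bytes})
}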


@ -93,7 +93,7 @@ type Image struct {
// ReadOnly is true if this image resides in a read-only layer store.
ReadOnly bool `json:"-"`
Flags map[string]interface{} `json:"flags,omitempty"`
Flags map[string]any `json:"flags,omitempty"`
}
// roImageStore provides bookkeeping for information about Images.
@ -675,7 +675,7 @@ func (r *imageStore) ClearFlag(id string, flag string) error {
}
// Requires startWriting.
func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
func (r *imageStore) SetFlag(id string, flag string, value any) error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to set flags on images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
@ -684,7 +684,7 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
if image.Flags == nil {
image.Flags = make(map[string]interface{})
image.Flags = make(map[string]any)
}
image.Flags[flag] = value
return r.Save()
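
The interface{} → any rewrites throughout this commit are purely cosmetic: any has been an alias for interface{} since Go 1.18, so the two spellings denote the identical type. A tiny demonstration:

package main

import "fmt"

func describe(flags map[string]any) {
	for k, v := range flags {
		fmt.Printf("%s=%v (%T)\n", k, v, v)
	}
}

func main() {
	// A map[string]interface{} value is assignable with no conversion.
	var legacy map[string]interface{} = map[string]any{"incomplete": true}
	describe(legacy)
}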


@ -13,7 +13,7 @@ import (
"github.com/sirupsen/logrus"
)
var notSupported = errors.New("reflinks are not supported on this platform")
var errNotSupported = errors.New("reflinks are not supported on this platform")
const (
DedupHashInvalid DedupHashMethod = iota
@ -134,7 +134,7 @@ func DedupDirs(dirs []string, options DedupOptions) (DedupResult, error) {
break
}
logrus.Debugf("Failed to deduplicate: %v", err)
if errors.Is(err, notSupported) {
if errors.Is(err, errNotSupported) {
return dedupBytes, err
}
}
@ -153,7 +153,7 @@ func DedupDirs(dirs []string, options DedupOptions) (DedupResult, error) {
return nil
}); err != nil {
// if reflinks are not supported, return immediately without errors
if errors.Is(err, notSupported) {
if errors.Is(err, errNotSupported) {
return res, nil
}
return res, err
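
The rename to errNotSupported follows the Go naming convention for sentinel errors; the errors.Is checks are otherwise unchanged. A standalone sketch of why the sentinel survives wrapping, with an illustrative caller:

package main

import (
	"errors"
	"fmt"
)

var errNotSupported = errors.New("reflinks are not supported on this platform")

func dedupOne() error {
	// Wrapping with %w preserves the chain back to the sentinel.
	return fmt.Errorf("deduplicating layer: %w", errNotSupported)
}

func main() {
	err := dedupOne()
	fmt.Println(errors.Is(err, errNotSupported)) // true
}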


@ -48,7 +48,7 @@ func (d *dedupFiles) isFirstVisitOf(fi fs.FileInfo) (bool, error) {
if !ok {
return false, fmt.Errorf("unable to get raw syscall.Stat_t data")
}
return d.recordInode(uint64(st.Dev), st.Ino)
return d.recordInode(uint64(st.Dev), st.Ino) //nolint:unconvert
}
// dedup deduplicates the file at src path to dst path
@ -94,11 +94,11 @@ func (d *dedupFiles) dedup(src, dst string, fiDst fs.FileInfo) (uint64, error) {
}
err = unix.IoctlFileDedupeRange(int(srcFile.Fd()), &value)
if err == nil {
return uint64(value.Info[0].Bytes_deduped), nil
return value.Info[0].Bytes_deduped, nil
}
if errors.Is(err, unix.ENOTSUP) {
return 0, notSupported
return 0, errNotSupported
}
return 0, fmt.Errorf("failed to clone file %q: %w", src, err)
}


@ -9,19 +9,19 @@ import (
type dedupFiles struct{}
func newDedupFiles() (*dedupFiles, error) {
return nil, notSupported
return nil, errNotSupported
}
// isFirstVisitOf records that the file is being processed. Returns true if the file was already visited.
func (d *dedupFiles) isFirstVisitOf(fi fs.FileInfo) (bool, error) {
return false, notSupported
return false, errNotSupported
}
// dedup deduplicates the file at src path to dst path
func (d *dedupFiles) dedup(src, dst string, fiDst fs.FileInfo) (uint64, error) {
return 0, notSupported
return 0, errNotSupported
}
func readAllFile(path string, info fs.FileInfo, fn func([]byte) (string, error)) (string, error) {
return "", notSupported
return "", errNotSupported
}


@ -6,6 +6,7 @@ import (
"fmt"
"io"
"maps"
"math/bits"
"os"
"path"
"path/filepath"
@ -26,7 +27,6 @@ import (
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/tarlog"
"github.com/containers/storage/pkg/truncindex"
multierror "github.com/hashicorp/go-multierror"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/selinux/go-selinux"
@ -47,11 +47,13 @@ const (
type layerLocations uint8
// The backing store is split in two json files, one (the volatile)
// that is written without fsync() meaning it isn't as robust to
// unclean shutdown
// The backing store is split in three json files.
// The volatile store is written without fsync() meaning it isn't as robust to unclean shutdown.
// Optionally, an image store can be configured to store RO layers.
// The stable store is used for the remaining layers that don't go into the other stores.
const (
stableLayerLocation layerLocations = 1 << iota
imageStoreLayerLocation
volatileLayerLocation
numLayerLocationIndex = iota
@ -61,6 +63,10 @@ func layerLocationFromIndex(index int) layerLocations {
return 1 << index
}
func indexFromLayerLocation(location layerLocations) int {
return bits.TrailingZeros(uint(location))
}
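
The three locations are single-bit flags, so a set of modified stores can be OR-ed into one mask, and each flag maps back to its json-path index via bits.TrailingZeros. A self-contained sketch reusing the definitions above, with a toy main:

package main

import (
	"fmt"
	"math/bits"
)

type layerLocations uint8

const (
	stableLayerLocation layerLocations = 1 << iota
	imageStoreLayerLocation
	volatileLayerLocation
	numLayerLocationIndex = iota
)

func layerLocationFromIndex(index int) layerLocations { return 1 << index }

func indexFromLayerLocation(location layerLocations) int {
	return bits.TrailingZeros(uint(location))
}

func main() {
	modified := stableLayerLocation | volatileLayerLocation // an OR-ed "dirty" mask
	for i := range numLayerLocationIndex {
		loc := layerLocationFromIndex(i)
		fmt.Printf("index %d flag %03b modified=%t roundtrip=%d\n",
			i, loc, modified&loc != 0, indexFromLayerLocation(loc))
	}
}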
// A Layer is a record of a copy-on-write layer that's stored by the lower
// level graph driver.
type Layer struct {
@ -155,7 +161,7 @@ type Layer struct {
GIDs []uint32 `json:"gidset,omitempty"`
// Flags is arbitrary data about the layer.
Flags map[string]interface{} `json:"flags,omitempty"`
Flags map[string]any `json:"flags,omitempty"`
// UIDMap and GIDMap are used for setting up a layer's contents
// for use inside of a user namespace where UID mapping is being used.
@ -165,8 +171,8 @@ type Layer struct {
// ReadOnly is true if this layer resides in a read-only layer store.
ReadOnly bool `json:"-"`
// volatileStore is true if the container is from the volatile json file
volatileStore bool `json:"-"`
// location is the location of the store where the layer is present.
location layerLocations `json:"-"`
// BigDataNames is a list of names of data items that we keep for the
// convenience of the caller. They can be large, and are only in
@ -431,14 +437,6 @@ type layerStore struct {
driver drivers.Driver
}
// The caller must hold r.inProcessLock for reading.
func layerLocation(l *Layer) layerLocations {
if l.volatileStore {
return volatileLayerLocation
}
return stableLayerLocation
}
func copyLayer(l *Layer) *Layer {
return &Layer{
ID: l.ID,
@ -456,7 +454,7 @@ func copyLayer(l *Layer) *Layer {
TOCDigest: l.TOCDigest,
CompressionType: l.CompressionType,
ReadOnly: l.ReadOnly,
volatileStore: l.volatileStore,
location: l.location,
BigDataNames: copySlicePreferringNil(l.BigDataNames),
Flags: copyMapPreferringNil(l.Flags),
UIDMap: copySlicePreferringNil(l.UIDMap),
@ -658,8 +656,12 @@ func (r *layerStore) layersModified() (lockfile.LastWrite, bool, error) {
// If the layers.json file or container-layers.json has been
// modified manually, then we have to reload the storage in
// any case.
for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
info, err := os.Stat(r.jsonPath[locationIndex])
for locationIndex := range numLayerLocationIndex {
rpath := r.jsonPath[locationIndex]
if rpath == "" {
continue
}
info, err := os.Stat(rpath)
if err != nil && !os.IsNotExist(err) {
return lockfile.LastWrite{}, false, fmt.Errorf("stat layers file: %w", err)
}
@ -792,9 +794,12 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
layers := []*Layer{}
ids := make(map[string]*Layer)
for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
for locationIndex := range numLayerLocationIndex {
location := layerLocationFromIndex(locationIndex)
rpath := r.jsonPath[locationIndex]
if rpath == "" {
continue
}
info, err := os.Stat(rpath)
if err != nil {
if !os.IsNotExist(err) {
@ -821,9 +826,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
continue // skip invalid duplicated layer
}
// Remember where the layer came from
if location == volatileLayerLocation {
layer.volatileStore = true
}
layer.location = location
layers = append(layers, layer)
ids[layer.ID] = layer
}
@ -844,7 +847,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
if conflict, ok := names[name]; ok {
r.removeName(conflict, name)
errorToResolveBySaving = ErrDuplicateLayerNames
modifiedLocations |= layerLocation(conflict)
modifiedLocations |= conflict.location
}
names[name] = layers[n]
}
@ -919,7 +922,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
var layersToDelete []*Layer
for _, layer := range r.layers {
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
layer.Flags = make(map[string]any)
}
if layerHasIncompleteFlag(layer) {
// Important: Do not call r.deleteInternal() here. It modifies r.layers
@ -937,10 +940,10 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
// Don't return the error immediately, because deleteInternal does not saveLayers();
// Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully
// deleted incomplete layers have their metadata correctly removed.
incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors,
incompleteDeletionErrors = errors.Join(incompleteDeletionErrors,
fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
}
modifiedLocations |= layerLocation(layer)
modifiedLocations |= layer.location
}
if err := r.saveLayers(modifiedLocations); err != nil {
return false, err
@ -1009,7 +1012,7 @@ func (r *layerStore) save(saveLocations layerLocations) error {
// The caller must hold r.lockfile locked for writing.
// The caller must hold r.inProcessLock for WRITING.
func (r *layerStore) saveFor(modifiedLayer *Layer) error {
return r.save(layerLocation(modifiedLayer))
return r.save(modifiedLayer.location)
}
// The caller must hold r.lockfile locked for writing.
@ -1028,18 +1031,21 @@ func (r *layerStore) saveLayers(saveLocations layerLocations) error {
}
r.lastWrite = lw
for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
for locationIndex := range numLayerLocationIndex {
location := layerLocationFromIndex(locationIndex)
if location&saveLocations == 0 {
continue
}
rpath := r.jsonPath[locationIndex]
if rpath == "" {
return fmt.Errorf("internal error: no path for location %v", location)
}
if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil {
return err
}
subsetLayers := make([]*Layer, 0, len(r.layers))
for _, layer := range r.layers {
if layerLocation(layer) == location {
if layer.location == location {
subsetLayers = append(subsetLayers, layer)
}
}
@ -1139,12 +1145,17 @@ func (s *store) newLayerStore(rundir, layerdir, imagedir string, driver drivers.
if transient {
volatileDir = rundir
}
layersImageDir := ""
if imagedir != "" {
layersImageDir = filepath.Join(imagedir, "layers.json")
}
rlstore := layerStore{
lockfile: newMultipleLockFile(lockFiles...),
mountsLockfile: mountsLockfile,
rundir: rundir,
jsonPath: [numLayerLocationIndex]string{
filepath.Join(layerdir, "layers.json"),
layersImageDir,
filepath.Join(volatileDir, "volatile-layers.json"),
},
layerdir: layerdir,
@ -1182,6 +1193,7 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roL
rundir: rundir,
jsonPath: [numLayerLocationIndex]string{
filepath.Join(layerdir, "layers.json"),
"",
filepath.Join(layerdir, "volatile-layers.json"),
},
layerdir: layerdir,
@ -1249,7 +1261,7 @@ func (r *layerStore) ClearFlag(id string, flag string) error {
}
// Requires startWriting.
func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
func (r *layerStore) SetFlag(id string, flag string, value any) error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to set flags on layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
@ -1258,7 +1270,7 @@ func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
return ErrLayerUnknown
}
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
layer.Flags = make(map[string]any)
}
layer.Flags[flag] = value
return r.saveFor(layer)
@ -1330,6 +1342,17 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
return copyLayer(layer), nil
}
func (r *layerStore) pickStoreLocation(volatile, writeable bool) layerLocations {
switch {
case volatile:
return volatileLayerLocation
case !writeable && r.jsonPath[indexFromLayerLocation(imageStoreLayerLocation)] != "":
return imageStoreLayerLocation
default:
return stableLayerLocation
}
}
// Requires startWriting.
func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader, slo *stagedLayerOptions) (layer *Layer, size int64, err error) {
if moreOptions == nil {
@ -1422,7 +1445,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
UIDMap: copySlicePreferringNil(moreOptions.UIDMap),
GIDMap: copySlicePreferringNil(moreOptions.GIDMap),
BigDataNames: []string{},
volatileStore: moreOptions.Volatile,
location: r.pickStoreLocation(moreOptions.Volatile, writeable),
}
layer.Flags[incompleteFlag] = true
@ -1908,7 +1931,7 @@ func (r *layerStore) deleteInternal(id string) error {
// Ensure that if we are interrupted, the layer will be cleaned up.
if !layerHasIncompleteFlag(layer) {
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
layer.Flags = make(map[string]any)
}
layer.Flags[incompleteFlag] = true
if err := r.saveFor(layer); err != nil {
@ -2256,33 +2279,33 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
// but they modify in-memory state.
fgetter, err := r.newFileGetter(to)
if err != nil {
errs := multierror.Append(nil, fmt.Errorf("creating file-getter: %w", err))
errs := fmt.Errorf("creating file-getter: %w", err)
if err := decompressor.Close(); err != nil {
errs = multierror.Append(errs, fmt.Errorf("closing decompressor: %w", err))
errs = errors.Join(errs, fmt.Errorf("closing decompressor: %w", err))
}
if err := tsfile.Close(); err != nil {
errs = multierror.Append(errs, fmt.Errorf("closing tarstream headers: %w", err))
errs = errors.Join(errs, fmt.Errorf("closing tarstream headers: %w", err))
}
return nil, errs.ErrorOrNil()
return nil, errs
}
tarstream := asm.NewOutputTarStream(fgetter, metadata)
rc := ioutils.NewReadCloserWrapper(tarstream, func() error {
var errs *multierror.Error
var errs error
if err := decompressor.Close(); err != nil {
errs = multierror.Append(errs, fmt.Errorf("closing decompressor: %w", err))
errs = errors.Join(errs, fmt.Errorf("closing decompressor: %w", err))
}
if err := tsfile.Close(); err != nil {
errs = multierror.Append(errs, fmt.Errorf("closing tarstream headers: %w", err))
errs = errors.Join(errs, fmt.Errorf("closing tarstream headers: %w", err))
}
if err := tarstream.Close(); err != nil {
errs = multierror.Append(errs, fmt.Errorf("closing reconstructed tarstream: %w", err))
errs = errors.Join(errs, fmt.Errorf("closing reconstructed tarstream: %w", err))
}
if err := fgetter.Close(); err != nil {
errs = multierror.Append(errs, fmt.Errorf("closing file-getter: %w", err))
errs = errors.Join(errs, fmt.Errorf("closing file-getter: %w", err))
}
if errs != nil {
return errs.ErrorOrNil()
return errs
}
return nil
})
@ -2452,16 +2475,12 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
for uid := range uidLog {
layer.UIDs = append(layer.UIDs, uid)
}
sort.Slice(layer.UIDs, func(i, j int) bool {
return layer.UIDs[i] < layer.UIDs[j]
})
slices.Sort(layer.UIDs)
layer.GIDs = make([]uint32, 0, len(gidLog))
for gid := range gidLog {
layer.GIDs = append(layer.GIDs, gid)
}
sort.Slice(layer.GIDs, func(i, j int) bool {
return layer.GIDs[i] < layer.GIDs[j]
})
slices.Sort(layer.GIDs)
err = r.saveFor(layer)
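
sort.Slice with a less-closure gives way to slices.Sort (Go 1.21), which works for any ordered element type such as the uint32 ID lists here:

package main

import (
	"fmt"
	"slices"
)

func main() {
	uids := []uint32{1000, 0, 65534, 7}
	slices.Sort(uids) // no comparator needed for ordered types
	fmt.Println(uids) // [0 7 1000 65534]
}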
@ -2517,7 +2536,7 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver
layer.Metadata = diffOutput.Metadata
if options != nil && options.Flags != nil {
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
layer.Flags = make(map[string]any)
}
maps.Copy(layer.Flags, options.Flags)
}


@ -16,6 +16,7 @@ import (
"strings"
"sync"
"syscall"
"time"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
@ -52,7 +53,7 @@ type (
// This is additional data to be used by the converter. It will
// not survive a round trip through JSON, so it's primarily
// intended for generating archives (i.e., converting writes).
WhiteoutData interface{}
WhiteoutData any
// When unpacking, specifies whether overwriting a directory with a
// non-directory is allowed and vice versa.
NoOverwriteDirNonDir bool
@ -67,6 +68,8 @@ type (
CopyPass bool
// ForceMask, if set, indicates the permission mask used for created files.
ForceMask *os.FileMode
// Timestamp, if set, will be set in each header as create/mod/access time
Timestamp *time.Time
}
)
@ -78,10 +81,9 @@ const (
windows = "windows"
darwin = "darwin"
freebsd = "freebsd"
linux = "linux"
)
var xattrsToIgnore = map[string]interface{}{
var xattrsToIgnore = map[string]any{
"security.selinux": true,
}
@ -179,6 +181,7 @@ func DecompressStream(archive io.Reader) (_ io.ReadCloser, Err error) {
defer func() {
if Err != nil {
// In the normal case, the buffer is embedded in the ReadCloser return.
p.Put(buf)
}
}()
@ -375,7 +378,7 @@ type nosysFileInfo struct {
os.FileInfo
}
func (fi nosysFileInfo) Sys() interface{} {
func (fi nosysFileInfo) Sys() any {
// A Sys value of type *tar.Header is safe as it is system-independent.
// The tar.FileInfoHeader function copies the fields into the returned
// header without performing any OS lookups.
@ -475,7 +478,7 @@ type TarWhiteoutConverter interface {
ConvertReadWithHandler(*tar.Header, string, TarWhiteoutHandler) (bool, error)
}
type tarAppender struct {
type tarWriter struct {
TarWriter *tar.Writer
Buffer *bufio.Writer
@ -494,15 +497,19 @@ type tarAppender struct {
// from the traditional behavior/format to get features like subsecond
// precision in timestamps.
CopyPass bool
// Timestamp, if set, will be set in each header as create/mod/access time
Timestamp *time.Time
}
func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender {
return &tarAppender{
func newTarWriter(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair, timestamp *time.Time) *tarWriter {
return &tarWriter{
SeenFiles: make(map[uint64]string),
TarWriter: tar.NewWriter(writer),
Buffer: pools.BufioWriter32KPool.Get(nil),
IDMappings: idMapping,
ChownOpts: chownOpts,
Timestamp: timestamp,
}
}
@ -521,8 +528,8 @@ func canonicalTarName(name string, isDir bool) (string, error) {
return name, nil
}
// addTarFile adds to the tar archive a file from `path` as `name`
func (ta *tarAppender) addTarFile(path, name string) error {
// addFile adds a file from `path` as `name` to the tar archive.
func (ta *tarWriter) addFile(path, name string) error {
fi, err := os.Lstat(path)
if err != nil {
return err
@ -600,6 +607,13 @@ func (ta *tarAppender) addTarFile(path, name string) error {
hdr.Gname = ""
}
// if override timestamp set, replace all times with this
if ta.Timestamp != nil {
hdr.ModTime = *ta.Timestamp
hdr.AccessTime = *ta.Timestamp
hdr.ChangeTime = *ta.Timestamp
}
maybeTruncateHeaderModTime(hdr)
if ta.WhiteoutConverter != nil {
@ -650,7 +664,7 @@ func (ta *tarAppender) addTarFile(path, name string) error {
return nil
}
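
The new TarOptions.Timestamp field makes archives reproducible by pinning every header's mod/access/change time. A hedged usage sketch, assuming the pkg/archive import path; the source directory is illustrative:

package main

import (
	"io"
	"os"
	"time"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	epoch := time.Unix(0, 0).UTC() // pin all times for reproducibility
	rc, err := archive.TarWithOptions("/some/dir", &archive.TarOptions{
		Compression: archive.Uncompressed,
		Timestamp:   &epoch,
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	_, _ = io.Copy(os.Stdout, rc) // emit the tar stream
}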
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool, forceMask *os.FileMode, buffer []byte) error {
func extractTarFileEntry(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool, forceMask *os.FileMode, buffer []byte) error {
// hdr.Mode is in linux format, which we can use for sycalls,
// but for os.Foo() calls we need the mode converted to os.FileMode,
// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
@ -673,7 +687,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
case tar.TypeDir:
// Create directory unless it exists as a directory already.
// In that case we just want to merge the two
if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
if fi, err := os.Lstat(path); err != nil || !fi.IsDir() {
if err := os.Mkdir(path, mask); err != nil {
return err
}
@ -691,7 +705,9 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
file.Close()
return err
}
file.Close()
if err := file.Close(); err != nil {
return err
}
case tar.TypeBlock, tar.TypeChar:
if inUserns { // cannot create devices in a userns
@ -845,41 +861,39 @@ func Tar(path string, compression Compression) (io.ReadCloser, error) {
// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
tarWithOptionsTo := func(dest io.WriteCloser, srcPath string, options *TarOptions) (result error) {
// Fix the source path to work with long path names. This is a no-op
// on platforms other than Windows.
srcPath = fixVolumePathPrefix(srcPath)
defer func() {
if err := dest.Close(); err != nil && result == nil {
result = err
}
}()
pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
if err != nil {
return nil, err
return err
}
pipeReader, pipeWriter := io.Pipe()
compressWriter, err := CompressStream(pipeWriter, options.Compression)
compressWriter, err := CompressStream(dest, options.Compression)
if err != nil {
return nil, err
return err
}
go func() {
ta := newTarAppender(
ta := newTarWriter(
idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
compressWriter,
options.ChownOpts,
options.Timestamp,
)
ta.WhiteoutConverter = GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
ta.CopyPass = options.CopyPass
includeFiles := options.IncludeFiles
defer func() {
// Make sure to check the error on Close.
if err := ta.TarWriter.Close(); err != nil {
logrus.Errorf("Can't close tar writer: %s", err)
}
if err := compressWriter.Close(); err != nil {
logrus.Errorf("Can't close compress writer: %s", err)
}
if err := pipeWriter.Close(); err != nil {
logrus.Errorf("Can't close pipe writer: %s", err)
if err := compressWriter.Close(); err != nil && result == nil {
result = err
}
}()
@ -893,7 +907,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
stat, err := os.Lstat(srcPath)
if err != nil {
return
return err
}
if !stat.IsDir() {
@ -901,22 +915,22 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
// 'walk' will error if "file/." is stat-ed and "file" is not a
// directory. So, we must split the source path and use the
// basename as the include.
if len(options.IncludeFiles) > 0 {
if len(includeFiles) > 0 {
logrus.Warn("Tar: Can't archive a file with includes")
}
dir, base := SplitPathDirEntry(srcPath)
srcPath = dir
options.IncludeFiles = []string{base}
includeFiles = []string{base}
}
if len(options.IncludeFiles) == 0 {
options.IncludeFiles = []string{"."}
if len(includeFiles) == 0 {
includeFiles = []string{"."}
}
seen := make(map[string]bool)
for _, include := range options.IncludeFiles {
for _, include := range includeFiles {
rebaseName := options.RebaseNames[include]
walkRoot := getWalkRoot(srcPath, include)
@ -1002,7 +1016,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
relFilePath = strings.Replace(relFilePath, include, replacement, 1)
}
if err := ta.addTarFile(filePath, relFilePath); err != nil {
if err := ta.addFile(filePath, relFilePath); err != nil {
logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
// if pipe is broken, stop writing tar stream to it
if err == io.ErrClosedPipe {
@ -1011,10 +1025,18 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
}
return nil
}); err != nil {
logrus.Errorf("%s", err)
return
return err
}
}
return ta.TarWriter.Close()
}
pipeReader, pipeWriter := io.Pipe()
go func() {
err := tarWithOptionsTo(pipeWriter, srcPath, options)
if pipeErr := pipeWriter.CloseWithError(err); pipeErr != nil {
logrus.Errorf("Can't close pipe writer: %s", pipeErr)
}
}()
return pipeReader, nil
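
The restructure routes the writer goroutine's error into pipeWriter.CloseWithError, so readers of the returned stream see the real failure instead of a bare EOF. The pattern in isolation, with an illustrative producer:

package main

import (
	"errors"
	"fmt"
	"io"
)

func produce(w io.Writer) error {
	if _, err := w.Write([]byte("partial")); err != nil {
		return err
	}
	return errors.New("walk failed") // simulate a mid-stream failure
}

func main() {
	pr, pw := io.Pipe()
	go func() {
		// A nil error closes the pipe cleanly; a non-nil one is
		// surfaced to every subsequent read on the reader side.
		pw.CloseWithError(produce(pw))
	}()
	_, err := io.ReadAll(pr)
	fmt.Println(err) // walk failed
}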
@ -1110,7 +1132,7 @@ loop:
continue
}
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if !fi.IsDir() || hdr.Typeflag != tar.TypeDir {
if err := os.RemoveAll(path); err != nil {
return err
}
@ -1137,7 +1159,7 @@ loop:
chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
}
if err = createTarFile(path, dest, hdr, trBuf, doChown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
if err = extractTarFileEntry(path, dest, hdr, trBuf, doChown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
return err
}
@ -1201,9 +1223,6 @@ func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decomp
if options == nil {
options = &TarOptions{}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
r := tarArchive
if decompress {
@ -1389,7 +1408,7 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id
} else if runtime.GOOS == darwin {
uid, gid = hdr.Uid, hdr.Gid
if xstat, ok := hdr.PAXRecords[PaxSchilyXattr+idtools.ContainersOverrideXattr]; ok {
attrs := strings.Split(string(xstat), ":")
attrs := strings.Split(xstat, ":")
if len(attrs) >= 3 {
val, err := strconv.ParseUint(attrs[0], 10, 32)
if err != nil {


@ -16,7 +16,7 @@ func getOverlayOpaqueXattrName() string {
return GetOverlayXattrName("opaque")
}
func GetWhiteoutConverter(format WhiteoutFormat, data interface{}) TarWhiteoutConverter {
func GetWhiteoutConverter(format WhiteoutFormat, data any) TarWhiteoutConverter {
if format == OverlayWhiteoutFormat {
if rolayers, ok := data.([]string); ok && len(rolayers) > 0 {
return overlayWhiteoutConverter{rolayers: rolayers}
@ -173,7 +173,7 @@ func (o overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (boo
func isWhiteOut(stat os.FileInfo) bool {
s := stat.Sys().(*syscall.Stat_t)
return major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0
return major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 //nolint:unconvert
}
func GetFileOwner(path string) (uint32, uint32, uint32, error) {


@ -67,7 +67,7 @@ func chmodTarEntry(perm os.FileMode) os.FileMode {
return perm // noop for unix as golang APIs provide perm bits correctly
}
func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat any) (err error) {
s, ok := stat.(*syscall.Stat_t)
if ok {
@ -82,7 +82,7 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (
return
}
func getInodeFromStat(stat interface{}) (inode uint64, err error) {
func getInodeFromStat(stat any) (inode uint64, err error) {
s, ok := stat.(*syscall.Stat_t)
if ok {
@ -92,7 +92,7 @@ func getInodeFromStat(stat interface{}) (inode uint64, err error) {
return
}
func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
func getFileUIDGID(stat any) (idtools.IDPair, error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {


@ -70,7 +70,7 @@ func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
// files, we handle this by comparing for exact times, *or* same
// second count and either a or b having exactly 0 nanoseconds
func sameFsTime(a, b time.Time) bool {
return a == b ||
return a.Equal(b) ||
(a.Unix() == b.Unix() &&
(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}
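
Comparing time.Time values with == also compares the monotonic clock reading and the location, which is why Equal is the right call for timestamp logic:

package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Now()        // carries a monotonic clock reading
	stripped := now.Round(0) // Round(0) drops the monotonic reading
	fmt.Println(now == stripped)     // false: struct fields differ
	fmt.Println(now.Equal(stripped)) // true: same instant
}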
@ -452,7 +452,7 @@ func ChangesSize(newDir string, changes []Change) int64 {
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
reader, writer := io.Pipe()
go func() {
ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil)
ta := newTarWriter(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil, nil)
// this buffer is needed for the duration of this piped stream
defer pools.BufioWriter32KPool.Put(ta.Buffer)
@ -481,7 +481,7 @@ func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMa
}
} else {
path := filepath.Join(dir, change.Path)
if err := ta.addTarFile(path, change.Path[1:]); err != nil {
if err := ta.addFile(path, change.Path[1:]); err != nil {
logrus.Debugf("Can't add file %s to tar: %s", path, err)
}
}


@ -174,14 +174,7 @@ func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
ix1 := 0
ix2 := 0
for {
if ix1 >= len(names1) {
break
}
if ix2 >= len(names2) {
break
}
for ix1 < len(names1) && ix2 < len(names2) {
ni1 := names1[ix1]
ni2 := names2[ix2]
@ -304,7 +297,7 @@ func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno)
continue
}
builder := make([]byte, 0, dirent.Reclen)
for i := 0; i < len(dirent.Name); i++ {
for i := range len(dirent.Name) {
if dirent.Name[i] == 0 {
break
}


@ -31,9 +31,6 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
if options == nil {
options = &TarOptions{}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
aufsTempdir := ""
@ -107,7 +104,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
}
defer os.RemoveAll(aufsTempdir)
}
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
if err := extractTarFileEntry(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
return 0, err
}
}
@ -176,12 +173,12 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
// We always reset the immutable flag (if present) to allow metadata
// changes and to allow directory modification. The flag will be
// re-applied based on the contents of hdr either at the end for
// directories or in createTarFile otherwise.
// directories or in extractTarFileEntry otherwise.
if fi, err := os.Lstat(path); err == nil {
if err := resetImmutable(path, &fi); err != nil {
return 0, err
}
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if !fi.IsDir() || hdr.Typeflag != tar.TypeDir {
if err := os.RemoveAll(path); err != nil {
return 0, err
}
@ -212,7 +209,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
return 0, err
}
if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
if err := extractTarFileEntry(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
return 0, err
}


@ -69,9 +69,6 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
options = &archive.TarOptions{}
options.InUserNS = unshare.IsRootless()
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
rootIDs := idMappings.RootPair()


@ -98,9 +98,6 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions
options.InUserNS = true
}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
data, err := json.Marshal(options)
if err != nil {


@ -1,4 +0,0 @@
package chrootarchive
func init() {
}


@ -1,4 +0,0 @@
package chrootarchive
func init() {
}


@ -65,7 +65,7 @@ func (bf *bloomFilter) writeTo(writer io.Writer) error {
if err := binary.Write(writer, binary.LittleEndian, uint64(len(bf.bitArray))); err != nil {
return err
}
if err := binary.Write(writer, binary.LittleEndian, uint32(bf.k)); err != nil {
if err := binary.Write(writer, binary.LittleEndian, bf.k); err != nil {
return err
}
if err := binary.Write(writer, binary.LittleEndian, bf.bitArray); err != nil {


@ -7,6 +7,7 @@ import (
"fmt"
"io"
"maps"
"slices"
"strconv"
"time"
@ -17,7 +18,6 @@ import (
"github.com/vbatts/tar-split/archive/tar"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
expMaps "golang.org/x/exp/maps"
)
const (
@ -87,7 +87,7 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
return nil, 0, fmt.Errorf("parse ToC offset: %w", err)
}
size := int64(blobSize - footerSize - tocOffset)
size := blobSize - footerSize - tocOffset
// set a reasonable limit
if size > maxTocSize {
// Not errFallbackCanConvert: we would still use too much memory.
@ -310,7 +310,7 @@ func ensureTOCMatchesTarSplit(toc *minimal.TOC, tarSplit []byte) error {
return err
}
if len(pendingFiles) != 0 {
remaining := expMaps.Keys(pendingFiles)
remaining := slices.Collect(maps.Keys(pendingFiles))
if len(remaining) > 5 {
remaining = remaining[:5] // Just to limit the size of the output.
}


@ -142,10 +142,7 @@ func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) {
rc.IsLastChunkZeros = false
if rc.pendingHole > 0 {
toCopy := int64(len(b))
if rc.pendingHole < toCopy {
toCopy = rc.pendingHole
}
toCopy := min(rc.pendingHole, int64(len(b)))
rc.pendingHole -= toCopy
for i := int64(0); i < toCopy; i++ {
b[i] = 0
@ -163,7 +160,7 @@ func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) {
return false, 0, io.EOF
}
for i := 0; i < len(b); i++ {
for i := range b {
holeLen, n, err := rc.reader.readByte()
if err != nil {
if err == io.EOF {


@ -43,7 +43,7 @@ func escaped(val []byte, escape int) string {
}
var result string
for _, c := range []byte(val) {
for _, c := range val {
hexEscape := false
var special string
@ -214,7 +214,7 @@ func dumpNode(out io.Writer, added map[string]*minimal.FileMetadata, links map[s
}
// GenerateDump generates a dump of the TOC in the same format as `composefs-info dump`
func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, error) {
func GenerateDump(tocI any, verityDigests map[string]string) (io.Reader, error) {
toc, ok := tocI.(*minimal.TOC)
if !ok {
return nil, fmt.Errorf("invalid TOC type")


@ -234,7 +234,7 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
Offset: manifestOffset,
LengthCompressed: uint64(len(compressedManifest)),
LengthUncompressed: uint64(len(manifest)),
OffsetTarSplit: uint64(tarSplitOffset),
OffsetTarSplit: tarSplitOffset,
LengthCompressedTarSplit: uint64(len(tarSplitData.Data)),
LengthUncompressedTarSplit: uint64(tarSplitData.UncompressedSize),
}


@ -111,7 +111,7 @@ type chunkedDiffer struct {
useFsVerity graphdriver.DifferFsVerity
}
var xattrsToIgnore = map[string]interface{}{
var xattrsToIgnore = map[string]any{
"security.selinux": true,
}
@ -162,6 +162,8 @@ func (c *chunkedDiffer) convertTarToZstdChunked(destDirectory string, payload *o
return 0, nil, "", nil, err
}
defer diff.Close()
fd, err := unix.Open(destDirectory, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600)
if err != nil {
return 0, nil, "", nil, &fs.PathError{Op: "open", Path: destDirectory, Err: err}
@ -696,18 +698,12 @@ func (c *chunkedDiffer) prepareCompressedStreamToFile(partCompression compressed
// hashHole writes SIZE zeros to the specified hasher
func hashHole(h hash.Hash, size int64, copyBuffer []byte) error {
count := int64(len(copyBuffer))
if size < count {
count = size
}
count := min(size, int64(len(copyBuffer)))
for i := int64(0); i < count; i++ {
copyBuffer[i] = 0
}
for size > 0 {
count = int64(len(copyBuffer))
if size < count {
count = size
}
count = min(size, int64(len(copyBuffer)))
if _, err := h.Write(copyBuffer[:count]); err != nil {
return err
}
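
The hand-rolled clamp (set count to the buffer length, then shrink it if size is smaller) is replaced by the min builtin added in Go 1.21:

package main

import "fmt"

func main() {
	size, buf := int64(10), make([]byte, 4096)
	count := min(size, int64(len(buf))) // generic builtin, no helper needed
	fmt.Println(count)                  // 10
}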
@ -1015,7 +1011,7 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
!missingParts[prevIndex].Hole && !missingParts[i].Hole &&
len(missingParts[prevIndex].Chunks) == 1 && len(missingParts[i].Chunks) == 1 &&
missingParts[prevIndex].Chunks[0].File.Name == missingParts[i].Chunks[0].File.Name {
missingParts[prevIndex].SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length
missingParts[prevIndex].SourceChunk.Length += gap + missingParts[i].SourceChunk.Length
missingParts[prevIndex].Chunks[0].CompressedSize += missingParts[i].Chunks[0].CompressedSize
missingParts[prevIndex].Chunks[0].UncompressedSize += missingParts[i].Chunks[0].UncompressedSize
} else {
@ -1073,7 +1069,7 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
} else {
gap := getGap(missingParts, i)
prev := &newMissingParts[len(newMissingParts)-1]
prev.SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length
prev.SourceChunk.Length += gap + missingParts[i].SourceChunk.Length
prev.Hole = false
prev.OriginFile = nil
if gap > 0 {
@ -1269,7 +1265,7 @@ func getBlobAtConverterGoroutine(stream chan streamOrErr, streams chan io.ReadCl
tooManyStreams := false
streamsSoFar := 0
err := errors.New("Unexpected error in getBlobAtGoroutine")
err := errors.New("unexpected error in getBlobAtGoroutine")
defer func() {
if err != nil {
@ -1487,7 +1483,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
bigDataKey: c.manifest,
chunkedLayerDataKey: lcdBigData,
},
Artifacts: map[string]interface{}{
Artifacts: map[string]any{
tocKey: toc,
},
TOCDigest: c.tocDigest,
@ -1765,7 +1761,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
// the file is missing, attempt to find individual chunks.
for _, chunk := range r.chunks {
compressedSize := int64(chunk.EndOffset - chunk.Offset)
compressedSize := chunk.EndOffset - chunk.Offset
size := remainingSize
if chunk.ChunkSize > 0 {
size = chunk.ChunkSize


@ -42,13 +42,11 @@ func Usage(dir string) (usage *DiskUsage, err error) {
// Check inode to only count the sizes of files with multiple hard links once.
inode := fileInfo.Sys().(*syscall.Stat_t).Ino
// inode is not a uint64 on all platforms. Cast it to avoid issues.
if _, exists := data[uint64(inode)]; exists {
if _, exists := data[inode]; exists {
return nil
}
// inode is not a uint64 on all platforms. Cast it to avoid issues.
data[uint64(inode)] = struct{}{}
data[inode] = struct{}{}
// Ignore directory sizes
if entry.IsDir() {
return nil


@ -13,7 +13,7 @@ import (
func Exists(path string) error {
// It uses unix.Faccessat which is a faster operation compared to os.Stat for
// simply checking the existence of a file.
err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, 0)
err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_EACCESS)
if err != nil {
return &os.PathError{Op: "faccessat", Path: path, Err: err}
}
@ -25,7 +25,7 @@ func Exists(path string) error {
func Lexists(path string) error {
// It uses unix.Faccessat which is a faster operation compared to os.Stat for
// simply checking the existence of a file.
err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW)
err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW|unix.AT_EACCESS)
if err != nil {
return &os.PathError{Op: "faccessat", Path: path, Err: err}
}
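
Adding unix.AT_EACCESS makes faccessat(2) check against the effective rather than the real UID/GID, which matters for setuid or ID-mapped callers. A minimal Linux-only probe; the path is illustrative:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// F_OK existence check that honors the effective IDs and does not
	// follow a trailing symlink, matching Lexists above.
	err := unix.Faccessat(unix.AT_FDCWD, "/etc/hosts", unix.F_OK,
		unix.AT_SYMLINK_NOFOLLOW|unix.AT_EACCESS)
	fmt.Println(err) // <nil> when the path exists
}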


@ -0,0 +1,20 @@
package fileutils
import (
"io"
"os"
"golang.org/x/sys/unix"
)
// ReflinkOrCopy attempts to reflink the source to the destination fd.
// If reflinking fails or is unsupported, it falls back to io.Copy().
func ReflinkOrCopy(src, dst *os.File) error {
err := unix.IoctlFileClone(int(dst.Fd()), int(src.Fd()))
if err == nil {
return nil
}
_, err = io.Copy(dst, src)
return err
}
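
A hedged caller sketch for the new helper, assuming the pkg/fileutils import path: share extents when the filesystem supports reflinks, degrade to a plain byte copy otherwise (the non-Linux variant below always copies):

package fileclone

import (
	"os"

	"github.com/containers/storage/pkg/fileutils"
)

// cloneFile copies srcPath to dstPath, sharing extents when possible.
func cloneFile(srcPath, dstPath string) error {
	src, err := os.Open(srcPath)
	if err != nil {
		return err
	}
	defer src.Close()
	dst, err := os.Create(dstPath)
	if err != nil {
		return err
	}
	defer dst.Close()
	return fileutils.ReflinkOrCopy(src, dst)
}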


@ -0,0 +1,15 @@
//go:build !linux
package fileutils
import (
"io"
"os"
)
// ReflinkOrCopy attempts to reflink the source to the destination fd.
// If reflinking fails or is unsupported, it falls back to io.Copy().
func ReflinkOrCopy(src, dst *os.File) error {
_, err := io.Copy(dst, src)
return err
}


@ -25,7 +25,7 @@ func CreateIDMappedMount(source, target string, pid int) error {
}
defer userNsFile.Close()
targetDirFd, err := unix.OpenTree(0, source, unix.OPEN_TREE_CLONE)
targetDirFd, err := unix.OpenTree(unix.AT_FDCWD, source, unix.OPEN_TREE_CLONE)
if err != nil {
return &os.PathError{Op: "open_tree", Path: source, Err: err}
}


@ -429,25 +429,25 @@ func parseOverrideXattr(xstat []byte) (Stat, error) {
var stat Stat
attrs := strings.Split(string(xstat), ":")
if len(attrs) < 3 {
return stat, fmt.Errorf("The number of parts in %s is less than 3",
return stat, fmt.Errorf("the number of parts in %s is less than 3",
ContainersOverrideXattr)
}
value, err := strconv.ParseUint(attrs[0], 10, 32)
if err != nil {
return stat, fmt.Errorf("Failed to parse UID: %w", err)
return stat, fmt.Errorf("failed to parse UID: %w", err)
}
stat.IDs.UID = int(value)
value, err = strconv.ParseUint(attrs[1], 10, 32)
if err != nil {
return stat, fmt.Errorf("Failed to parse GID: %w", err)
return stat, fmt.Errorf("failed to parse GID: %w", err)
}
stat.IDs.GID = int(value)
value, err = strconv.ParseUint(attrs[2], 8, 32)
if err != nil {
return stat, fmt.Errorf("Failed to parse mode: %w", err)
return stat, fmt.Errorf("failed to parse mode: %w", err)
}
stat.Mode = os.FileMode(value) & os.ModePerm
if value&0o1000 != 0 {
@ -484,7 +484,7 @@ func parseOverrideXattr(xstat []byte) (Stat, error) {
return stat, err
}
} else {
return stat, fmt.Errorf("Invalid file type %s", typ)
return stat, fmt.Errorf("invalid file type %s", typ)
}
}
return stat, nil
@ -494,18 +494,18 @@ func parseDevice(typ string) (int, int, error) {
parts := strings.Split(typ, "-")
// If there are more than 3 parts, just ignore them to be forward compatible
if len(parts) < 3 {
return 0, 0, fmt.Errorf("Invalid device type %s", typ)
return 0, 0, fmt.Errorf("invalid device type %s", typ)
}
if parts[0] != "block" && parts[0] != "char" {
return 0, 0, fmt.Errorf("Invalid device type %s", typ)
return 0, 0, fmt.Errorf("invalid device type %s", typ)
}
major, err := strconv.Atoi(parts[1])
if err != nil {
return 0, 0, fmt.Errorf("Failed to parse major number: %w", err)
return 0, 0, fmt.Errorf("failed to parse major number: %w", err)
}
minor, err := strconv.Atoi(parts[2])
if err != nil {
return 0, 0, fmt.Errorf("Failed to parse minor number: %w", err)
return 0, 0, fmt.Errorf("failed to parse minor number: %w", err)
}
return major, minor, nil
}


@ -5,6 +5,7 @@ package idtools
import (
"errors"
"os/user"
"sync"
"unsafe"
)
@ -13,16 +14,14 @@ import (
#include <shadow/subid.h>
#include <stdlib.h>
#include <stdio.h>
const char *Prog = "storage";
FILE *shadow_logfd = NULL;
struct subid_range get_range(struct subid_range *ranges, int i)
{
shadow_logfd = stderr;
return ranges[i];
}
#if !defined(SUBID_ABI_MAJOR) || (SUBID_ABI_MAJOR < 4)
# define subid_init libsubid_init
# define subid_get_uid_ranges get_subuid_ranges
# define subid_get_gid_ranges get_subgid_ranges
#endif
@ -30,6 +29,8 @@ struct subid_range get_range(struct subid_range *ranges, int i)
*/
import "C"
var onceInit sync.Once
func readSubid(username string, isUser bool) (ranges, error) {
var ret ranges
uidstr := ""
@ -42,6 +43,10 @@ func readSubid(username string, isUser bool) (ranges, error) {
uidstr = u.Uid
}
onceInit.Do(func() {
C.subid_init(C.CString("storage"), C.stderr)
})
cUsername := C.CString(username)
defer C.free(unsafe.Pointer(cUsername))
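
Moving subid_init out of the C helper and under a sync.Once guarantees one-time libsubid initialization even with concurrent lookups. The Go-side pattern in isolation:

package main

import (
	"fmt"
	"sync"
)

var onceInit sync.Once

func readRanges() {
	onceInit.Do(func() {
		fmt.Println("initialized") // runs exactly once across all goroutines
	})
}

func main() {
	var wg sync.WaitGroup
	for range 8 {
		wg.Add(1)
		go func() { defer wg.Done(); readRanges() }()
	}
	wg.Wait()
}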


@ -93,10 +93,7 @@ loop0:
}
// add new byte slice to the buffers slice and continue writing
nextCap := b.Cap() * 2
if nextCap > maxCap {
nextCap = maxCap
}
nextCap := min(b.Cap()*2, maxCap)
bp.buf = append(bp.buf, getBuffer(nextCap))
}
bp.wait.Broadcast()
@ -178,7 +175,7 @@ func getBuffer(size int) *fixedBuffer {
bufPoolsLock.Lock()
pool, ok := bufPools[size]
if !ok {
pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
pool = &sync.Pool{New: func() any { return &fixedBuffer{buf: make([]byte, 0, size)} }}
bufPools[size] = pool
}
bufPoolsLock.Unlock()


@ -16,8 +16,8 @@ import (
// Loopback related errors
var (
ErrAttachLoopbackDevice = errors.New("loopback attach failed")
ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file")
ErrSetCapacity = errors.New("Unable set loopback capacity")
ErrGetLoopbackBackingFile = errors.New("unable to get loopback backing file")
ErrSetCapacity = errors.New("unable set loopback capacity")
)
func stringToLoopName(src string) [LoNameSize]uint8 {
@ -113,7 +113,7 @@ func openNextAvailableLoopback(sparseName string, sparseFile *os.File) (*os.File
logrus.Errorf("Getting loopback backing file: %s", err)
return nil, ErrGetLoopbackBackingFile
}
if dev != uint64(st.Dev) || ino != st.Ino {
if dev != uint64(st.Dev) || ino != st.Ino { //nolint:unconvert
logrus.Errorf("Loopback device and filesystem disagree on device/inode for %q: %#x(%d):%#x(%d) vs %#x(%d):%#x(%d)", sparseName, dev, dev, ino, ino, st.Dev, st.Dev, st.Ino, st.Ino)
}
return loopFile, nil


@ -2,6 +2,10 @@
package loopback
import (
"golang.org/x/sys/unix"
)
type loopInfo64 struct {
loDevice uint64 /* ioctl r/o */
loInode uint64 /* ioctl r/o */
@ -20,19 +24,19 @@ type loopInfo64 struct {
// IOCTL consts
const (
LoopSetFd = 0x4C00
LoopCtlGetFree = 0x4C82
LoopGetStatus64 = 0x4C05
LoopSetStatus64 = 0x4C04
LoopClrFd = 0x4C01
LoopSetCapacity = 0x4C07
LoopSetFd = unix.LOOP_SET_FD
LoopCtlGetFree = unix.LOOP_CTL_GET_FREE
LoopGetStatus64 = unix.LOOP_GET_STATUS64
LoopSetStatus64 = unix.LOOP_SET_STATUS64
LoopClrFd = unix.LOOP_CLR_FD
LoopSetCapacity = unix.LOOP_SET_CAPACITY
)
// LOOP consts.
const (
LoFlagsAutoClear = 0x4C07
LoFlagsReadOnly = 1
LoFlagsPartScan = 8
LoKeySize = 32
LoNameSize = 64
LoFlagsAutoClear = unix.LO_FLAGS_AUTOCLEAR
LoFlagsReadOnly = unix.LO_FLAGS_READ_ONLY
LoFlagsPartScan = unix.LO_FLAGS_PARTSCAN
LoKeySize = unix.LO_KEY_SIZE
LoNameSize = unix.LO_NAME_SIZE
)


@ -36,7 +36,7 @@ func FindLoopDeviceFor(file *os.File) *os.File {
return nil
}
targetInode := stat.Sys().(*syscall.Stat_t).Ino
targetDevice := stat.Sys().(*syscall.Stat_t).Dev
targetDevice := uint64(stat.Sys().(*syscall.Stat_t).Dev) //nolint:unconvert
for i := 0; true; i++ {
path := fmt.Sprintf("/dev/loop%d", i)
@ -53,7 +53,7 @@ func FindLoopDeviceFor(file *os.File) *os.File {
}
dev, inode, err := getLoopbackBackingFile(file)
if err == nil && dev == uint64(targetDevice) && inode == targetInode {
if err == nil && dev == targetDevice && inode == targetInode {
return file
}
file.Close()


@ -40,7 +40,7 @@ func init() {
// added here to be shared where required.
func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
pool := &sync.Pool{
New: func() interface{} { return bufio.NewReaderSize(nil, size) },
New: func() any { return bufio.NewReaderSize(nil, size) },
}
return &BufioReaderPool{pool: pool}
}
@ -87,7 +87,7 @@ type BufioWriterPool struct {
// added here to be shared where required.
func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
pool := &sync.Pool{
New: func() interface{} { return bufio.NewWriterSize(nil, size) },
New: func() any { return bufio.NewWriterSize(nil, size) },
}
return &BufioWriterPool{pool: pool}
}


@ -49,7 +49,7 @@ func panicIfNotInitialized() {
}
}
func naiveSelf() string { //nolint: unused
func naiveSelf() string {
name := os.Args[0]
if filepath.Base(name) == name {
if lp, err := exec.LookPath(name); err == nil {


@ -24,7 +24,7 @@ func GenerateRandomASCIIString(n int) string {
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
"~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "
res := make([]byte, n)
for i := 0; i < n; i++ {
for i := range n {
res[i] = chars[rand.IntN(len(chars))]
}
return string(res)
@ -83,7 +83,7 @@ func quote(word string, buf *bytes.Buffer) {
buf.WriteString("'")
for i := 0; i < len(word); i++ {
for i := range len(word) {
b := word[i]
if b == '\'' {
// Replace literal ' with a close ', a \', and an open '


@ -9,9 +9,9 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) {
mode: s.Mode,
uid: s.Uid,
gid: s.Gid,
rdev: uint64(s.Rdev),
rdev: uint64(s.Rdev), //nolint:unconvert
mtim: s.Mtim,
dev: uint64(s.Dev),
dev: uint64(s.Dev), //nolint:unconvert
}, nil
}


@ -32,9 +32,9 @@ type Cmd struct {
*exec.Cmd
UnshareFlags int
UseNewuidmap bool
UidMappings []specs.LinuxIDMapping // nolint: revive,golint
UidMappings []specs.LinuxIDMapping //nolint: revive
UseNewgidmap bool
GidMappings []specs.LinuxIDMapping // nolint: revive,golint
GidMappings []specs.LinuxIDMapping //nolint: revive
GidMappingsEnableSetgroups bool
Setsid bool
Setpgrp bool
@ -98,7 +98,7 @@ func IsSetID(path string, modeid os.FileMode, capid capability.Cap) (bool, error
return cap.Get(capability.EFFECTIVE, capid), nil
}
func (c *Cmd) Start() error {
func (c *Cmd) Start() (retErr error) {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
@ -167,6 +167,15 @@ func (c *Cmd) Start() error {
return err
}
// If the function fails from here, we need to make sure the
// child process is killed and properly cleaned up.
defer func() {
if retErr != nil {
_ = c.Cmd.Process.Kill()
_ = c.Cmd.Wait()
}
}()
// Close the ends of the pipes that the parent doesn't need.
continueRead.Close()
continueRead = nil
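
The new defer ensures that once the child is started, any later setup failure kills and reaps it instead of leaking a half-configured process. The pattern reduced to a toy command (the failure is simulated):

package main

import (
	"fmt"
	"os/exec"
)

func startManaged() (retErr error) {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		return err
	}
	defer func() {
		if retErr != nil {
			_ = cmd.Process.Kill() // kill and reap on any later failure
			_ = cmd.Wait()
		}
	}()
	return fmt.Errorf("simulated uid map setup failure")
}

func main() {
	fmt.Println(startManaged()) // the sleep process does not outlive us
}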
@ -240,7 +249,7 @@ func (c *Cmd) Start() error {
if err != nil {
return fmt.Errorf("finding newgidmap: %w", err)
}
cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...)
cmd := exec.Command(path, append([]string{pidString}, strings.Fields(g.String())...)...)
g.Reset()
cmd.Stdout = g
cmd.Stderr = g
@ -258,7 +267,7 @@ func (c *Cmd) Start() error {
}
logrus.Warnf("Falling back to single mapping")
g.Reset()
g.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Getegid())))
fmt.Fprintf(g, "0 %d 1\n", os.Getegid())
}
}
if !gidmapSet {
@ -300,7 +309,7 @@ func (c *Cmd) Start() error {
if err != nil {
return fmt.Errorf("finding newuidmap: %w", err)
}
cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...)
cmd := exec.Command(path, append([]string{pidString}, strings.Fields(u.String())...)...)
u.Reset()
cmd.Stdout = u
cmd.Stderr = u
@ -319,7 +328,7 @@ func (c *Cmd) Start() error {
logrus.Warnf("Falling back to single mapping")
u.Reset()
u.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Geteuid())))
fmt.Fprintf(u, "0 %d 1\n", os.Geteuid())
}
}
if !uidmapSet {
@ -459,7 +468,7 @@ type Runnable interface {
Run() error
}
func bailOnError(err error, format string, a ...interface{}) { // nolint: revive,goprintffuncname
func bailOnError(err error, format string, a ...any) { //nolint:revive,goprintffuncname
if err != nil {
if format != "" {
logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err)


@ -18,6 +18,7 @@ import (
// register all of the built-in drivers
_ "github.com/containers/storage/drivers/register"
"golang.org/x/sync/errgroup"
drivers "github.com/containers/storage/drivers"
"github.com/containers/storage/internal/dedup"
@ -30,7 +31,6 @@ import (
"github.com/containers/storage/pkg/stringutils"
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/types"
"github.com/hashicorp/go-multierror"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/sirupsen/logrus"
@ -162,7 +162,7 @@ type flaggableStore interface {
ClearFlag(id string, flag string) error
// SetFlag sets a named flag and its value on an item in the store.
SetFlag(id string, flag string, value interface{}) error
SetFlag(id string, flag string, value any) error
}
type StoreOptions = types.StoreOptions
@ -672,7 +672,7 @@ type LayerOptions struct {
// Flags is a set of named flags and their values to store with the layer.
// Currently these can only be set when the layer record is created, but that
// could change in the future.
Flags map[string]interface{}
Flags map[string]any
}
type LayerBigDataOption struct {
@ -700,7 +700,7 @@ type ImageOptions struct {
NamesHistory []string
// Flags is a set of named flags and their values to store with the image. Currently these can only
// be set when the image record is created, but that could change in the future.
Flags map[string]interface{}
Flags map[string]any
}
type ImageBigDataOption struct {
@ -720,7 +720,7 @@ type ContainerOptions struct {
// Flags is a set of named flags and their values to store with the container.
// Currently these can only be set when the container record is created, but that
// could change in the future.
Flags map[string]interface{}
Flags map[string]any
MountOpts []string
Volatile bool
StorageOpt map[string]string
@ -1649,7 +1649,7 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, i
options.BigData = append(options.BigData, copyImageBigDataOptionSlice(iOptions.BigData)...)
options.NamesHistory = append(options.NamesHistory, iOptions.NamesHistory...)
if options.Flags == nil {
options.Flags = make(map[string]interface{})
options.Flags = make(map[string]any)
}
maps.Copy(options.Flags, iOptions.Flags)
}
@ -1918,7 +1918,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
}
}
if options.Flags == nil {
options.Flags = make(map[string]interface{})
options.Flags = make(map[string]any)
}
plabel, _ := options.Flags[processLabelFlag].(string)
mlabel, _ := options.Flags[mountLabelFlag].(string)
@ -2744,7 +2744,7 @@ func (s *store) DeleteContainer(id string) error {
}
}
var wg multierror.Group
var wg errgroup.Group
middleDir := s.graphDriverName + "-containers"
@ -2759,7 +2759,7 @@ func (s *store) DeleteContainer(id string) error {
})
if multierr := wg.Wait(); multierr != nil {
return multierr.ErrorOrNil()
return multierr
}
return s.containerStore.Delete(id)
})
@ -2830,7 +2830,11 @@ func (s *store) Version() ([][2]string, error) {
return [][2]string{}, nil
}
func (s *store) mount(id string, options drivers.MountOpts) (string, error) {
func (s *store) MountImage(id string, mountOpts []string, mountLabel string) (string, error) {
if err := validateMountOptions(mountOpts); err != nil {
return "", err
}
// We need to make sure the home mount is present when the Mount is done, which happens by possibly reinitializing the graph driver
// in startUsingGraphDriver().
if err := s.startUsingGraphDriver(); err != nil {
@ -2842,57 +2846,61 @@ func (s *store) mount(id string, options drivers.MountOpts) (string, error) {
if err != nil {
return "", err
}
if options.UidMaps != nil || options.GidMaps != nil {
options.DisableShifting = !s.canUseShifting(options.UidMaps, options.GidMaps)
}
var imageHomeStore roImageStore
// function used to have a scope for rlstore.StopWriting()
tryMount := func() (string, error) {
if err := rlstore.startWriting(); err != nil {
return "", err
}
defer rlstore.stopWriting()
if rlstore.Exists(id) {
return rlstore.Mount(id, options)
}
return "", nil
}
mountPoint, err := tryMount()
if mountPoint != "" || err != nil {
return mountPoint, err
}
// check if the layer is in a read-only store, and return a better error message
for _, store := range lstores {
if err := store.startReading(); err != nil {
for _, s := range lstores {
if err := s.startReading(); err != nil {
return "", err
}
exists := store.Exists(id)
store.stopReading()
if exists {
return "", fmt.Errorf("mounting read/only store images is not allowed: %w", ErrStoreIsReadOnly)
defer s.stopReading()
}
if err := s.imageStore.startWriting(); err != nil {
return "", err
}
defer s.imageStore.stopWriting()
cimage, err := s.imageStore.Get(id)
if err == nil {
imageHomeStore = s.imageStore
} else {
for _, s := range s.roImageStores {
if err := s.startReading(); err != nil {
return "", err
}
defer s.stopReading()
cimage, err = s.Get(id)
if err == nil {
imageHomeStore = s
break
}
}
}
if cimage == nil {
return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
return "", ErrLayerUnknown
}
func (s *store) MountImage(id string, mountOpts []string, mountLabel string) (string, error) {
// Append ReadOnly option to mountOptions
img, err := s.Image(id)
idmappingsOpts := types.IDMappingOptions{
HostUIDMapping: true,
HostGIDMapping: true,
}
ilayer, err := s.imageTopLayerForMapping(cimage, imageHomeStore, rlstore, lstores, idmappingsOpts)
if err != nil {
return "", err
}
if err := validateMountOptions(mountOpts); err != nil {
return "", err
if len(ilayer.UIDMap) > 0 || len(ilayer.GIDMap) > 0 {
return "", fmt.Errorf("cannot create an image with canonical UID/GID mappings in a read-only store")
}
options := drivers.MountOpts{
MountLabel: mountLabel,
Options: append(mountOpts, "ro"),
}
return s.mount(img.TopLayer, options)
return rlstore.Mount(ilayer.ID, options)
}
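For callers, the net effect is unchanged: `MountImage` mounts an image read-only and `UnmountImage` undoes it. A minimal usage sketch, assuming a `store` obtained via `storage.GetStore` and an `imageID` present in that store (error handling abbreviated):

```go
// a minimal sketch; store and imageID are assumptions
mountPoint, err := store.MountImage(imageID, nil, "")
if err != nil {
	return err
}
// force=false respects the mount reference count
defer store.UnmountImage(imageID, false)

fmt.Println("image mounted read-only at", mountPoint)
```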
func (s *store) Mount(id, mountLabel string) (string, error) {
@ -2914,7 +2922,43 @@ func (s *store) Mount(id, mountLabel string) (string, error) {
}
}
}
return s.mount(id, options)
// We need to make sure the home mount is present when the Mount is done, which happens by possibly reinitializing the graph driver
// in startUsingGraphDriver().
if err := s.startUsingGraphDriver(); err != nil {
return "", err
}
defer s.stopUsingGraphDriver()
rlstore, lstores, err := s.bothLayerStoreKindsLocked()
if err != nil {
return "", err
}
if options.UidMaps != nil || options.GidMaps != nil {
options.DisableShifting = !s.canUseShifting(options.UidMaps, options.GidMaps)
}
if err := rlstore.startWriting(); err != nil {
return "", err
}
defer rlstore.stopWriting()
if rlstore.Exists(id) {
return rlstore.Mount(id, options)
}
// check if the layer is in a read-only store, and return a better error message
for _, store := range lstores {
if err := store.startReading(); err != nil {
return "", err
}
exists := store.Exists(id)
store.stopReading()
if exists {
return "", fmt.Errorf("mounting read/only store images is not allowed: %w", ErrStoreIsReadOnly)
}
}
return "", ErrLayerUnknown
}
func (s *store) Mounted(id string) (int, error) {
@ -2938,7 +2982,23 @@ func (s *store) UnmountImage(id string, force bool) (bool, error) {
if err != nil {
return false, err
}
return s.Unmount(img.TopLayer, force)
return writeToLayerStore(s, func(lstore rwLayerStore) (bool, error) {
for _, layerID := range img.MappedTopLayers {
l, err := lstore.Get(layerID)
if err != nil {
if err == ErrLayerUnknown {
continue
}
return false, err
}
// check if the layer with the canonical mapping is in the mapped top layers
if len(l.UIDMap) == 0 && len(l.GIDMap) == 0 {
return lstore.unmount(l.ID, force, false)
}
}
return lstore.unmount(img.TopLayer, force, false)
})
}
func (s *store) Unmount(id string, force bool) (bool, error) {
@ -3663,7 +3723,7 @@ func makeBigDataBaseName(key string) string {
if err != nil || size != 1 {
break
}
if ch != '.' && !(ch >= '0' && ch <= '9') && !(ch >= 'a' && ch <= 'z') {
if ch != '.' && (ch < '0' || ch > '9') && (ch < 'a' || ch > 'z') {
break
}
}
@ -3703,11 +3763,10 @@ func copyMapPreferringNil[K comparable, V any](m map[K]V) map[K]V {
// newMapFrom returns a shallow clone of map m.
// If m is empty, an empty map is allocated and returned.
func newMapFrom[K comparable, V any](m map[K]V) map[K]V {
ret := make(map[K]V, len(m))
for k, v := range m {
ret[k] = v
if len(m) == 0 {
return make(map[K]V, 0)
}
return ret
return maps.Clone(m)
}
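The `len(m) == 0` guard exists because `maps.Clone` preserves nil-ness: cloning a nil map returns nil, while `newMapFrom` guarantees a usable, non-nil map. A quick illustration (calling the unexported helper directly, for illustration only):

```go
var nilMap map[string]int
fmt.Println(maps.Clone(nilMap) == nil)  // true: Clone of a nil map is nil
fmt.Println(newMapFrom(nilMap) == nil)  // false: always returns a non-nil map
```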
func copyImageBigDataOptionSlice(slice []ImageBigDataOption) []ImageBigDataOption {

View File

@ -394,7 +394,7 @@ func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptio
}
mtime := fi.ModTime()
if prevReloadConfig.storeOptions != nil && prevReloadConfig.mod == mtime && prevReloadConfig.configFile == configFile {
if prevReloadConfig.storeOptions != nil && mtime.Equal(prevReloadConfig.mod) && prevReloadConfig.configFile == configFile {
*storeOptions = *prevReloadConfig.storeOptions
return nil
}
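Switching from `==` to `Equal` matters because `time.Time` values can carry a monotonic clock reading, and `==` compares those internal fields as well, so two values denoting the same instant may compare unequal. A small demonstration:

```go
t1 := time.Now()
t2 := t1.Round(0) // Round(0) strips the monotonic clock reading

fmt.Println(t1 == t2)     // false: internal representations differ
fmt.Println(t1.Equal(t2)) // true: same instant in time
```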

View File

@ -14,7 +14,7 @@ import (
func expandEnvPath(path string, rootlessUID int) (string, error) {
var err error
path = strings.Replace(path, "$UID", strconv.Itoa(rootlessUID), -1)
path = strings.ReplaceAll(path, "$UID", strconv.Itoa(rootlessUID))
path = os.ExpandEnv(path)
newpath, err := filepath.EvalSymlinks(path)
if err != nil {
@ -61,7 +61,7 @@ func reloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptio
}
mtime := fi.ModTime()
if prevReloadConfig.storeOptions != nil && prevReloadConfig.mod == mtime && prevReloadConfig.configFile == configFile {
if prevReloadConfig.storeOptions != nil && mtime.Equal(prevReloadConfig.mod) && prevReloadConfig.configFile == configFile {
*storeOptions = *prevReloadConfig.storeOptions
return
}

View File

@ -276,10 +276,7 @@ func (s *store) getAutoUserNS(options *types.AutoUserNsOptions, image *Image, rl
// bigger than s.autoNsMaxSize.
// This is a best effort heuristic.
if requestedSize == 0 {
size = initialSize
if s.autoNsMinSize > size {
size = s.autoNsMinSize
}
size = max(s.autoNsMinSize, initialSize)
if image != nil {
sizeFromImage, err := s.getMaxSizeFromImage(image, rlstore, lstores, options.PasswdFile, options.GroupFile)
if err != nil {

View File

@ -6,6 +6,51 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
## [Unreleased] ##
## [0.4.1] - 2025-01-28 ##
### Fixed ###
- The restriction added for `root` paths passed to `SecureJoin` in 0.4.0 was
found to be too strict and caused some regressions when folks tried to
update, so it has been relaxed to only return an error if the path contains
a `..` component. We still recommend that users run `filepath.Clean` (and
even `filepath.EvalSymlinks`) on the `root` path they are using, but at
least you will no longer be punished for "trivial" unclean paths.
## [0.4.0] - 2025-01-13 ##
### Breaking ###
- `SecureJoin(VFS)` will now return an error if the provided `root` is not a
`filepath.Clean`'d path.
While it is ultimately the responsibility of the caller to ensure the root is
a safe path to use, passing a path like `/symlink/..` as a root would result
in the `SecureJoin`'d path being placed in `/` even though `/symlink/..`
might be a different directory, and so we should more strongly discourage
such usage.
All major users of `securejoin.SecureJoin` already ensure that the paths they
provide are safe (and this is ultimately a question of user error), but
removing this foot-gun is probably a good idea. Of course, this is
necessarily a breaking API change (though we expect no real users to be
affected by it).
Thanks to [Erik Sjölund](https://github.com/eriksjolund), who initially
reported this issue as a possible security issue.
- `MkdirAll` and `MkdirAllHandle` now take an `os.FileMode`-style mode argument
instead of a raw `unix.S_*`-style mode argument, which may cause compile-time
type errors depending on how you use `filepath-securejoin`. For most users,
there will be no change in behaviour aside from the type change (as the
bottom `0o777` bits are the same in both formats, and most users are probably
only using those bits).
However, if you were using `unix.S_ISVTX` to set the sticky bit with
`MkdirAll(Handle)`, you will need to switch to `os.ModeSticky`; otherwise you
will get a runtime error with this update. In addition, the error message you
will get from passing `unix.S_ISUID` and `unix.S_ISGID` will be different as
they are treated as invalid bits now (note that previously passing said bits
was also an error).
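For illustration, creating a sticky directory before and after this change would look roughly like the sketch below (the root and path are placeholders):

```go
import (
	"os"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func mkSticky(root string) error {
	// Pre-0.4.0 callers passed raw unix bits: 0o755|unix.S_ISVTX (an int).
	// From 0.4.0 the mode is an os.FileMode, so os.ModeSticky is used instead.
	return securejoin.MkdirAll(root, "var/tmp/scratch", 0o755|os.ModeSticky)
}
```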
## [0.3.6] - 2024-12-17 ##
### Compatibility ###
@ -193,7 +238,9 @@ This is our first release of `github.com/cyphar/filepath-securejoin`,
containing a full implementation with a coverage of 93.5% (the only missing
cases are the error cases, which are hard to mock-test at the moment).
[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.6...HEAD
[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.1...HEAD
[0.4.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.0...v0.4.1
[0.4.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.6...v0.4.0
[0.3.6]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.5...v0.3.6
[0.3.5]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.4...v0.3.5
[0.3.4]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.3...v0.3.4

View File

@ -1 +1 @@
0.3.6
0.4.1

View File

@ -1,5 +1,5 @@
// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
// Copyright (C) 2017-2024 SUSE LLC. All rights reserved.
// Copyright (C) 2017-2025 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@ -24,6 +24,31 @@ func IsNotExist(err error) bool {
return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT)
}
// errUnsafeRoot is returned if the user provides SecureJoinVFS with a path
// that contains ".." components.
var errUnsafeRoot = errors.New("root path provided to SecureJoin contains '..' components")
// stripVolume just gets rid of the Windows volume included in a path. Based on
// some godbolt tests, the Go compiler is smart enough to make this a no-op on
// Linux.
func stripVolume(path string) string {
return path[len(filepath.VolumeName(path)):]
}
// hasDotDot checks if the path contains ".." components in a platform-agnostic
// way.
func hasDotDot(path string) bool {
// If we are on Windows, strip any volume letters. It turns out that
// C:..\foo may (or may not) be a valid pathname and we need to handle that
// leading "..".
path = stripVolume(path)
// Look for "/../" in the path, but we need to handle leading and trailing
// ".."s by adding separators. Doing this with filepath.Separator is ugly
// so just convert to Unix-style "/" first.
path = filepath.ToSlash(path)
return strings.Contains("/"+path+"/", "/../")
}
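The separator padding means a `..` component is caught at the start, middle, or end of the path, while names that merely contain dots pass. Roughly (illustrative inputs against the unexported helper):

```go
hasDotDot("../etc")     // true:  "/../etc/" contains "/../"
hasDotDot("a/../b")     // true
hasDotDot("a/b/..")     // true:  "/a/b/../" contains "/../"
hasDotDot("a/..b/c..d") // false: "..b" and "c..d" are ordinary names
```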
// SecureJoinVFS joins the two given path components (similar to [filepath.Join]) except
// that the returned path is guaranteed to be scoped inside the provided root
// path (when evaluated). Any symbolic links in the path are evaluated with the
@ -46,7 +71,22 @@ func IsNotExist(err error) bool {
// provided via direct input or when evaluating symlinks. Therefore:
//
// "C:\Temp" + "D:\path\to\file.txt" results in "C:\Temp\path\to\file.txt"
//
// If the provided root is not [filepath.Clean] then an error will be returned,
// as such root paths are borderline unsafe and using them is not best
// practice. We also strongly suggest that any root path is first
// fully resolved using [filepath.EvalSymlinks] or otherwise constructed to
// avoid containing symlink components. Of course, the root also *must not* be
// attacker-controlled.
func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) {
// The root path must not contain ".." components, otherwise when we join
// the subpath we will end up with a weird path. We could work around this
// in other ways but users shouldn't be giving us non-lexical root paths in
// the first place.
if hasDotDot(root) {
return "", errUnsafeRoot
}
// Use the os.* VFS implementation if none was specified.
if vfs == nil {
vfs = osVFS{}
@ -59,9 +99,10 @@ func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) {
linksWalked int
)
for remainingPath != "" {
if v := filepath.VolumeName(remainingPath); v != "" {
remainingPath = remainingPath[len(v):]
}
// On Windows, if we managed to end up at a path referencing a volume,
// drop the volume to make sure we don't end up with broken paths or
// escaping the root volume.
remainingPath = stripVolume(remainingPath)
// Get the next path component.
var part string

View File

@ -21,6 +21,33 @@ var (
errPossibleAttack = errors.New("possible attack detected")
)
// modePermExt is like os.ModePerm except that it also includes the set[ug]id
// and sticky bits.
const modePermExt = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky
//nolint:cyclop // this function needs to handle a lot of cases
func toUnixMode(mode os.FileMode) (uint32, error) {
sysMode := uint32(mode.Perm())
if mode&os.ModeSetuid != 0 {
sysMode |= unix.S_ISUID
}
if mode&os.ModeSetgid != 0 {
sysMode |= unix.S_ISGID
}
if mode&os.ModeSticky != 0 {
sysMode |= unix.S_ISVTX
}
// We don't allow file type bits.
if mode&os.ModeType != 0 {
return 0, fmt.Errorf("%w %+.3o (%s): type bits not permitted", errInvalidMode, mode, mode)
}
// We don't allow other unknown modes.
if mode&^modePermExt != 0 || sysMode&unix.S_IFMT != 0 {
return 0, fmt.Errorf("%w %+.3o (%s): unknown mode bits", errInvalidMode, mode, mode)
}
return sysMode, nil
}
// MkdirAllHandle is equivalent to [MkdirAll], except that it is safer to use
// in two respects:
//
@ -39,17 +66,17 @@ var (
// a brand new lookup of unsafePath (such as with [SecureJoin] or openat2) after
// doing [MkdirAll]. If you intend to open the directory after creating it, you
// should use MkdirAllHandle.
func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err error) {
// Make sure there are no os.FileMode bits set.
if mode&^0o7777 != 0 {
return nil, fmt.Errorf("%w for mkdir 0o%.3o", errInvalidMode, mode)
func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.File, Err error) {
unixMode, err := toUnixMode(mode)
if err != nil {
return nil, err
}
// On Linux, mkdirat(2) (and os.Mkdir) silently ignore the suid and sgid
// bits. We could also silently ignore them but since we have very few
// users it seems more prudent to return an error so users notice that
// these bits will not be set.
if mode&^0o1777 != 0 {
return nil, fmt.Errorf("%w for mkdir 0o%.3o: suid and sgid are ignored by mkdir", errInvalidMode, mode)
if unixMode&^0o1777 != 0 {
return nil, fmt.Errorf("%w for mkdir %+.3o: suid and sgid are ignored by mkdir", errInvalidMode, mode)
}
// Try to open as much of the path as possible.
@ -104,9 +131,6 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err
return nil, fmt.Errorf("%w: yet-to-be-created path %q contains '..' components", unix.ENOENT, remainingPath)
}
// Make sure the mode doesn't have any type bits.
mode &^= unix.S_IFMT
// Create the remaining components.
for _, part := range remainingParts {
switch part {
@ -123,7 +147,7 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err
// directory at the same time as us. In that case, just continue on as
// if we created it (if the created inode is not a directory, the
// following open call will fail).
if err := unix.Mkdirat(int(currentDir.Fd()), part, uint32(mode)); err != nil && !errors.Is(err, unix.EEXIST) {
if err := unix.Mkdirat(int(currentDir.Fd()), part, unixMode); err != nil && !errors.Is(err, unix.EEXIST) {
err = &os.PathError{Op: "mkdirat", Path: currentDir.Name() + "/" + part, Err: err}
// Make the error a bit nicer if the directory is dead.
if deadErr := isDeadInode(currentDir); deadErr != nil {
@ -196,10 +220,7 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err
// If you plan to open the directory after you have created it or want to use
// an open directory handle as the root, you should use [MkdirAllHandle] instead.
// This function is a wrapper around [MkdirAllHandle].
//
// NOTE: The mode argument must be set the unix mode bits (unix.S_I...), not
// the Go generic mode bits ([os.FileMode]...).
func MkdirAll(root, unsafePath string, mode int) error {
func MkdirAll(root, unsafePath string, mode os.FileMode) error {
rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
if err != nil {
return err

View File

@ -14,8 +14,34 @@ This package provides various compression algorithms.
[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml)
[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge)
# package usage
Use `go get github.com/klauspost/compress@latest` to add it to your project.
This package will support the current Go version and 2 versions back.
* Use the `nounsafe` tag to disable all use of the "unsafe" package.
* Use the `noasm` tag to disable all assembly across packages.
Use the links above for more information on each.
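As a quick sketch, `go build -tags "noasm nounsafe" ./...` produces a build with both assembly and `unsafe` disabled; the tags can also be used individually.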
# changelog
* Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0)
* Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036
* fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028
* flate: Simplify L4-6 loading https://github.com/klauspost/compress/pull/1043
* flate: Simplify matchlen (remove asm) https://github.com/klauspost/compress/pull/1045
* s2: Improve small block compression speed w/o asm https://github.com/klauspost/compress/pull/1048
* flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049
* flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050
* Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11)
* zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017
* s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014
* gzhttp: No content-type on no body response code by @juliens in https://github.com/klauspost/compress/pull/1011
* gzhttp: Do not set the content-type when response has no body by @kevinpollet in https://github.com/klauspost/compress/pull/1013
* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10)
* gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978
* gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002
@ -65,9 +91,9 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876
* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1)
* s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871
* s2: Fix S2 "best" dictionary wrong encoding https://github.com/klauspost/compress/pull/871
* flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869
* s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867
* s2: Fix EstimateBlockSize on 6&7 length input https://github.com/klauspost/compress/pull/867
* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0)
* Add experimental dictionary builder https://github.com/klauspost/compress/pull/853
@ -124,7 +150,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
<summary>See changes to v1.15.x</summary>
* Jan 21st, 2023 (v1.15.15)
* deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
* deflate: Improve level 7-9 https://github.com/klauspost/compress/pull/739
* zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
* zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745
* gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740
@ -167,7 +193,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645
* zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644
* zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643
* zstd: Allow single segments up to "max decoded size" https://github.com/klauspost/compress/pull/643
* July 13, 2022 (v1.15.8)
@ -209,7 +235,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599
* zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593
* huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
* flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590
* flate: Inplace hashing for level 7-9 https://github.com/klauspost/compress/pull/590
* May 11, 2022 (v1.15.4)
@ -236,12 +262,12 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
* Mar 3, 2022 (v1.15.0)
* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
* zstd: Refactor decoder [#498](https://github.com/klauspost/compress/pull/498)
* zstd: Add stream encoding without goroutines [#505](https://github.com/klauspost/compress/pull/505)
* huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507)
* flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
* gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
* gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
* flate: Inline literal emission [#509](https://github.com/klauspost/compress/pull/509)
* gzhttp: Add zstd to transport [#400](https://github.com/klauspost/compress/pull/400)
* gzhttp: Make content-type optional [#510](https://github.com/klauspost/compress/pull/510)
Both compression and decompression now support "synchronous" stream operations: whenever "concurrency" is set to 1, they operate without spawning goroutines.
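A sketch of opting in to the synchronous mode (option names as documented in the zstd package; error handling abbreviated):

```go
// With concurrency 1, neither the encoder nor the decoder spawns goroutines.
enc, _ := zstd.NewWriter(dst, zstd.WithEncoderConcurrency(1))
defer enc.Close()

dec, _ := zstd.NewReader(src, zstd.WithDecoderConcurrency(1))
defer dec.Close()
```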
@ -258,7 +284,7 @@ While the release has been extensively tested, it is recommended to testing when
* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
* zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501)
* huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
* huff0: Use static decompression buffer up to 30% faster [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
* Feb 17, 2022 (v1.14.3)
* flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
@ -565,12 +591,14 @@ While the release has been extensively tested, it is recommended to testing when
The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
| old import | new import | Documentation
|--------------------|-----------------------------------------|--------------------|
| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)
| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)
| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)
| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc)
Typical speed is about 2x that of the standard library packages.
| old import | new import | Documentation |
|------------------|---------------------------------------|-------------------------------------------------------------------------|
| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) |
| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) |
| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) |
| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) |
* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
@ -625,84 +653,6 @@ This will only use up to 4KB in memory when the writer is idle.
Compression is almost always worse than the fastest compression level
and each write will allocate (a little) memory.
# Performance Update 2018
It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller the compressed output is compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.
The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only use one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
## Overall differences
There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.
This package attempts to provide a smoother transition, where "1" takes a lot of shortcuts, "5" is the reasonable trade-off and "9" is "give me the best compression", with the values in between giving something reasonable along the way. The standard library has big differences in levels 1-4, while levels 5-9 have no significant gains - often spending a lot more time than can be justified by the achieved compression.
There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
## Web Content
This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.
Since levels 1 and 9 are close to being the same code, their results are quite close. But looking at the levels in between, the differences are quite big.
Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
## Object files
This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression.
The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively.
## Highly Compressible File
This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.
It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.
So if you know your content is extremely compressible, you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
## Medium-High Compressible
This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams.
We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
## Medium Compressible
I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
## Un-compressible Content
This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
## Huffman only compression
This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.
This means that often-used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, and rare characters like '¤' take more bits. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup.
For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
This is implemented in Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
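A minimal sketch of using this mode from the package (the `HuffmanOnly` constant also exists in the standard library's `compress/flate`):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer
	// HuffmanOnly disables match searching entirely; only entropy coding
	// of individual bytes is performed, giving near-linear-time compression.
	w, err := flate.NewWriter(&buf, flate.HuffmanOnly)
	if err != nil {
		panic(err)
	}
	w.Write(bytes.Repeat([]byte("example text "), 1000))
	w.Close()
	fmt.Println("compressed size:", buf.Len())
}
```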
# Other packages

View File

@ -6,8 +6,10 @@
package flate
import (
"encoding/binary"
"fmt"
"math/bits"
"github.com/klauspost/compress/internal/le"
)
type fastEnc interface {
@ -58,11 +60,11 @@ const (
)
func load3232(b []byte, i int32) uint32 {
return binary.LittleEndian.Uint32(b[i:])
return le.Load32(b, i)
}
func load6432(b []byte, i int32) uint64 {
return binary.LittleEndian.Uint64(b[i:])
return le.Load64(b, i)
}
type tableEntry struct {
@ -134,8 +136,8 @@ func hashLen(u uint64, length, mls uint8) uint32 {
// matchlen will return the match length between offsets s and t in src.
// The maximum length returned is maxMatchLength - 4.
// It is assumed that s > t, that t >= 0 and s < len(src).
func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
if debugDecode {
func (e *fastGen) matchlen(s, t int, src []byte) int32 {
if debugDeflate {
if t >= s {
panic(fmt.Sprint("t >=s:", t, s))
}
@ -149,18 +151,34 @@ func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
}
}
s1 := int(s) + maxMatchLength - 4
if s1 > len(src) {
s1 = len(src)
s1 := min(s+maxMatchLength-4, len(src))
left := s1 - s
n := int32(0)
for left >= 8 {
diff := le.Load64(src, s) ^ le.Load64(src, t)
if diff != 0 {
return n + int32(bits.TrailingZeros64(diff)>>3)
}
s += 8
t += 8
n += 8
left -= 8
}
// Extend the match to be as long as possible.
return int32(matchLen(src[s:s1], src[t:]))
a := src[s:s1]
b := src[t:]
for i := range a {
if a[i] != b[i] {
break
}
n++
}
return n
}
// matchlenLong will return the match length between offsets and t in src.
// It is assumed that s > t, that t >=0 and s < len(src).
func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
func (e *fastGen) matchlenLong(s, t int, src []byte) int32 {
if debugDeflate {
if t >= s {
panic(fmt.Sprint("t >=s:", t, s))
@ -176,7 +194,28 @@ func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
}
}
// Extend the match to be as long as possible.
return int32(matchLen(src[s:], src[t:]))
left := len(src) - s
n := int32(0)
for left >= 8 {
diff := le.Load64(src, s) ^ le.Load64(src, t)
if diff != 0 {
return n + int32(bits.TrailingZeros64(diff)>>3)
}
s += 8
t += 8
n += 8
left -= 8
}
a := src[s:]
b := src[t:]
for i := range a {
if a[i] != b[i] {
break
}
n++
}
return n
}
// Reset the encoding table.

View File

@ -5,10 +5,11 @@
package flate
import (
"encoding/binary"
"fmt"
"io"
"math"
"github.com/klauspost/compress/internal/le"
)
const (
@ -438,7 +439,7 @@ func (w *huffmanBitWriter) writeOutBits() {
n := w.nbytes
// We over-write, but faster...
binary.LittleEndian.PutUint64(w.bytes[n:], bits)
le.Store64(w.bytes[n:], bits)
n += 6
if n >= bufferFlushSize {
@ -854,7 +855,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
bits |= c.code64() << (nbits & 63)
nbits += c.len()
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
le.Store64(w.bytes[nbytes:], bits)
//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
bits >>= 48
nbits -= 48
@ -882,7 +883,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
bits |= c.code64() << (nbits & 63)
nbits += c.len()
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
le.Store64(w.bytes[nbytes:], bits)
//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
bits >>= 48
nbits -= 48
@ -905,7 +906,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
bits |= uint64(extraLength) << (nbits & 63)
nbits += extraLengthBits
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
le.Store64(w.bytes[nbytes:], bits)
//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
bits >>= 48
nbits -= 48
@ -931,7 +932,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
bits |= c.code64() << (nbits & 63)
nbits += c.len()
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
le.Store64(w.bytes[nbytes:], bits)
//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
bits >>= 48
nbits -= 48
@ -953,7 +954,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63)
nbits += uint8(offsetComb)
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
le.Store64(w.bytes[nbytes:], bits)
//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
bits >>= 48
nbits -= 48
@ -1107,7 +1108,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
// We must have at least 48 bits free.
if nbits >= 8 {
n := nbits >> 3
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
le.Store64(w.bytes[nbytes:], bits)
bits >>= (n * 8) & 63
nbits -= n * 8
nbytes += n
@ -1136,7 +1137,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
// Remaining...
for _, t := range input {
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
le.Store64(w.bytes[nbytes:], bits)
//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
bits >>= 48
nbits -= 48

View File

@ -1,9 +1,9 @@
package flate
import (
"encoding/binary"
"fmt"
"math/bits"
"github.com/klauspost/compress/internal/le"
)
// fastGen maintains the table for matches,
@ -77,6 +77,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
nextS := s
var candidate tableEntry
var t int32
for {
nextHash := hashLen(cv, tableBits, hashBytes)
candidate = e.table[nextHash]
@ -88,9 +89,8 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
now := load6432(src, nextS)
e.table[nextHash] = tableEntry{offset: s + e.cur}
nextHash = hashLen(now, tableBits, hashBytes)
offset := s - (candidate.offset - e.cur)
if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
t = candidate.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
e.table[nextHash] = tableEntry{offset: nextS + e.cur}
break
}
@ -103,8 +103,8 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
now >>= 8
e.table[nextHash] = tableEntry{offset: s + e.cur}
offset = s - (candidate.offset - e.cur)
if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
t = candidate.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
e.table[nextHash] = tableEntry{offset: nextS + e.cur}
break
}
@ -120,36 +120,10 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
// literal bytes prior to s.
// Extend the 4-byte match as long as possible.
t := candidate.offset - e.cur
var l = int32(4)
if false {
l = e.matchlenLong(s+4, t+4, src) + 4
} else {
// inlined:
a := src[s+4:]
b := src[t+4:]
for len(a) >= 8 {
if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
l += int32(bits.TrailingZeros64(diff) >> 3)
break
}
l += 8
a = a[8:]
b = b[8:]
}
if len(a) < 8 {
b = b[:len(a)]
for i := range a {
if a[i] != b[i] {
break
}
l++
}
}
}
l := e.matchlenLong(int(s+4), int(t+4), src) + 4
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
for t > 0 && s > nextEmit && le.Load8(src, t-1) == le.Load8(src, s-1) {
s--
t--
l++
@ -221,8 +195,8 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
candidate = e.table[currHash]
e.table[currHash] = tableEntry{offset: o + 2}
offset := s - (candidate.offset - e.cur)
if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) {
t = candidate.offset - e.cur
if s-t > maxMatchOffset || uint32(x) != load3232(src, t) {
cv = x >> 8
s++
break

View File

@ -126,7 +126,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
// Extend the 4-byte match as long as possible.
t := candidate.offset - e.cur
l := e.matchlenLong(s+4, t+4, src) + 4
l := e.matchlenLong(int(s+4), int(t+4), src) + 4
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {

View File

@ -135,7 +135,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
// Extend the 4-byte match as long as possible.
//
t := candidate.offset - e.cur
l := e.matchlenLong(s+4, t+4, src) + 4
l := e.matchlenLong(int(s+4), int(t+4), src) + 4
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {

View File

@ -98,19 +98,19 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
e.bTable[nextHashL] = entry
t = lCandidate.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) {
if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
// We got a long match. Use that.
break
}
t = sCandidate.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
// Found a 4 match...
lCandidate = e.bTable[hash7(next, tableBits)]
// If the next long is a candidate, check if we should use that instead...
lOff := nextS - (lCandidate.offset - e.cur)
if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) {
lOff := lCandidate.offset - e.cur
if nextS-lOff < maxMatchOffset && load3232(src, lOff) == uint32(next) {
l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:])
if l2 > l1 {
s = nextS
@ -127,7 +127,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
// them as literal bytes.
// Extend the 4-byte match as long as possible.
l := e.matchlenLong(s+4, t+4, src) + 4
l := e.matchlenLong(int(s+4), int(t+4), src) + 4
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {

View File

@ -111,16 +111,16 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
t = lCandidate.Cur.offset - e.cur
if s-t < maxMatchOffset {
if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
if uint32(cv) == load3232(src, t) {
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
t2 := lCandidate.Prev.offset - e.cur
if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
l = e.matchlen(s+4, t+4, src) + 4
ml1 := e.matchlen(s+4, t2+4, src) + 4
if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) {
l = e.matchlen(int(s+4), int(t+4), src) + 4
ml1 := e.matchlen(int(s+4), int(t2+4), src) + 4
if ml1 > l {
t = t2
l = ml1
@ -130,7 +130,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
break
}
t = lCandidate.Prev.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
@ -140,9 +140,9 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
}
t = sCandidate.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
// Found a 4 match...
l = e.matchlen(s+4, t+4, src) + 4
l = e.matchlen(int(s+4), int(t+4), src) + 4
lCandidate = e.bTable[nextHashL]
// Store the next match
@ -153,8 +153,8 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
// If the next long is a candidate, use that...
t2 := lCandidate.Cur.offset - e.cur
if nextS-t2 < maxMatchOffset {
if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
ml := e.matchlen(nextS+4, t2+4, src) + 4
if load3232(src, t2) == uint32(next) {
ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4
if ml > l {
t = t2
s = nextS
@ -164,8 +164,8 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
}
// If the previous long is a candidate, use that...
t2 = lCandidate.Prev.offset - e.cur
if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
ml := e.matchlen(nextS+4, t2+4, src) + 4
if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) {
ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4
if ml > l {
t = t2
s = nextS
@ -185,9 +185,9 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
if l == 0 {
// Extend the 4-byte match as long as possible.
l = e.matchlenLong(s+4, t+4, src) + 4
l = e.matchlenLong(int(s+4), int(t+4), src) + 4
} else if l == maxMatchLength {
l += e.matchlenLong(s+l, t+l, src)
l += e.matchlenLong(int(s+l), int(t+l), src)
}
// Try to locate a better match by checking the end of best match...
@ -203,7 +203,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
s2 := s + skipBeginning
off := s2 - t2
if t2 >= 0 && off < maxMatchOffset && off > 0 {
if l2 := e.matchlenLong(s2, t2, src); l2 > l {
if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l {
t = t2
l = l2
s = s2
@ -423,14 +423,14 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) {
t = lCandidate.Cur.offset - e.cur
if s-t < maxMatchOffset {
if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
if uint32(cv) == load3232(src, t) {
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
t2 := lCandidate.Prev.offset - e.cur
if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) {
l = e.matchlen(s+4, t+4, src) + 4
ml1 := e.matchlen(s+4, t2+4, src) + 4
if ml1 > l {
@ -442,7 +442,7 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) {
break
}
t = lCandidate.Prev.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
@ -452,7 +452,7 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) {
}
t = sCandidate.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
// Found a 4 match...
l = e.matchlen(s+4, t+4, src) + 4
lCandidate = e.bTable[nextHashL]
@ -465,7 +465,7 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) {
// If the next long is a candidate, use that...
t2 := lCandidate.Cur.offset - e.cur
if nextS-t2 < maxMatchOffset {
if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
if load3232(src, t2) == uint32(next) {
ml := e.matchlen(nextS+4, t2+4, src) + 4
if ml > l {
t = t2
@ -476,7 +476,7 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) {
}
// If the previous long is a candidate, use that...
t2 = lCandidate.Prev.offset - e.cur
if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) {
ml := e.matchlen(nextS+4, t2+4, src) + 4
if ml > l {
t = t2

View File

@ -113,7 +113,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
t = lCandidate.Cur.offset - e.cur
if s-t < maxMatchOffset {
if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
if uint32(cv) == load3232(src, t) {
// Long candidate matches at least 4 bytes.
// Store the next match
@ -123,9 +123,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
// Check the previous long candidate as well.
t2 := lCandidate.Prev.offset - e.cur
if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
l = e.matchlen(s+4, t+4, src) + 4
ml1 := e.matchlen(s+4, t2+4, src) + 4
if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) {
l = e.matchlen(int(s+4), int(t+4), src) + 4
ml1 := e.matchlen(int(s+4), int(t2+4), src) + 4
if ml1 > l {
t = t2
l = ml1
@ -136,7 +136,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
}
// Current value did not match, but check if previous long value does.
t = lCandidate.Prev.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
@ -146,9 +146,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
}
t = sCandidate.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
// Found a 4 match...
l = e.matchlen(s+4, t+4, src) + 4
l = e.matchlen(int(s+4), int(t+4), src) + 4
// Look up next long candidate (at nextS)
lCandidate = e.bTable[nextHashL]
@ -162,7 +162,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
const repOff = 1
t2 := s - repeat + repOff
if load3232(src, t2) == uint32(cv>>(8*repOff)) {
ml := e.matchlen(s+4+repOff, t2+4, src) + 4
ml := e.matchlen(int(s+4+repOff), int(t2+4), src) + 4
if ml > l {
t = t2
l = ml
@ -175,8 +175,8 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
// If the next long is a candidate, use that...
t2 = lCandidate.Cur.offset - e.cur
if nextS-t2 < maxMatchOffset {
if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
ml := e.matchlen(nextS+4, t2+4, src) + 4
if load3232(src, t2) == uint32(next) {
ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4
if ml > l {
t = t2
s = nextS
@ -186,8 +186,8 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
}
// If the previous long is a candidate, use that...
t2 = lCandidate.Prev.offset - e.cur
if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
ml := e.matchlen(nextS+4, t2+4, src) + 4
if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) {
ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4
if ml > l {
t = t2
s = nextS
@ -207,9 +207,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
// Extend the 4-byte match as long as possible.
if l == 0 {
l = e.matchlenLong(s+4, t+4, src) + 4
l = e.matchlenLong(int(s+4), int(t+4), src) + 4
} else if l == maxMatchLength {
l += e.matchlenLong(s+l, t+l, src)
l += e.matchlenLong(int(s+l), int(t+l), src)
}
// Try to locate a better match by checking the end-of-match...
@ -227,7 +227,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
off := s2 - t2
if off < maxMatchOffset {
if off > 0 && t2 >= 0 {
if l2 := e.matchlenLong(s2, t2, src); l2 > l {
if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l {
t = t2
l = l2
s = s2
@ -237,7 +237,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
t2 = eLong.Prev.offset - e.cur - l + skipBeginning
off := s2 - t2
if off > 0 && off < maxMatchOffset && t2 >= 0 {
if l2 := e.matchlenLong(s2, t2, src); l2 > l {
if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l {
t = t2
l = l2
s = s2

View File

@ -1,16 +0,0 @@
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
package flate
// matchLen returns how many bytes match in a and b
//
// It assumes that:
//
// len(a) <= len(b) and len(a) > 0
//
//go:noescape
func matchLen(a []byte, b []byte) int

View File

@ -1,66 +0,0 @@
// Copied from S2 implementation.
//go:build !appengine && !noasm && gc && !noasm
#include "textflag.h"
// func matchLen(a []byte, b []byte) int
TEXT ·matchLen(SB), NOSPLIT, $0-56
MOVQ a_base+0(FP), AX
MOVQ b_base+24(FP), CX
MOVQ a_len+8(FP), DX
// matchLen
XORL SI, SI
CMPL DX, $0x08
JB matchlen_match4_standalone
matchlen_loopback_standalone:
MOVQ (AX)(SI*1), BX
XORQ (CX)(SI*1), BX
JZ matchlen_loop_standalone
#ifdef GOAMD64_v3
TZCNTQ BX, BX
#else
BSFQ BX, BX
#endif
SHRL $0x03, BX
LEAL (SI)(BX*1), SI
JMP gen_match_len_end
matchlen_loop_standalone:
LEAL -8(DX), DX
LEAL 8(SI), SI
CMPL DX, $0x08
JAE matchlen_loopback_standalone
matchlen_match4_standalone:
CMPL DX, $0x04
JB matchlen_match2_standalone
MOVL (AX)(SI*1), BX
CMPL (CX)(SI*1), BX
JNE matchlen_match2_standalone
LEAL -4(DX), DX
LEAL 4(SI), SI
matchlen_match2_standalone:
CMPL DX, $0x02
JB matchlen_match1_standalone
MOVW (AX)(SI*1), BX
CMPW (CX)(SI*1), BX
JNE matchlen_match1_standalone
LEAL -2(DX), DX
LEAL 2(SI), SI
matchlen_match1_standalone:
CMPL DX, $0x01
JB gen_match_len_end
MOVB (AX)(SI*1), BL
CMPB (CX)(SI*1), BL
JNE gen_match_len_end
INCL SI
gen_match_len_end:
MOVQ SI, ret+48(FP)
RET

View File

@ -1,27 +1,29 @@
//go:build !amd64 || appengine || !gc || noasm
// +build !amd64 appengine !gc noasm
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
package flate
import (
"encoding/binary"
"math/bits"
"github.com/klauspost/compress/internal/le"
)
// matchLen returns the maximum common prefix length of a and b.
// a must be the shorter of the two.
func matchLen(a, b []byte) (n int) {
for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
left := len(a)
for left >= 8 {
diff := le.Load64(a, n) ^ le.Load64(b, n)
if diff != 0 {
return n + bits.TrailingZeros64(diff)>>3
}
n += 8
left -= 8
}
a = a[n:]
b = b[n:]
for i := range a {
if a[i] != b[i] {
break
@ -29,5 +31,4 @@ func matchLen(a, b []byte) (n int) {
n++
}
return n
}
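For intuition, the function counts the shared prefix eight bytes at a time where possible, then byte-by-byte for the tail (illustrative calls against the unexported function):

```go
matchLen([]byte("hello world"), []byte("hello there")) // 6: "hello " is shared
matchLen([]byte("aaaa"), []byte("aaaax"))              // 4: all of a matches
```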

View File

@ -4,6 +4,8 @@ import (
"io"
"math"
"sync"
"github.com/klauspost/compress/internal/le"
)
const (
@ -152,18 +154,11 @@ func hashSL(u uint32) uint32 {
}
func load3216(b []byte, i int16) uint32 {
// Help the compiler eliminate bounds checks on the read so it can be done in a single read.
b = b[i:]
b = b[:4]
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
return le.Load32(b, i)
}
func load6416(b []byte, i int16) uint64 {
// Help the compiler eliminate bounds checks on the read so it can be done in a single read.
b = b[i:]
b = b[:8]
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
return le.Load64(b, i)
}
func statelessEnc(dst *tokens, src []byte, startAt int16) {

View File

@ -6,10 +6,11 @@
package huff0
import (
"encoding/binary"
"errors"
"fmt"
"io"
"github.com/klauspost/compress/internal/le"
)
// bitReader reads a bitstream in reverse.
@ -46,7 +47,7 @@ func (b *bitReaderBytes) init(in []byte) error {
return nil
}
// peekBitsFast requires that at least one bit is requested every time.
// peekByteFast requires that at least one byte is requested every time.
// There are no checks if the buffer is filled.
func (b *bitReaderBytes) peekByteFast() uint8 {
got := uint8(b.value >> 56)
@ -66,8 +67,7 @@ func (b *bitReaderBytes) fillFast() {
}
// 2 bounds checks.
v := b.in[b.off-4 : b.off]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
low := le.Load32(b.in, b.off-4)
b.value |= uint64(low) << (b.bitsRead - 32)
b.bitsRead -= 32
b.off -= 4
@ -76,7 +76,7 @@ func (b *bitReaderBytes) fillFast() {
// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read.
func (b *bitReaderBytes) fillFastStart() {
// Do single re-slice to avoid bounds checks.
b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
b.value = le.Load64(b.in, b.off-8)
b.bitsRead = 0
b.off -= 8
}
@ -86,9 +86,8 @@ func (b *bitReaderBytes) fill() {
if b.bitsRead < 32 {
return
}
if b.off > 4 {
v := b.in[b.off-4 : b.off]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
if b.off >= 4 {
low := le.Load32(b.in, b.off-4)
b.value |= uint64(low) << (b.bitsRead - 32)
b.bitsRead -= 32
b.off -= 4
@ -175,9 +174,7 @@ func (b *bitReaderShifted) fillFast() {
return
}
// 2 bounds checks.
v := b.in[b.off-4 : b.off]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
low := le.Load32(b.in, b.off-4)
b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
b.bitsRead -= 32
b.off -= 4
@ -185,8 +182,7 @@ func (b *bitReaderShifted) fillFast() {
// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read.
func (b *bitReaderShifted) fillFastStart() {
// Do single re-slice to avoid bounds checks.
b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
b.value = le.Load64(b.in, b.off-8)
b.bitsRead = 0
b.off -= 8
}
@ -197,8 +193,7 @@ func (b *bitReaderShifted) fill() {
return
}
if b.off > 4 {
v := b.in[b.off-4 : b.off]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
low := le.Load32(b.in, b.off-4)
b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
b.bitsRead -= 32
b.off -= 4

View File

@ -0,0 +1,5 @@
package le
type Indexer interface {
int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64
}

View File

@ -0,0 +1,42 @@
//go:build !(amd64 || arm64 || ppc64le || riscv64) || nounsafe || purego || appengine
package le
import (
"encoding/binary"
)
// Load8 will load from b at index i.
func Load8[I Indexer](b []byte, i I) byte {
return b[i]
}
// Load16 will load from b at index i.
func Load16[I Indexer](b []byte, i I) uint16 {
return binary.LittleEndian.Uint16(b[i:])
}
// Load32 will load from b at index i.
func Load32[I Indexer](b []byte, i I) uint32 {
return binary.LittleEndian.Uint32(b[i:])
}
// Load64 will load from b at index i.
func Load64[I Indexer](b []byte, i I) uint64 {
return binary.LittleEndian.Uint64(b[i:])
}
// Store16 will store v at b.
func Store16(b []byte, v uint16) {
binary.LittleEndian.PutUint16(b, v)
}
// Store32 will store v at b.
func Store32(b []byte, v uint32) {
binary.LittleEndian.PutUint32(b, v)
}
// Store64 will store v at b.
func Store64(b []byte, v uint64) {
binary.LittleEndian.PutUint64(b, v)
}
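internal/le is not importable from outside the module, so here is a self-contained sketch of how the generic index parameter is used, with the constraint reproduced from above; flate, for instance, can pass its int16 offsets without converting at each call site:

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	// Indexer reproduces the constraint defined above for this sketch.
	type Indexer interface {
		int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64
	}

	// Load32 matches the safe implementation: a generic index means no
	// conversion at the call site, whatever width the caller already has.
	func Load32[I Indexer](b []byte, i I) uint32 {
		return binary.LittleEndian.Uint32(b[i:])
	}

	func main() {
		b := []byte{0x78, 0x56, 0x34, 0x12}
		var off int16 = 0
		fmt.Printf("%#x\n", Load32(b, off)) // 0x12345678
	}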

View File

@ -0,0 +1,55 @@
// Enabled on 64-bit little-endian platforms:
//go:build (amd64 || arm64 || ppc64le || riscv64) && !nounsafe && !purego && !appengine
package le
import (
"unsafe"
)
// Load8 will load from b at index i.
func Load8[I Indexer](b []byte, i I) byte {
return *(*byte)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
}
// Load16 will load from b at index i.
func Load16[I Indexer](b []byte, i I) uint16 {
//return binary.LittleEndian.Uint16(b[i:])
//return *(*uint16)(unsafe.Pointer(&b[i]))
return *(*uint16)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
}
// Load32 will load from b at index i.
func Load32[I Indexer](b []byte, i I) uint32 {
//return binary.LittleEndian.Uint32(b[i:])
//return *(*uint32)(unsafe.Pointer(&b[i]))
return *(*uint32)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
}
// Load64 will load from b at index i.
func Load64[I Indexer](b []byte, i I) uint64 {
//return binary.LittleEndian.Uint64(b[i:])
//return *(*uint64)(unsafe.Pointer(&b[i]))
return *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
}
// Store16 will store v at b.
func Store16(b []byte, v uint16) {
//binary.LittleEndian.PutUint16(b, v)
*(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v
}
// Store32 will store v at b.
func Store32(b []byte, v uint32) {
//binary.LittleEndian.PutUint32(b, v)
*(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v
}
// Store64 will store v at b.
func Store64(b []byte, v uint64) {
//binary.LittleEndian.PutUint64(b, v)
*(*uint64)(unsafe.Pointer(unsafe.SliceData(b))) = v
}
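The unsafe variants perform no bounds checking, so every caller owns the precondition i+size <= len(b); the refill guards earlier in this diff exist to establish exactly that. An illustration of the contract (the helper name is hypothetical, not part of the package):

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	// load32Checked (hypothetical) shows the guard callers of the unsafe
	// path must provide themselves; the hot loops hoist it into conditions.
	func load32Checked(b []byte, i int) (uint32, bool) {
		if i < 0 || i+4 > len(b) {
			return 0, false // the unsafe Load32 would read out of bounds here
		}
		return binary.LittleEndian.Uint32(b[i:]), true
	}

	func main() {
		_, ok := load32Checked(make([]byte, 6), 4)
		fmt.Println(ok) // false: only 2 bytes remain at offset 4
	}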

View File

@ -1,4 +1,3 @@
module github.com/klauspost/compress
go 1.19
go 1.22

View File

@ -6,7 +6,7 @@ A high performance compression algorithm is implemented. For now focused on spee
This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content.
This package is pure Go and without use of "unsafe".
This package is pure Go. Use `noasm` and `nounsafe` to disable relevant features.
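Both are ordinary Go build tags, so a consumer can opt out of the assembly and unsafe fast paths at build time; the invocation below is assumed from the tag names, not spelled out in this diff:

	go build -tags noasm,nounsafe ./...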
The `zstd` package is provided as open source software using a Go standard license.

View File

@ -5,11 +5,12 @@
package zstd
import (
"encoding/binary"
"errors"
"fmt"
"io"
"math/bits"
"github.com/klauspost/compress/internal/le"
)
// bitReader reads a bitstream in reverse.
@ -18,6 +19,7 @@ import (
type bitReader struct {
in []byte
value uint64 // Maybe use [16]byte, but shifting is awkward.
cursor int // offset where next read should end
bitsRead uint8
}
@ -32,6 +34,7 @@ func (b *bitReader) init(in []byte) error {
if v == 0 {
return errors.New("corrupt stream, did not find end of stream")
}
b.cursor = len(in)
b.bitsRead = 64
b.value = 0
if len(in) >= 8 {
@ -67,18 +70,15 @@ func (b *bitReader) fillFast() {
if b.bitsRead < 32 {
return
}
v := b.in[len(b.in)-4:]
b.in = b.in[:len(b.in)-4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value = (b.value << 32) | uint64(low)
b.cursor -= 4
b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor))
b.bitsRead -= 32
}
// fillFastStart() assumes the bitReader is empty and there are at least 8 bytes to read.
func (b *bitReader) fillFastStart() {
v := b.in[len(b.in)-8:]
b.in = b.in[:len(b.in)-8]
b.value = binary.LittleEndian.Uint64(v)
b.cursor -= 8
b.value = le.Load64(b.in, b.cursor)
b.bitsRead = 0
}
@ -87,25 +87,23 @@ func (b *bitReader) fill() {
if b.bitsRead < 32 {
return
}
if len(b.in) >= 4 {
v := b.in[len(b.in)-4:]
b.in = b.in[:len(b.in)-4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value = (b.value << 32) | uint64(low)
if b.cursor >= 4 {
b.cursor -= 4
b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor))
b.bitsRead -= 32
return
}
b.bitsRead -= uint8(8 * len(b.in))
for len(b.in) > 0 {
b.value = (b.value << 8) | uint64(b.in[len(b.in)-1])
b.in = b.in[:len(b.in)-1]
b.bitsRead -= uint8(8 * b.cursor)
for b.cursor > 0 {
b.cursor -= 1
b.value = (b.value << 8) | uint64(b.in[b.cursor])
}
}
// finished returns true if all bits have been read from the bit stream.
func (b *bitReader) finished() bool {
return len(b.in) == 0 && b.bitsRead >= 64
return b.cursor == 0 && b.bitsRead >= 64
}
// overread returns true if more bits have been requested than is on the stream.
@ -115,13 +113,14 @@ func (b *bitReader) overread() bool {
// remain returns the number of bits remaining.
func (b *bitReader) remain() uint {
return 8*uint(len(b.in)) + 64 - uint(b.bitsRead)
return 8*uint(b.cursor) + 64 - uint(b.bitsRead)
}
// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReader) close() error {
// Release reference.
b.in = nil
b.cursor = 0
if !b.finished() {
return fmt.Errorf("%d extra bits on block, should be 0", b.remain())
}
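With in left intact and consumption tracked by cursor, remain() and finished() become pure arithmetic over the fields shown above; a minimal sketch of that accounting, assuming this simplified mirror of the type:

	package main

	import "fmt"

	// bitReader mirrors the fields used by remain(), per the struct above.
	type bitReader struct {
		in       []byte
		value    uint64
		cursor   int   // bytes of in not yet shifted into value
		bitsRead uint8 // bits of value already consumed
	}

	func (b *bitReader) remain() uint {
		return 8*uint(b.cursor) + 64 - uint(b.bitsRead)
	}

	func main() {
		b := &bitReader{in: make([]byte, 12), cursor: 4, bitsRead: 16}
		fmt.Println(b.remain()) // 32 + 64 - 16 = 80 bits unread
	}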

View File

@ -5,14 +5,10 @@
package zstd
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"hash/crc32"
"io"
"os"
"path/filepath"
"sync"
"github.com/klauspost/compress/huff0"
@ -648,21 +644,6 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
println("initializing sequences:", err)
return err
}
// Extract blocks...
if false && hist.dict == nil {
fatalErr := func(err error) {
if err != nil {
panic(err)
}
}
fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize)
var buf bytes.Buffer
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse))
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
buf.Write(in)
os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
}
return nil
}

View File

@ -9,6 +9,7 @@ import (
"fmt"
"math"
"math/bits"
"slices"
"github.com/klauspost/compress/huff0"
)
@ -457,16 +458,7 @@ func fuzzFseEncoder(data []byte) int {
// All 0
return 0
}
maxCount := func(a []uint32) int {
var max uint32
for _, v := range a {
if v > max {
max = v
}
}
return int(max)
}
cnt := maxCount(hist[:maxSym])
cnt := int(slices.Max(hist[:maxSym]))
if cnt == len(data) {
// RLE
return 0
@ -884,15 +876,6 @@ func (b *blockEnc) genCodes() {
}
}
}
maxCount := func(a []uint32) int {
var max uint32
for _, v := range a {
if v > max {
max = v
}
}
return int(max)
}
if debugAsserts && mlMax > maxMatchLengthSymbol {
panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
}
@ -903,7 +886,7 @@ func (b *blockEnc) genCodes() {
panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
}
b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
b.coders.mlEnc.HistogramFinished(mlMax, int(slices.Max(mlH[:mlMax+1])))
b.coders.ofEnc.HistogramFinished(ofMax, int(slices.Max(ofH[:ofMax+1])))
b.coders.llEnc.HistogramFinished(llMax, int(slices.Max(llH[:llMax+1])))
}
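slices.Max requires Go 1.21 or newer, which the go directive bump earlier in this diff satisfies; it panics on an empty slice, so these call sites rely on hist[:maxSym] and the [:max+1] slices being non-empty. A minimal equivalence check against the removed maxCount closure:

	package main

	import (
		"fmt"
		"slices"
	)

	func main() {
		hist := []uint32{3, 9, 4, 9}
		// Same result as the removed hand-rolled maximum loop.
		fmt.Println(int(slices.Max(hist))) // 9
	}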

View File

@ -123,7 +123,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
}
// Read bytes from the decompressed stream into p.
// Returns the number of bytes written and any error that occurred.
// Returns the number of bytes read and any error that occurred.
// When the stream is done, io.EOF will be returned.
func (d *Decoder) Read(p []byte) (int, error) {
var n int
@ -323,6 +323,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
frame.bBuf = nil
if frame.history.decoders.br != nil {
frame.history.decoders.br.in = nil
frame.history.decoders.br.cursor = 0
}
d.decoders <- block
}()

View File

@ -116,7 +116,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
panic(err)
}
if t < 0 {
err := fmt.Sprintf("s (%d) < 0", s)
err := fmt.Sprintf("t (%d) < 0", t)
panic(err)
}
if s-t > e.maxMatchOff {

View File

@ -7,20 +7,25 @@
package zstd
import (
"encoding/binary"
"math/bits"
"github.com/klauspost/compress/internal/le"
)
// matchLen returns the maximum common prefix length of a and b.
// a must be the shortest of the two.
func matchLen(a, b []byte) (n int) {
for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
left := len(a)
for left >= 8 {
diff := le.Load64(a, n) ^ le.Load64(b, n)
if diff != 0 {
return n + bits.TrailingZeros64(diff)>>3
}
n += 8
left -= 8
}
a = a[n:]
b = b[n:]
for i := range a {
if a[i] != b[i] {

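The word loop relies on one trick: XORing two 8-byte little-endian words zeroes every matching leading byte, so the count of trailing zero bits divided by 8 is the length of the common prefix within that word. A self-contained check of a single iteration:

	package main

	import (
		"encoding/binary"
		"fmt"
		"math/bits"
	)

	// matchLen8 mirrors one iteration of the loop above for a single word.
	func matchLen8(a, b []byte) int {
		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
		if diff == 0 {
			return 8
		}
		return bits.TrailingZeros64(diff) >> 3 // index of first differing byte
	}

	func main() {
		a := []byte("abcdefgh")
		b := []byte("abcxefgh")
		fmt.Println(matchLen8(a, b)) // 3: "abc" matches before the first difference
	}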
View File

@ -245,7 +245,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
return io.ErrUnexpectedEOF
}
var ll, mo, ml int
if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
if br.cursor > 4+((maxOffsetBits+16+16)>>3) {
// inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)

View File

@ -7,9 +7,9 @@
TEXT ·sequenceDecs_decode_amd64(SB), $8-32
MOVQ br+8(FP), CX
MOVQ 24(CX), DX
MOVBQZX 32(CX), BX
MOVBQZX 40(CX), BX
MOVQ (CX), AX
MOVQ 8(CX), SI
MOVQ 32(CX), SI
ADDQ SI, AX
MOVQ AX, (SP)
MOVQ ctx+16(FP), AX
@ -299,8 +299,8 @@ sequenceDecs_decode_amd64_match_len_ofs_ok:
MOVQ R13, 160(AX)
MOVQ br+8(FP), AX
MOVQ DX, 24(AX)
MOVB BL, 32(AX)
MOVQ SI, 8(AX)
MOVB BL, 40(AX)
MOVQ SI, 32(AX)
// Return success
MOVQ $0x00000000, ret+24(FP)
@ -335,9 +335,9 @@ error_overread:
TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
MOVQ br+8(FP), CX
MOVQ 24(CX), DX
MOVBQZX 32(CX), BX
MOVBQZX 40(CX), BX
MOVQ (CX), AX
MOVQ 8(CX), SI
MOVQ 32(CX), SI
ADDQ SI, AX
MOVQ AX, (SP)
MOVQ ctx+16(FP), AX
@ -598,8 +598,8 @@ sequenceDecs_decode_56_amd64_match_len_ofs_ok:
MOVQ R13, 160(AX)
MOVQ br+8(FP), AX
MOVQ DX, 24(AX)
MOVB BL, 32(AX)
MOVQ SI, 8(AX)
MOVB BL, 40(AX)
MOVQ SI, 32(AX)
// Return success
MOVQ $0x00000000, ret+24(FP)
@ -634,9 +634,9 @@ error_overread:
TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
MOVQ br+8(FP), BX
MOVQ 24(BX), AX
MOVBQZX 32(BX), DX
MOVBQZX 40(BX), DX
MOVQ (BX), CX
MOVQ 8(BX), BX
MOVQ 32(BX), BX
ADDQ BX, CX
MOVQ CX, (SP)
MOVQ ctx+16(FP), CX
@ -884,8 +884,8 @@ sequenceDecs_decode_bmi2_match_len_ofs_ok:
MOVQ R12, 160(CX)
MOVQ br+8(FP), CX
MOVQ AX, 24(CX)
MOVB DL, 32(CX)
MOVQ BX, 8(CX)
MOVB DL, 40(CX)
MOVQ BX, 32(CX)
// Return success
MOVQ $0x00000000, ret+24(FP)
@ -920,9 +920,9 @@ error_overread:
TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
MOVQ br+8(FP), BX
MOVQ 24(BX), AX
MOVBQZX 32(BX), DX
MOVBQZX 40(BX), DX
MOVQ (BX), CX
MOVQ 8(BX), BX
MOVQ 32(BX), BX
ADDQ BX, CX
MOVQ CX, (SP)
MOVQ ctx+16(FP), CX
@ -1141,8 +1141,8 @@ sequenceDecs_decode_56_bmi2_match_len_ofs_ok:
MOVQ R12, 160(CX)
MOVQ br+8(FP), CX
MOVQ AX, 24(CX)
MOVB DL, 32(CX)
MOVQ BX, 8(CX)
MOVB DL, 40(CX)
MOVQ BX, 32(CX)
// Return success
MOVQ $0x00000000, ret+24(FP)
@ -1787,9 +1787,9 @@ empty_seqs:
TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
MOVQ br+8(FP), CX
MOVQ 24(CX), DX
MOVBQZX 32(CX), BX
MOVBQZX 40(CX), BX
MOVQ (CX), AX
MOVQ 8(CX), SI
MOVQ 32(CX), SI
ADDQ SI, AX
MOVQ AX, (SP)
MOVQ ctx+16(FP), AX
@ -2281,8 +2281,8 @@ handle_loop:
loop_finished:
MOVQ br+8(FP), AX
MOVQ DX, 24(AX)
MOVB BL, 32(AX)
MOVQ SI, 8(AX)
MOVB BL, 40(AX)
MOVQ SI, 32(AX)
// Update the context
MOVQ ctx+16(FP), AX
@ -2349,9 +2349,9 @@ error_not_enough_space:
TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
MOVQ br+8(FP), BX
MOVQ 24(BX), AX
MOVBQZX 32(BX), DX
MOVBQZX 40(BX), DX
MOVQ (BX), CX
MOVQ 8(BX), BX
MOVQ 32(BX), BX
ADDQ BX, CX
MOVQ CX, (SP)
MOVQ ctx+16(FP), CX
@ -2801,8 +2801,8 @@ handle_loop:
loop_finished:
MOVQ br+8(FP), CX
MOVQ AX, 24(CX)
MOVB DL, 32(CX)
MOVQ BX, 8(CX)
MOVB DL, 40(CX)
MOVQ BX, 32(CX)
// Update the context
MOVQ ctx+16(FP), AX
@ -2869,9 +2869,9 @@ error_not_enough_space:
TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
MOVQ br+8(FP), CX
MOVQ 24(CX), DX
MOVBQZX 32(CX), BX
MOVBQZX 40(CX), BX
MOVQ (CX), AX
MOVQ 8(CX), SI
MOVQ 32(CX), SI
ADDQ SI, AX
MOVQ AX, (SP)
MOVQ ctx+16(FP), AX
@ -3465,8 +3465,8 @@ handle_loop:
loop_finished:
MOVQ br+8(FP), AX
MOVQ DX, 24(AX)
MOVB BL, 32(AX)
MOVQ SI, 8(AX)
MOVB BL, 40(AX)
MOVQ SI, 32(AX)
// Update the context
MOVQ ctx+16(FP), AX
@ -3533,9 +3533,9 @@ error_not_enough_space:
TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
MOVQ br+8(FP), BX
MOVQ 24(BX), AX
MOVBQZX 32(BX), DX
MOVBQZX 40(BX), DX
MOVQ (BX), CX
MOVQ 8(BX), BX
MOVQ 32(BX), BX
ADDQ BX, CX
MOVQ CX, (SP)
MOVQ ctx+16(FP), CX
@ -4087,8 +4087,8 @@ handle_loop:
loop_finished:
MOVQ br+8(FP), CX
MOVQ AX, 24(CX)
MOVB DL, 32(CX)
MOVQ BX, 8(CX)
MOVB DL, 40(CX)
MOVQ BX, 32(CX)
// Update the context
MOVQ ctx+16(FP), AX

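The operand edits in this file track the new bitReader layout shown earlier: on amd64 the in slice header occupies bytes 0..23 (pointer, length, capacity), value sits at 24, the new cursor field at 32, and bitsRead moves from 32 to 40. The length word at 8(CX) is no longer the read position; cursor at 32(CX) is. A sketch verifying the assumed offsets:

	package main

	import (
		"fmt"
		"unsafe"
	)

	// Mirror of the zstd bitReader shown earlier in this diff.
	type bitReader struct {
		in       []byte
		value    uint64
		cursor   int
		bitsRead uint8
	}

	func main() {
		var b bitReader
		// Matches the assembly operands: 24(CX) value, 32(CX) cursor, 40(CX) bitsRead.
		fmt.Println(unsafe.Offsetof(b.value))    // 24
		fmt.Println(unsafe.Offsetof(b.cursor))   // 32
		fmt.Println(unsafe.Offsetof(b.bitsRead)) // 40
	}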
View File

@ -29,7 +29,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
}
for i := range seqs {
var ll, mo, ml int
if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
if br.cursor > 4+((maxOffsetBits+16+16)>>3) {
// inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)

Some files were not shown because too many files have changed in this diff.