Mirror of https://github.com/linuxkit/linuxkit.git (synced 2025-09-01 23:18:41 +00:00)
update pkg/metadata with better logging
Signed-off-by: Avi Deitcher <avi@deitcher.net>
@@ -11,7 +11,7 @@ init:
 onboot:
   # support metadata for optional config in /run/config
   - name: metadata
-    image: linuxkit/metadata:48846eecc1a923fd48d9c0e22b2e6c3ea09f01df
+    image: linuxkit/metadata:501144d47215671e77b9cac44748a04f21236195
   - name: sysctl
     image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
   - name: sysfs
@@ -13,7 +13,7 @@ onboot:
     image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
     command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
   - name: metadata
-    image: linuxkit/metadata:48846eecc1a923fd48d9c0e22b2e6c3ea09f01df
+    image: linuxkit/metadata:501144d47215671e77b9cac44748a04f21236195
     command: ["/usr/bin/metadata", "openstack"]
 services:
   - name: rngd
@@ -13,7 +13,7 @@ onboot:
     image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
     command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
   - name: metadata
-    image: linuxkit/metadata:48846eecc1a923fd48d9c0e22b2e6c3ea09f01df
+    image: linuxkit/metadata:501144d47215671e77b9cac44748a04f21236195
 services:
   - name: rngd
     image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
@@ -13,7 +13,7 @@ onboot:
     image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
     command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
   - name: metadata
-    image: linuxkit/metadata:48846eecc1a923fd48d9c0e22b2e6c3ea09f01df
+    image: linuxkit/metadata:501144d47215671e77b9cac44748a04f21236195
 services:
   - name: getty
     image: linuxkit/getty:06f34bce0facea79161566d67345c3ea49965437
@@ -18,7 +18,7 @@ onboot:
     image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
     command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
   - name: metadata
-    image: linuxkit/metadata:48846eecc1a923fd48d9c0e22b2e6c3ea09f01df
+    image: linuxkit/metadata:501144d47215671e77b9cac44748a04f21236195
     command: ["/usr/bin/metadata", "hetzner"]
 services:
   - name: rngd
@@ -18,7 +18,7 @@ onboot:
     image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
     command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
   - name: metadata
-    image: linuxkit/metadata:48846eecc1a923fd48d9c0e22b2e6c3ea09f01df
+    image: linuxkit/metadata:501144d47215671e77b9cac44748a04f21236195
     command: ["/usr/bin/metadata", "packet"]
 services:
   - name: rngd
@@ -16,7 +16,7 @@ onboot:
     image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
     command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
   - name: metadata
-    image: linuxkit/metadata:48846eecc1a923fd48d9c0e22b2e6c3ea09f01df
+    image: linuxkit/metadata:501144d47215671e77b9cac44748a04f21236195
 services:
   - name: getty
     image: linuxkit/getty:06f34bce0facea79161566d67345c3ea49965437
@@ -13,7 +13,7 @@ onboot:
     image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
     command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
   - name: metadata
-    image: linuxkit/metadata:48846eecc1a923fd48d9c0e22b2e6c3ea09f01df
+    image: linuxkit/metadata:501144d47215671e77b9cac44748a04f21236195
     command: ["/usr/bin/metadata", "vultr"]
 services:
   - name: getty
@@ -3,13 +3,10 @@ module github.com/linuxkit/linuxkit/pkg/metadata
 go 1.16
 
 require (
-	github.com/diskfs/go-diskfs v1.2.1-0.20230123115902-fce1828bbbfa
+	github.com/diskfs/go-diskfs v1.3.1-0.20230612151643-22d22fd7e558
 	github.com/packethost/packngo v0.1.0
-	github.com/sirupsen/logrus v1.7.0
-	github.com/stretchr/testify v1.7.0 // indirect
+	github.com/sirupsen/logrus v1.9.0
 	github.com/vishvananda/netlink v0.0.0-20170808154308-f5a6f697a596
 	github.com/vishvananda/netns v0.0.0-20170707011535-86bef332bfc3 // indirect
 	golang.org/x/crypto v0.0.0-20180515001509-1a580b3eff78 // indirect
-	gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect
-	gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect
+	github.com/vmware/vmw-guestinfo v0.0.0-20220317130741-510905f0efa3
 )
@@ -1,118 +1,41 @@
4d63.com/gochecknoinits v0.0.0-20200108094044-eb73b47b9fc4/go.mod h1:4o1i5aXtIF5tJFt3UD1knCVmWOXg7fLYdHVu6jeNcnM=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/diskfs/go-diskfs v1.1.1 h1:rMjLpaydtXGVZb7mdkRGK1+//30i76nKAit89zUzeaI=
github.com/diskfs/go-diskfs v1.1.1/go.mod h1:afUPxxu+x1snp4aCY2bKR0CoZ/YFJewV3X2UEr2nPZE=
github.com/diskfs/go-diskfs v1.2.0 h1:Ow4xorEDw1VNYKbC+SA/qQNwi5gWIwdKUxmUcLFST24=
github.com/diskfs/go-diskfs v1.2.0/go.mod h1:ZTeTbzixuyfnZW5y5qKMtjV2o+GLLHo1KfMhotJI4Rk=
github.com/diskfs/go-diskfs v1.2.1-0.20230123115902-fce1828bbbfa h1:IjMOtaIqh7PYR3Pw06fMJp0UaWp4g1meiNTwcmH0Aho=
github.com/diskfs/go-diskfs v1.2.1-0.20230123115902-fce1828bbbfa/go.mod h1:3pUpCAz75Q11om5RsGpVKUgXp2Z+ATw1xV500glmCP0=
github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk=
github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU=
github.com/diskfs/go-diskfs v1.3.1-0.20230612151643-22d22fd7e558 h1:2H5E+tttRQpUKWjIHP66cCnSrn1Z7MWn4O3piQq3TgU=
github.com/diskfs/go-diskfs v1.3.1-0.20230612151643-22d22fd7e558/go.mod h1:G8cyy+ngM+3yKlqjweMmtqvE+TxsnIo1xumbJX1AeLg=
github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab h1:h1UgjJdAAhj+uPL68n7XASS6bU+07ZX1WJvVS2eyoeY=
github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab/go.mod h1:GLo/8fDswSAniFG+BFIaiSPcK610jyzgEhWYPQwuQdw=
github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gordonklaus/ineffassign v0.0.0-20190601041439-ed7b1b5ee0f8/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
github.com/jgautheron/goconst v0.0.0-20170703170152-9740945f5dcb/go.mod h1:82TxjOpWQiPmywlbIaB2ZkqJoSYJdLGPgAJDvM3PbKc=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mibk/dupl v1.0.0/go.mod h1:pCr4pNxxIbFGvtyCOi0c7LVjmV6duhKWV+ex5vh38ME=
github.com/packethost/packngo v0.1.0 h1:G/5zumXb2fbPm5MAM3y8MmugE66Ehpio5qx0IhdhTPc=
github.com/packethost/packngo v0.1.0/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M=
github.com/pierrec/lz4 v2.3.0+incompatible h1:CZzRn4Ut9GbUkHlQ7jqBXeZQV41ZSKWFc302ZU6lUTk=
github.com/pierrec/lz4 v2.3.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/xattr v0.4.1 h1:dhclzL6EqOXNaPDWqoeb9tIxATfBSmjqL0b4DpSjwRw=
github.com/pkg/xattr v0.4.1/go.mod h1:W2cGD0TBEus7MkUgv0tNZ9JutLtVO3cXu+IBRuHqnFs=
github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE=
github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/sirupsen/logrus v1.0.3 h1:B5C/igNWoiULof20pKfY4VntcIPqKuwEmoLZrabbUrc=
github.com/sirupsen/logrus v1.0.3/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stripe/safesql v0.2.0/go.mod h1:q7b2n0JmzM1mVGfcYpanfVb2j23cXZeWFxcILPn3JV4=
github.com/tsenart/deadcode v0.0.0-20160724212837-210d2dc333e9/go.mod h1:q+QjxYvZ+fpjMXqs+XEriussHjSYqeXVnAdSV1tkMYk=
github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/vishvananda/netlink v0.0.0-20170808154308-f5a6f697a596 h1:K6pwCps8j1ylaB37G0r6hGajvbNsdm+0ITJ6L88r65w=
github.com/vishvananda/netlink v0.0.0-20170808154308-f5a6f697a596/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netns v0.0.0-20170707011535-86bef332bfc3 h1:NcYCJC+LbOrfvuf/uHeM/kxh6vOmiuInC4GAWRdc+P0=
github.com/vishvananda/netns v0.0.0-20170707011535-86bef332bfc3/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
golang.org/x/crypto v0.0.0-20180515001509-1a580b3eff78 h1:uJIReYEB1ZZLarzi83Pmig1HhZ/cwFCysx05l0PFBIk=
golang.org/x/crypto v0.0.0-20180515001509-1a580b3eff78/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20181021155630-eda9bb28ed51/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f h1:gWF768j/LaZugp8dyS4UwsslYCYz9XgFxvlgsn0n9H8=
golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
github.com/vmware/vmw-guestinfo v0.0.0-20220317130741-510905f0efa3 h1:v6jG/tdl4O07LNVp74Nt7/OyL+1JsIW1M2f/nSvQheY=
github.com/vmware/vmw-guestinfo v0.0.0-20220317130741-510905f0efa3/go.mod h1:CSBTxrhePCm0cmXNKDGeu+6bOQzpaEklfCqEpn89JWk=
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20200102200121-6de373a2766c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/djherbis/times.v1 v1.2.0 h1:UCvDKl1L/fmBygl2Y7hubXCnY7t4Yj46ZrBFNUipFbM=
gopkg.in/djherbis/times.v1 v1.2.0/go.mod h1:AQlg6unIsrsCEdQYhTzERy542dz6SFdQFZFv6mUY0P8=
gopkg.in/djherbis/times.v1 v1.3.0 h1:uxMS4iMtH6Pwsxog094W0FYldiNnfY/xba00vq6C2+o=
gopkg.in/djherbis/times.v1 v1.3.0/go.mod h1:AQlg6unIsrsCEdQYhTzERy542dz6SFdQFZFv6mUY0P8=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
@@ -82,6 +82,7 @@ func FindCIs() []string {
 			log.Debugf("failed to open device read-only: %s: %v", dev, err)
 			continue
 		}
+		disk.DefaultBlocks = true // because this is passed through as a block device, we can get strange blocksize numbers from the OS
 		fs, err := disk.GetFilesystem(0)
 		if err != nil {
 			log.Debugf("failed to get filesystem on partition 0 for device: %s: %v", dev, err)
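For orientation, here is a minimal sketch of the probe pattern this hunk touches, assembled only from calls visible in the diff (diskfs.Open, the DefaultBlocks field, GetFilesystem, logrus's Debugf). It is not the actual pkg/metadata source: the device path and the debug-level setup are illustrative assumptions, and the real FindCIs loops over candidate devices and opens them read-only via an option not shown here.

```go
package main

import (
	diskfs "github.com/diskfs/go-diskfs"
	log "github.com/sirupsen/logrus"
)

func main() {
	log.SetLevel(log.DebugLevel) // make the new Debugf messages visible

	dev := "/dev/sda" // hypothetical candidate device
	disk, err := diskfs.Open(dev)
	if err != nil {
		log.Debugf("failed to open device read-only: %s: %v", dev, err)
		return
	}
	// The device is passed through as a block device, so the OS can report
	// strange blocksize numbers; DefaultBlocks sidesteps that (per the hunk).
	disk.DefaultBlocks = true
	fs, err := disk.GetFilesystem(0)
	if err != nil {
		log.Debugf("failed to get filesystem on partition 0 for device: %s: %v", dev, err)
		return
	}
	_ = fs // read config files from fs here
}
```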
3 changes: pkg/metadata/vendor/github.com/diskfs/go-diskfs/.golangci.yml (generated, vendored)
@@ -21,7 +21,6 @@ linters:
   disable-all: true
   enable:
     - bodyclose
-    - deadcode
     - depguard
     - dogsled
     - dupl
@@ -43,14 +42,12 @@ linters:
     - predeclared
     - revive
     - staticcheck
-    - structcheck
     - stylecheck
     - thelper
     - tparallel
     - typecheck
     - unconvert
     - unparam
-    - varcheck
     - whitespace
    # - wsl # extra blank lines and the like; purely stylistic
    # - goconst # checks for repeated values that should have been hoisted into a const
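For context: deadcode, structcheck, and varcheck were deprecated upstream in golangci-lint (superseded by the unused linter), which is presumably why they drop out of this vendored config together with the linter version bump to v1.51.2 in the Makefile below.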
2 changes: pkg/metadata/vendor/github.com/diskfs/go-diskfs/Makefile (generated, vendored)
@@ -6,7 +6,7 @@ GOENV ?= GO111MODULE=on CGO_ENABLED=0
 GO_FILES ?= $(shell $(GOENV) go list ./...)
 GOBIN ?= $(shell go env GOPATH)/bin
 LINTER ?= $(GOBIN)/golangci-lint
-LINTER_VERSION ?= v1.49.0
+LINTER_VERSION ?= v1.51.2
 
 # BUILDARCH is the host architecture
 # ARCH is the target architecture
17 changes: pkg/metadata/vendor/github.com/diskfs/go-diskfs/diskfs.go (generated, vendored)
@@ -105,7 +105,6 @@ package diskfs
 import (
 	"errors"
 	"fmt"
-	"io"
 	"os"
 
 	log "github.com/sirupsen/logrus"
@@ -139,6 +138,8 @@ const (
 	ReadOnly OpenModeOption = iota
+	// ReadWriteExclusive open file in read-write exclusive mode
+	ReadWriteExclusive
 	// ReadWrite open file in read-write mode
 	ReadWrite
 )
 
 // OpenModeOption.String()
@@ -148,6 +149,8 @@ func (m OpenModeOption) String() string {
 		return "read-only"
+	case ReadWriteExclusive:
+		return "read-write exclusive"
 	case ReadWrite:
 		return "read-write"
 	default:
 		return "unknown"
 	}
@@ -156,6 +159,7 @@ func (m OpenModeOption) String() string {
 var openModeOptions = map[OpenModeOption]int{
 	ReadOnly:           os.O_RDONLY,
+	ReadWriteExclusive: os.O_RDWR | os.O_EXCL,
 	ReadWrite:          os.O_RDWR,
 }
 
 // SectorSize represents the sector size to use
@@ -213,14 +217,9 @@ func initDisk(f *os.File, openMode OpenModeOption, sectorSize SectorSize) (*disk
 	case mode&os.ModeDevice != 0:
 		log.Debug("initDisk(): block device")
 		diskType = disk.Device
-		file, err := os.Open(f.Name())
+		size, err = getBlockDeviceSize(f)
 		if err != nil {
-			return nil, fmt.Errorf("error opening block device %s: %s", f.Name(), err)
-		}
-		defer file.Close()
-		size, err = file.Seek(0, io.SeekEnd)
-		if err != nil {
-			return nil, fmt.Errorf("error seeking to end of block device %s: %s", f.Name(), err)
+			return nil, fmt.Errorf("error getting block device %s size: %s", f.Name(), err)
 		}
 		lblksize, pblksize, err = getSectorSizes(f)
 		log.Debugf("initDisk(): logical block size %d, physical block size %d", lblksize, pblksize)
@@ -327,7 +326,7 @@ func Open(device string, opts ...OpenOpt) (*disk.Disk, error) {
 
 	f, err := os.OpenFile(device, m, 0o600)
 	if err != nil {
-		return nil, fmt.Errorf("could not open device %s exclusively for writing", device)
+		return nil, fmt.Errorf("could not open device %s with mode %v: %w", device, m, err)
 	}
 	// return our disk
 	return initDisk(f, ReadWriteExclusive, opt.sectorSize)
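As a side note, the new ReadWriteExclusive mode maps to os.O_RDWR|os.O_EXCL in the table above. A small self-contained sketch of what that flag combination does (the device path is a placeholder; on Linux, opening a block device with O_EXCL fails with EBUSY while something such as a mount is using it):

```go
package main

import (
	"fmt"
	"os"
)

// openExclusive mirrors the ReadWriteExclusive mapping shown in the hunk:
// O_RDWR|O_EXCL. On a block device this only succeeds if nothing else
// (for example a mount) currently holds the device.
func openExclusive(device string) (*os.File, error) {
	f, err := os.OpenFile(device, os.O_RDWR|os.O_EXCL, 0o600)
	if err != nil {
		return nil, fmt.Errorf("could not open device %s with mode %v: %w", device, os.O_RDWR|os.O_EXCL, err)
	}
	return f, nil
}

func main() {
	if f, err := openExclusive("/dev/sdb"); err == nil { // placeholder path
		defer f.Close()
		fmt.Println("device opened exclusively")
	}
}
```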
21 changes: pkg/metadata/vendor/github.com/diskfs/go-diskfs/diskfs_darwin.go (generated, vendored)
@@ -14,13 +14,24 @@ const (
 	DKIOCGETBLOCKCOUNT = 0x40086419
 )
 
+// getBlockDeviceSize get the size of an opened block device in Bytes.
+func getBlockDeviceSize(f *os.File) (int64, error) {
+	fd := f.Fd()
+
+	blockSize, err := unix.IoctlGetInt(int(fd), DKIOCGETBLOCKSIZE)
+	if err != nil {
+		return 0, fmt.Errorf("unable to get device logical sector size: %v", err)
+	}
+
+	blockCount, err := unix.IoctlGetInt(int(fd), DKIOCGETBLOCKCOUNT)
+	if err != nil {
+		return 0, fmt.Errorf("unable to get device block count: %v", err)
+	}
+	return int64(blockSize) * int64(blockCount), nil
+}
 
 // getSectorSizes get the logical and physical sector sizes for a block device
 func getSectorSizes(f *os.File) (logicalSectorSize, physicalSectorSize int64, err error) {
 	//nolint:gocritic // we keep this for reference to the underlying syscall
 	/*
 		ioctl(fd, BLKPBSZGET, &physicalsectsize);
 
 	*/
 	fd := f.Fd()
 
 	logicalSectorSizeInt, err := unix.IoctlGetInt(int(fd), DKIOCGETBLOCKSIZE)
@@ -1,6 +1,3 @@
-//go:build linux || solaris || aix || freebsd || illumos || netbsd || openbsd || plan9
-// +build linux solaris aix freebsd illumos netbsd openbsd plan9
-
 package diskfs
 
 import (
@@ -10,6 +7,15 @@ import (
 	"golang.org/x/sys/unix"
 )
 
+// getBlockDeviceSize get the size of an opened block device in Bytes.
+func getBlockDeviceSize(f *os.File) (int64, error) {
+	blockDeviceSize, err := unix.IoctlGetInt(int(f.Fd()), unix.BLKGETSIZE64)
+	if err != nil {
+		return 0, fmt.Errorf("unable to get block device size: %v", err)
+	}
+	return int64(blockDeviceSize), nil
+}
+
 // getSectorSizes get the logical and physical sector sizes for a block device
 func getSectorSizes(f *os.File) (logicalSectorSize, physicalSectorSize int64, err error) {
 	//
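With this helper in place, the BLKGETSIZE64 ioctl reports the block device size in bytes directly, which is what allowed the initDisk hunk earlier to drop the open/seek-to-end/close sequence (os.Open plus file.Seek(0, io.SeekEnd)) and the now-unused io import.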
18 additions: pkg/metadata/vendor/github.com/diskfs/go-diskfs/diskfs_other.go (generated, vendored, new file)
@@ -0,0 +1,18 @@
//go:build !windows && !linux && !darwin

package diskfs

import (
	"errors"
	"os"
)

// getBlockDeviceSize get the size of an opened block device in Bytes.
func getBlockDeviceSize(f *os.File) (int64, error) {
	return 0, errors.New("block devices not supported on this platform")
}

// getSectorSizes get the logical and physical sector sizes for a block device
func getSectorSizes(f *os.File) (logicalSectorSize, physicalSectorSize int64, err error) {
	return 0, 0, errors.New("block devices not supported on this platform")
}
5 changes: pkg/metadata/vendor/github.com/diskfs/go-diskfs/diskfs_windows.go (generated, vendored)
@@ -5,6 +5,11 @@ import (
 	"os"
 )
 
+// getBlockDeviceSize get the size of an opened block device in Bytes.
+func getBlockDeviceSize(f *os.File) (int64, error) {
+	return 0, errors.New("block devices not supported on windows")
+}
+
 // getSectorSizes get the logical and physical sector sizes for a block device
 func getSectorSizes(f *os.File) (int64, int64, error) {
 	return 0, 0, errors.New("block devices not supported on windows")
116 additions: pkg/metadata/vendor/github.com/diskfs/go-diskfs/filesystem/compatibility.go (generated, vendored, new file)
@@ -0,0 +1,116 @@
package filesystem

import (
	"io/fs"
	"os"
	"path"
	"time"
)

type fsCompatible struct {
	fs FileSystem
}

type fsFileWrapper struct {
	File
	stat os.FileInfo
}

type fakeRootDir struct{}

func (d *fakeRootDir) Name() string       { return "/" }
func (d *fakeRootDir) Size() int64        { return 0 }
func (d *fakeRootDir) Mode() fs.FileMode  { return 0 }
func (d *fakeRootDir) ModTime() time.Time { return time.Now() }
func (d *fakeRootDir) IsDir() bool        { return true }
func (d *fakeRootDir) Sys() any           { return nil }

type fsDirWrapper struct {
	name   string
	compat *fsCompatible
	stat   os.FileInfo
}

func (f *fsDirWrapper) Close() error {
	return nil
}

func (f *fsDirWrapper) Read([]byte) (int, error) {
	return 0, fs.ErrInvalid
}

func (f *fsDirWrapper) ReadDir(n int) ([]fs.DirEntry, error) {
	entries, err := f.compat.ReadDir(f.name)
	if err != nil {
		return nil, err
	}
	if n < 0 || n >= len(entries) {
		n = len(entries)
	}
	return entries[:n], nil
}

func (f *fsDirWrapper) Stat() (fs.FileInfo, error) {
	return f.stat, nil
}

func (f *fsFileWrapper) Stat() (fs.FileInfo, error) {
	return f.stat, nil
}

// Converts the relative path name to an absolute one
func absoluteName(name string) string {
	if name == "." {
		name = "/"
	}
	if name[0] != '/' {
		name = "/" + name
	}
	return name
}

func (f *fsCompatible) Open(name string) (fs.File, error) {
	var stat os.FileInfo
	name = absoluteName(name)
	if name == "/" {
		return &fsDirWrapper{name: name, compat: f, stat: &fakeRootDir{}}, nil
	}
	dirname := path.Dir(name)
	if info, err := f.fs.ReadDir(dirname); err == nil {
		for i := range info {
			if info[i].Name() == path.Base(name) {
				stat = info[i]
				break
			}
		}
	}
	if stat == nil {
		return nil, fs.ErrNotExist
	}
	if stat.IsDir() {
		return &fsDirWrapper{name: name, compat: f, stat: stat}, nil
	}
	file, err := f.fs.OpenFile(name, os.O_RDONLY)
	if err != nil {
		return nil, err
	}
	return &fsFileWrapper{File: file, stat: stat}, nil
}

func (f *fsCompatible) ReadDir(name string) ([]fs.DirEntry, error) {
	entries, err := f.fs.ReadDir(name)
	if err != nil {
		return nil, err
	}
	direntries := make([]fs.DirEntry, len(entries))
	for i := range entries {
		direntries[i] = fs.FileInfoToDirEntry(entries[i])
	}
	return direntries, nil
}

// FS converts a diskfs FileSystem to a fs.FS for compatibility with
// other utilities
func FS(f FileSystem) fs.ReadDirFS {
	return &fsCompatible{f}
}
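To illustrate the new adapter, a hypothetical usage sketch: wrap any filesystem.FileSystem in the fs.ReadDirFS returned by FS and traverse it with the standard io/fs helpers. The walkAll name and the surrounding package are inventions for the example.

```go
package fsdemo

import (
	"fmt"
	"io/fs"

	"github.com/diskfs/go-diskfs/filesystem"
)

// walkAll prints every path in a diskfs filesystem by converting it with
// filesystem.FS and walking it via fs.WalkDir.
func walkAll(fsys filesystem.FileSystem) error {
	compat := filesystem.FS(fsys)
	return fs.WalkDir(compat, ".", func(p string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(p, d.IsDir())
		return nil
	})
}
```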
61 changes: pkg/metadata/vendor/github.com/diskfs/go-diskfs/filesystem/fat32/directoryentry.go (generated, vendored)
@@ -6,6 +6,8 @@ import (
 	"regexp"
 	"strings"
 	"time"
+
+	"github.com/elliotwutingfeng/asciiset"
 )
 
 // AccessRights is the byte mask representing access rights to a FAT file
@@ -18,60 +20,7 @@ const (
 )
 
 // valid shortname characters - [A-F][0-9][$%'-_@~`!(){}^#&]
-var validShortNameCharacters = map[byte]bool{
-	0x21: true, // !
-	0x23: true, // #
-	0x24: true, // $
-	0x25: true, // %
-	0x26: true, // &
-	0x27: true, // '
-	0x28: true, // (
-	0x29: true, // )
-	0x2d: true, // -
-	0x30: true, // 0
-	0x31: true, // 1
-	0x32: true, // 2
-	0x33: true, // 3
-	0x34: true, // 4
-	0x35: true, // 5
-	0x36: true, // 6
-	0x37: true, // 7
-	0x38: true, // 8
-	0x39: true, // 9
-	0x40: true, // @
-	0x41: true, // A
-	0x42: true, // B
-	0x43: true, // C
-	0x44: true, // D
-	0x45: true, // E
-	0x46: true, // F
-	0x47: true, // G
-	0x48: true, // H
-	0x49: true, // I
-	0x4a: true, // J
-	0x4b: true, // K
-	0x4c: true, // L
-	0x4d: true, // M
-	0x4e: true, // N
-	0x4f: true, // O
-	0x50: true, // P
-	0x51: true, // Q
-	0x52: true, // R
-	0x53: true, // S
-	0x54: true, // T
-	0x55: true, // U
-	0x56: true, // V
-	0x57: true, // W
-	0x58: true, // X
-	0x59: true, // Y
-	0x5a: true, // Z
-	0x5e: true, // ^
-	0x5f: true, // _
-	0x60: true, // `
-	0x7b: true, // {
-	0x7d: true, // }
-	0x7e: true, // ~
-}
+var validShortNameCharacters, _ = asciiset.MakeASCIISet("!#$%&'()-0123456789@ABCDEFGHIJKLMNOPQRSTUVWXYZ^_`{}~")
 
 // directoryEntry is a single directory entry
 //
@@ -401,7 +350,7 @@ func stringToValidASCIIBytes(s string) ([]byte, error) {
 	// now make sure every byte is valid
 	for _, b2 := range b {
 		// only valid chars - 0-9, A-Z, _, ~
-		if validShortNameCharacters[b2] {
+		if validShortNameCharacters.Contains(b2) {
 			continue
 		}
 		return nil, fmt.Errorf("invalid 8.3 character")
@@ -489,7 +438,7 @@ func uCaseValid(name string) string {
 	r2 := make([]rune, 0, len(r))
 	for _, val := range r {
 		switch {
-		case validShortNameCharacters[byte(val)]:
+		case validShortNameCharacters.Contains(byte(val)):
 			r2 = append(r2, val)
 		case (0x61 <= val && val <= 0x7a):
 			// lower-case characters should be upper-cased
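The swap above replaces a map[byte]bool membership check with a bitset test; the set contents are unchanged (digits, upper-case letters, and the same punctuation as the old map), and per the vendored asciiset README further down, Contains() on the bitset benchmarks well over an order of magnitude faster than a map lookup.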
2 changes: pkg/metadata/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/compressor.go (generated, vendored)
@@ -7,7 +7,7 @@ import (
 	"fmt"
 	"io"
 
-	"github.com/pierrec/lz4"
+	"github.com/pierrec/lz4/v4"
 	"github.com/ulikunitz/xz"
 	"github.com/ulikunitz/xz/lzma"
 )
21 changes: pkg/metadata/vendor/github.com/diskfs/go-diskfs/go.mod (generated, vendored)
@@ -1,16 +1,15 @@
 module github.com/diskfs/go-diskfs
 
-go 1.16
+go 1.19
 
 require (
-	github.com/frankban/quicktest v1.13.0 // indirect
-	github.com/go-test/deep v1.0.8 // indirect
-	github.com/google/go-cmp v0.5.8 // indirect
-	github.com/google/uuid v1.1.1
-	github.com/pierrec/lz4 v2.3.0+incompatible
-	github.com/pkg/xattr v0.4.1
-	github.com/sirupsen/logrus v1.7.0
-	github.com/ulikunitz/xz v0.5.10
-	golang.org/x/sys v0.0.0-20210616094352-59db8d763f22
-	gopkg.in/djherbis/times.v1 v1.2.0
+	github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab
+	github.com/go-test/deep v1.0.8
+	github.com/google/uuid v1.3.0
+	github.com/pierrec/lz4/v4 v4.1.17
+	github.com/pkg/xattr v0.4.9
+	github.com/sirupsen/logrus v1.9.0
+	github.com/ulikunitz/xz v0.5.11
+	golang.org/x/sys v0.5.0
+	gopkg.in/djherbis/times.v1 v1.3.0
 )
56 changes: pkg/metadata/vendor/github.com/diskfs/go-diskfs/go.sum (generated, vendored)
@@ -1,37 +1,31 @@
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk=
github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU=
github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab h1:h1UgjJdAAhj+uPL68n7XASS6bU+07ZX1WJvVS2eyoeY=
github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab/go.mod h1:GLo/8fDswSAniFG+BFIaiSPcK610jyzgEhWYPQwuQdw=
github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/pierrec/lz4 v2.3.0+incompatible h1:CZzRn4Ut9GbUkHlQ7jqBXeZQV41ZSKWFc302ZU6lUTk=
github.com/pierrec/lz4 v2.3.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/xattr v0.4.1 h1:dhclzL6EqOXNaPDWqoeb9tIxATfBSmjqL0b4DpSjwRw=
github.com/pkg/xattr v0.4.1/go.mod h1:W2cGD0TBEus7MkUgv0tNZ9JutLtVO3cXu+IBRuHqnFs=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE=
github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
golang.org/x/sys v0.0.0-20181021155630-eda9bb28ed51/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/djherbis/times.v1 v1.2.0 h1:UCvDKl1L/fmBygl2Y7hubXCnY7t4Yj46ZrBFNUipFbM=
gopkg.in/djherbis/times.v1 v1.2.0/go.mod h1:AQlg6unIsrsCEdQYhTzERy542dz6SFdQFZFv6mUY0P8=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/djherbis/times.v1 v1.3.0 h1:uxMS4iMtH6Pwsxog094W0FYldiNnfY/xba00vq6C2+o=
gopkg.in/djherbis/times.v1 v1.3.0/go.mod h1:AQlg6unIsrsCEdQYhTzERy542dz6SFdQFZFv6mUY0P8=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
6 changes: pkg/metadata/vendor/github.com/diskfs/go-diskfs/partition/gpt/partition.go (generated, vendored)
@@ -1,6 +1,7 @@
 package gpt
 
 import (
+	"bytes"
 	"encoding/binary"
 	"fmt"
 	"io"
@@ -15,6 +16,8 @@ import (
 // PartitionEntrySize fixed size of a GPT partition entry
 const PartitionEntrySize = 128
 
+var zeroUUIDBytes = make([]byte, 16)
+
 // Partition represents the structure of a single partition on the disk
 type Partition struct {
 	Start uint64 // start sector for the partition
@@ -90,6 +93,9 @@ func partitionFromBytes(b []byte, logicalSectorSize, physicalSectorSize int) (*P
 		return nil, fmt.Errorf("data for partition was %d bytes instead of expected %d", len(b), PartitionEntrySize)
 	}
+	// is it all zeroes?
+	if bytes.Equal(b[0:16], zeroUUIDBytes) {
+		return nil, nil
+	}
 	typeGUID, err := uuid.FromBytes(bytesToUUIDBytes(b[0:16]))
 	if err != nil {
 		return nil, fmt.Errorf("unable to read partition type GUID: %v", err)
5 changes: pkg/metadata/vendor/github.com/diskfs/go-diskfs/partition/gpt/table.go (generated, vendored)
@@ -294,7 +294,7 @@ func (t *Table) toGPTBytes(primary bool) ([]byte, error) {
 	copy(b[56:72], bytesToUUIDBytes(guid[0:16]))
 
 	// starting LBA of array of partition entries
-	binary.LittleEndian.PutUint64(b[72:80], t.partitionArraySector(primary))
+	binary.LittleEndian.PutUint64(b[72:80], t.partitionArraySector(true))
 
 	// how many entries?
 	binary.LittleEndian.PutUint32(b[80:84], uint32(t.partitionArraySize))
@@ -337,6 +337,9 @@ func readPartitionArrayBytes(b []byte, entrySize, logicalSectorSize, physicalSec
 		if err != nil {
 			return nil, fmt.Errorf("error reading partition entry %d: %v", i, err)
 		}
+		if p == nil {
+			continue
+		}
 		// augment partition information
 		p.Size = (p.End - p.Start + 1) * uint64(logicalSectorSize)
 		parts = append(parts, p)
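A note on the two gpt hunks above: in the GPT on-disk format, a partition entry whose type GUID (bytes 0 to 15) is all zeroes marks an unused slot, so partitionFromBytes now returns nil for such entries and readPartitionArrayBytes skips the nils rather than surfacing phantom partitions. The toGPTBytes change additionally makes the header always record the primary entry-array LBA.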
23 additions: pkg/metadata/vendor/github.com/elliotwutingfeng/asciiset/.gitignore (generated, vendored, new file)
@@ -0,0 +1,23 @@
# If you prefer the allow list template instead of the deny list, see community template:
# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
#
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib

# Test binary, built with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out
*.html
*.prof

# Dependency directories (remove the comment below to include it)
# vendor/

# Go workspace file
go.work
pkg/metadata/vendor/github.com/elliotwutingfeng/asciiset/ASCII-Table.svg (generated, vendored, new file, 655 KiB image): diff suppressed because one or more lines are too long
128 additions: pkg/metadata/vendor/github.com/elliotwutingfeng/asciiset/CODE_OF_CONDUCT.md (generated, vendored, new file)
@@ -0,0 +1,128 @@
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
  overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or
  advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
  address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
wutingfeng@outlook.com.
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series
of actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within
the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
69 additions: pkg/metadata/vendor/github.com/elliotwutingfeng/asciiset/CREDITS.md (generated, vendored, new file)
@@ -0,0 +1,69 @@
# Credits

This application uses code from other open-source projects. The copyright statements of these open-source projects are listed below.

## Bit

Source: <https://github.com/yourbasic/bit>

```markdown
BSD 2-Clause License

Copyright (c) 2017, Stefan Nilsson
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```

## Go

Source: <https://github.com/golang/go>

```markdown
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
  notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
  copyright notice, this list of conditions and the following disclaimer
  in the documentation and/or other materials provided with the
  distribution.
* Neither the name of Google Inc. nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
28 additions: pkg/metadata/vendor/github.com/elliotwutingfeng/asciiset/LICENSE (generated, vendored, new file)
@@ -0,0 +1,28 @@
BSD 3-Clause License

Copyright (c) 2022, Wu Tingfeng

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its
   contributors may be used to endorse or promote products derived from
   this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
20 additions: pkg/metadata/vendor/github.com/elliotwutingfeng/asciiset/Makefile (generated, vendored, new file)
@@ -0,0 +1,20 @@
tests:
	go test -v -race -covermode atomic -coverprofile coverage.out && go tool cover -html coverage.out -o coverage.html

tests_without_race:
	go test -v -covermode atomic -coverprofile coverage.out && go tool cover -html coverage.out -o coverage.html

format:
	go fmt ./...

bench:
	go test -bench . -benchmem -cpu 1

report_bench:
	go test -cpuprofile cpu.prof -memprofile mem.prof -bench . -cpu 1

cpu_report:
	go tool pprof cpu.prof

mem_report:
	go tool pprof mem.prof
95 additions: pkg/metadata/vendor/github.com/elliotwutingfeng/asciiset/README.md (generated, vendored, new file)
@@ -0,0 +1,95 @@
|
||||
# asciiset
|
||||
|
||||
[](https://pkg.go.dev/github.com/elliotwutingfeng/asciiset)
|
||||
[](https://goreportcard.com/report/github.com/elliotwutingfeng/asciiset)
|
||||
[](https://codecov.io/gh/elliotwutingfeng/asciiset)
|
||||
|
||||
[](LICENSE)
|
||||
|
||||
## Summary
|
||||
|
||||
**asciiset** is an [ASCII](https://simple.wikipedia.org/wiki/ASCII) character bitset.
|
||||
|
||||
Bitsets are fast and memory-efficient data structures for storing and retrieving information using bitwise operations.
|
||||
|
||||
**asciiset** is an extension of the **asciiSet** data structure from the Go Standard library [source code](https://cs.opensource.google/go/go/+/master:src/bytes/bytes.go).
|
||||
|
||||
Possible applications include checking strings for prohibited ASCII characters, and counting unique ASCII characters in a string.
|
||||
|
||||
Spot any bugs? Report them [here](https://github.com/elliotwutingfeng/asciiset/issues).
|
||||
|
||||

|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
go get github.com/elliotwutingfeng/asciiset
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
```bash
|
||||
make tests
|
||||
|
||||
# Alternatively, run tests without race detection
|
||||
# Useful for systems that do not support the -race flag like windows/386
|
||||
# See https://tip.golang.org/src/cmd/dist/test.go
|
||||
make tests_without_race
|
||||
```
|
||||
|
||||
## Benchmarks
|
||||
|
||||
```bash
|
||||
make bench
|
||||
```
|
||||
|
||||
### Results
|
||||
|
||||
```text
|
||||
CPU: AMD Ryzen 7 5800X
|
||||
Time in nanoseconds (ns) | Lower is better
|
||||
|
||||
ASCIISet
|
||||
|
||||
Add() ▏ 891 🟦🟦🟦 11x faster
|
||||
|
||||
Contains() ▏ 580 🟦🟦 28x faster
|
||||
|
||||
Remove() ▏ 1570 🟦🟦🟦🟦 1.5x faster
|
||||
|
||||
Size() ▏ 313 🟦 equivalent
|
||||
|
||||
Visit() ▏ 1421 🟦🟦🟦🟦 3.5x faster
|
||||
|
||||
map[byte]struct{}
|
||||
|
||||
Add() ▏ 9850 🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥
|
||||
|
||||
Contains() ▏16605 🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥
|
||||
|
||||
Remove() ▏ 2510 🟥🟥🟥🟥🟥🟥
|
||||
|
||||
Size() ▏ 318 🟥
|
||||
|
||||
Visit() ▏ 5085 🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥
|
||||
```
|
||||
|
||||
```bash
|
||||
go test -bench . -benchmem -cpu 1
|
||||
goos: linux
|
||||
goarch: amd64
|
||||
pkg: github.com/elliotwutingfeng/asciiset
|
||||
cpu: AMD Ryzen 7 5800X 8-Core Processor
|
||||
BenchmarkASCIISet/ASCIISet_Add() 1340958 891.8 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkASCIISet/ASCIISet_Contains() 2058140 580.9 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkASCIISet/ASCIISet_Remove() 762636 1570 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkASCIISet/ASCIISet_Size() 3808866 313.2 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkASCIISet/ASCIISet_Visit() 840808 1421 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkMapSet/map_Add 122043 9850 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkMapSet/map_Contains 72583 16605 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkMapSet/map_Remove 451785 2510 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkMapSet/map_Size 3789381 318.3 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkMapSet/map_Visit 235515 5085 ns/op 0 B/op 0 allocs/op
|
||||
PASS
|
||||
ok github.com/elliotwutingfeng/asciiset 14.438s
|
||||
```
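For orientation, a minimal usage sketch of this newly vendored package, built only from the API shown in asciiset.go below; the import path comes from the README, while the set contents and sample input are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/elliotwutingfeng/asciiset"
)

func main() {
	// Build a set of disallowed characters; ok is false if any
	// character in the argument is non-ASCII.
	forbidden, ok := asciiset.MakeASCIISet("<>&\"'")
	if !ok {
		panic("non-ASCII character in set definition")
	}

	input := "run/config/metadata" // invented sample input
	for i := 0; i < len(input); i++ {
		if forbidden.Contains(input[i]) {
			fmt.Printf("prohibited byte %q at index %d\n", input[i], i)
		}
	}
	fmt.Println("characters in set:", forbidden.Size())
}
```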
116 pkg/metadata/vendor/github.com/elliotwutingfeng/asciiset/asciiset.go generated vendored Normal file
@@ -0,0 +1,116 @@
// Package asciiset is an ASCII character bitset
package asciiset

import (
	"unicode/utf8"
)

// ASCIISet is a 36-byte value, where each bit in the first 32-bytes
// represents the presence of a given ASCII character in the set.
// The remaining 4-bytes is a counter for the number of ASCII characters in the set.
// The 128-bits of the first 16 bytes, starting with the least-significant bit
// of the lowest word to the most-significant bit of the highest word,
// map to the full range of all 128 ASCII characters.
// The 128-bits of the next 16 bytes will be zeroed,
// ensuring that any non-ASCII character will be reported as not in the set.
// Rejecting non-ASCII characters in this way avoids bounds checks in ASCIISet.Contains.
type ASCIISet [9]uint32

// MakeASCIISet creates a set of ASCII characters and reports whether all
// characters in chars are ASCII.
func MakeASCIISet(chars string) (as ASCIISet, ok bool) {
	for i := 0; i < len(chars); i++ {
		c := chars[i]
		if c >= utf8.RuneSelf {
			return as, false
		}
		as.Add(c)
	}
	return as, true
}

// Add inserts character c into the set.
func (as *ASCIISet) Add(c byte) {
	if c < utf8.RuneSelf { // ensure that c is an ASCII byte
		before := as[c/32]
		as[c/32] |= 1 << (c % 32)
		if before != as[c/32] {
			as[8]++
		}
	}
}

// Contains reports whether c is inside the set.
func (as *ASCIISet) Contains(c byte) bool {
	return (as[c/32] & (1 << (c % 32))) != 0
}

// Remove removes c from the set
//
// if c is not in the set, the set contents will remain unchanged.
func (as *ASCIISet) Remove(c byte) {
	if c < utf8.RuneSelf { // ensure that c is an ASCII byte
		before := as[c/32]
		as[c/32] &^= 1 << (c % 32)
		if before != as[c/32] {
			as[8]--
		}
	}
}

// Size returns the number of characters in the set.
func (as *ASCIISet) Size() int {
	return int(as[8])
}

// Union returns a new set containing all characters that belong to either as and as2.
func (as *ASCIISet) Union(as2 ASCIISet) (as3 ASCIISet) {
	as3[0] = as[0] | as2[0]
	as3[1] = as[1] | as2[1]
	as3[2] = as[2] | as2[2]
	as3[3] = as[3] | as2[3]
	return
}

// Intersection returns a new set containing all characters that belong to both as and as2.
func (as *ASCIISet) Intersection(as2 ASCIISet) (as3 ASCIISet) {
	as3[0] = as[0] & as2[0]
	as3[1] = as[1] & as2[1]
	as3[2] = as[2] & as2[2]
	as3[3] = as[3] & as2[3]
	return
}

// Subtract returns a new set containing all characters that belong to as but not as2.
func (as *ASCIISet) Subtract(as2 ASCIISet) (as3 ASCIISet) {
	as3[0] = as[0] &^ as2[0]
	as3[1] = as[1] &^ as2[1]
	as3[2] = as[2] &^ as2[2]
	as3[3] = as[3] &^ as2[3]
	return
}

// Equals reports whether as contains the same characters as as2.
func (as *ASCIISet) Equals(as2 ASCIISet) bool {
	return as[0] == as2[0] && as[1] == as2[1] && as[2] == as2[2] && as[3] == as2[3]
}

// Visit calls the do function for each character of as in ascending numerical order.
// If do returns true, Visit returns immediately, skipping any remaining
// characters, and returns true. It is safe for do to Add or Remove
// characters. The behavior of Visit is undefined if do changes
// the set in any other way.
func (as *ASCIISet) Visit(do func(n byte) (skip bool)) (aborted bool) {
	var currentChar byte
	for i := uint(0); i < 4; i++ {
		for j := uint(0); j < 32; j++ {
			if (as[i] & (1 << j)) != 0 {
				if do(currentChar) {
					return true
				}
			}
			currentChar++
		}
	}
	return false
}
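The README's other suggested application, counting unique ASCII characters, falls out of Add, Size, and Visit directly. A hedged sketch under that reading; countUniqueASCII is an invented helper, not part of the vendored package:

```go
package main

import (
	"fmt"

	"github.com/elliotwutingfeng/asciiset"
)

// countUniqueASCII reports how many distinct ASCII bytes occur in s,
// and returns those bytes in ascending order via Visit.
func countUniqueASCII(s string) (int, []byte) {
	var set asciiset.ASCIISet
	for i := 0; i < len(s); i++ {
		set.Add(s[i]) // Add silently ignores non-ASCII bytes
	}
	var chars []byte
	set.Visit(func(n byte) bool {
		chars = append(chars, n)
		return false // false = keep visiting
	})
	return set.Size(), chars
}

func main() {
	n, chars := countUniqueASCII("hello")
	fmt.Println(n, string(chars)) // 4 "ehlo"
}
```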
25 pkg/metadata/vendor/github.com/elliotwutingfeng/asciiset/codecov.yml generated vendored Normal file
@@ -0,0 +1,25 @@
codecov:
  require_ci_to_pass: yes

coverage:
  precision: 2
  round: down
  range: "90...100"
  status:
    project:
      default:
        target: 90%
        threshold: 5%
    patch: off

parsers:
  gcov:
    branch_detection:
      conditional: yes
      loop: yes
      method: no
      macro: no

comment:
  layout: "reach,diff,flags,files,footer"
  behavior: default
  require_changes: no
3 pkg/metadata/vendor/github.com/elliotwutingfeng/asciiset/go.mod generated vendored Normal file
@@ -0,0 +1,3 @@
module github.com/elliotwutingfeng/asciiset

go 1.11
2 pkg/metadata/vendor/github.com/google/uuid/README.md generated vendored
@@ -16,4 +16,4 @@ change is the ability to represent an invalid UUID (vs a NIL UUID).

Full `go doc` style documentation for the package can be viewed online without
installing this package by using the GoDoc site here:
http://godoc.org/github.com/google/uuid
http://pkg.go.dev/github.com/google/uuid
4 pkg/metadata/vendor/github.com/google/uuid/hash.go generated vendored
@@ -26,8 +26,8 @@ var (
// NewMD5 and NewSHA1.
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
	h.Reset()
	h.Write(space[:])
	h.Write(data)
	h.Write(space[:]) //nolint:errcheck
	h.Write(data)     //nolint:errcheck
	s := h.Sum(nil)
	var uuid UUID
	copy(uuid[:], s)
7 pkg/metadata/vendor/github.com/google/uuid/marshal.go generated vendored
@@ -16,10 +16,11 @@ func (uuid UUID) MarshalText() ([]byte, error) {
// UnmarshalText implements encoding.TextUnmarshaler.
func (uuid *UUID) UnmarshalText(data []byte) error {
	id, err := ParseBytes(data)
	if err == nil {
		*uuid = id
	if err != nil {
		return err
	}
	return err
	*uuid = id
	return nil
}

// MarshalBinary implements encoding.BinaryMarshaler.
118 pkg/metadata/vendor/github.com/google/uuid/null.go generated vendored Normal file
@@ -0,0 +1,118 @@
// Copyright 2021 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
	"bytes"
	"database/sql/driver"
	"encoding/json"
	"fmt"
)

var jsonNull = []byte("null")

// NullUUID represents a UUID that may be null.
// NullUUID implements the SQL driver.Scanner interface so
// it can be used as a scan destination:
//
//  var u uuid.NullUUID
//  err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
//  ...
//  if u.Valid {
//     // use u.UUID
//  } else {
//     // NULL value
//  }
//
type NullUUID struct {
	UUID  UUID
	Valid bool // Valid is true if UUID is not NULL
}

// Scan implements the SQL driver.Scanner interface.
func (nu *NullUUID) Scan(value interface{}) error {
	if value == nil {
		nu.UUID, nu.Valid = Nil, false
		return nil
	}

	err := nu.UUID.Scan(value)
	if err != nil {
		nu.Valid = false
		return err
	}

	nu.Valid = true
	return nil
}

// Value implements the driver Valuer interface.
func (nu NullUUID) Value() (driver.Value, error) {
	if !nu.Valid {
		return nil, nil
	}
	// Delegate to UUID Value function
	return nu.UUID.Value()
}

// MarshalBinary implements encoding.BinaryMarshaler.
func (nu NullUUID) MarshalBinary() ([]byte, error) {
	if nu.Valid {
		return nu.UUID[:], nil
	}

	return []byte(nil), nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (nu *NullUUID) UnmarshalBinary(data []byte) error {
	if len(data) != 16 {
		return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
	}
	copy(nu.UUID[:], data)
	nu.Valid = true
	return nil
}

// MarshalText implements encoding.TextMarshaler.
func (nu NullUUID) MarshalText() ([]byte, error) {
	if nu.Valid {
		return nu.UUID.MarshalText()
	}

	return jsonNull, nil
}

// UnmarshalText implements encoding.TextUnmarshaler.
func (nu *NullUUID) UnmarshalText(data []byte) error {
	id, err := ParseBytes(data)
	if err != nil {
		nu.Valid = false
		return err
	}
	nu.UUID = id
	nu.Valid = true
	return nil
}

// MarshalJSON implements json.Marshaler.
func (nu NullUUID) MarshalJSON() ([]byte, error) {
	if nu.Valid {
		return json.Marshal(nu.UUID)
	}

	return jsonNull, nil
}

// UnmarshalJSON implements json.Unmarshaler.
func (nu *NullUUID) UnmarshalJSON(data []byte) error {
	if bytes.Equal(data, jsonNull) {
		*nu = NullUUID{}
		return nil // valid null UUID
	}
	err := json.Unmarshal(data, &nu.UUID)
	nu.Valid = err == nil
	return err
}
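A short sketch of the JSON round-trip behavior this new file adds, using only the MarshalJSON/UnmarshalJSON methods shown above; the struct and field names are invented for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

// record is an illustrative wrapper type, not part of the vendored package.
type record struct {
	Owner uuid.NullUUID `json:"owner"`
}

func main() {
	var r record

	// JSON null unmarshals into the zero NullUUID, with Valid == false.
	_ = json.Unmarshal([]byte(`{"owner": null}`), &r)
	fmt.Println(r.Owner.Valid) // false

	// A real UUID string unmarshals with Valid set to true.
	_ = json.Unmarshal([]byte(`{"owner": "936da01f-9abd-4d9d-80c7-02af85c822a8"}`), &r)
	fmt.Println(r.Owner.Valid, r.Owner.UUID)
}
```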
2 pkg/metadata/vendor/github.com/google/uuid/sql.go generated vendored
@@ -9,7 +9,7 @@ import (
	"fmt"
)

// Scan implements sql.Scanner so UUIDs can be read from databases transparently
// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
// Currently, database types that map to string and []byte are supported. Please
// consult database-specific driver documentation for matching types.
func (uuid *UUID) Scan(src interface{}) error {
55 pkg/metadata/vendor/github.com/google/uuid/uuid.go generated vendored
@@ -12,6 +12,7 @@ import (
	"fmt"
	"io"
	"strings"
	"sync"
)

// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
@@ -33,7 +34,27 @@ const (
	Future // Reserved for future definition.
)

var rander = rand.Reader // random function
const randPoolSize = 16 * 16

var (
	rander      = rand.Reader // random function
	poolEnabled = false
	poolMu      sync.Mutex
	poolPos     = randPoolSize     // protected with poolMu
	pool        [randPoolSize]byte // protected with poolMu
)

type invalidLengthError struct{ len int }

func (err invalidLengthError) Error() string {
	return fmt.Sprintf("invalid UUID length: %d", err.len)
}

// IsInvalidLengthError is matcher function for custom error invalidLengthError
func IsInvalidLengthError(err error) bool {
	_, ok := err.(invalidLengthError)
	return ok
}

// Parse decodes s into a UUID or returns an error. Both the standard UUID
// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
@@ -68,7 +89,7 @@ func Parse(s string) (UUID, error) {
		}
		return uuid, nil
	default:
		return uuid, fmt.Errorf("invalid UUID length: %d", len(s))
		return uuid, invalidLengthError{len(s)}
	}
	// s is now at least 36 bytes long
	// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
@@ -112,7 +133,7 @@ func ParseBytes(b []byte) (UUID, error) {
		return uuid, nil
	default:
		return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
		return uuid, invalidLengthError{len(b)}
	}
	// s is now at least 36 bytes long
	// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
@@ -243,3 +264,31 @@ func SetRand(r io.Reader) {
	}
	rander = r
}

// EnableRandPool enables internal randomness pool used for Random
// (Version 4) UUID generation. The pool contains random bytes read from
// the random number generator on demand in batches. Enabling the pool
// may improve the UUID generation throughput significantly.
//
// Since the pool is stored on the Go heap, this feature may be a bad fit
// for security sensitive applications.
//
// Both EnableRandPool and DisableRandPool are not thread-safe and should
// only be called when there is no possibility that New or any other
// UUID Version 4 generation function will be called concurrently.
func EnableRandPool() {
	poolEnabled = true
}

// DisableRandPool disables the randomness pool if it was previously
// enabled with EnableRandPool.
//
// Both EnableRandPool and DisableRandPool are not thread-safe and should
// only be called when there is no possibility that New or any other
// UUID Version 4 generation function will be called concurrently.
func DisableRandPool() {
	poolEnabled = false
	defer poolMu.Unlock()
	poolMu.Lock()
	poolPos = randPoolSize
}
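Given the thread-safety caveat in the EnableRandPool comment above, the pool is best toggled once around a burst of generation. A minimal sketch using only the functions added in this hunk:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Batch crypto/rand reads for a burst of Version 4 UUIDs.
	// Only safe while no other goroutine is generating UUIDs,
	// since Enable/DisableRandPool are not thread-safe.
	uuid.EnableRandPool()
	defer uuid.DisableRandPool()

	for i := 0; i < 4; i++ {
		fmt.Println(uuid.New())
	}
}
```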
12 pkg/metadata/vendor/github.com/google/uuid/version1.go generated vendored
@@ -17,12 +17,6 @@ import (
//
// In most cases, New should be used.
func NewUUID() (UUID, error) {
	nodeMu.Lock()
	if nodeID == zeroID {
		setNodeInterface("")
	}
	nodeMu.Unlock()

	var uuid UUID
	now, seq, err := GetTime()
	if err != nil {
@@ -38,7 +32,13 @@ func NewUUID() (UUID, error) {
	binary.BigEndian.PutUint16(uuid[4:], timeMid)
	binary.BigEndian.PutUint16(uuid[6:], timeHi)
	binary.BigEndian.PutUint16(uuid[8:], seq)

	nodeMu.Lock()
	if nodeID == zeroID {
		setNodeInterface("")
	}
	copy(uuid[10:], nodeID[:])
	nodeMu.Unlock()

	return uuid, nil
}
40 pkg/metadata/vendor/github.com/google/uuid/version4.go generated vendored
@@ -14,11 +14,21 @@ func New() UUID {
	return Must(NewRandom())
}

// NewString creates a new random UUID and returns it as a string or panics.
// NewString is equivalent to the expression
//
//    uuid.New().String()
func NewString() string {
	return Must(NewRandom()).String()
}

// NewRandom returns a Random (Version 4) UUID.
//
// The strength of the UUIDs is based on the strength of the crypto/rand
// package.
//
// Uses the randomness pool if it was enabled with EnableRandPool.
//
// A note about uniqueness derived from the UUID Wikipedia entry:
//
//  Randomly generated UUIDs have 122 random bits.  One's annual risk of being
@@ -27,8 +37,16 @@ func New() UUID {
//  equivalent to the odds of creating a few tens of trillions of UUIDs in a
//  year and having one duplicate.
func NewRandom() (UUID, error) {
	if !poolEnabled {
		return NewRandomFromReader(rander)
	}
	return newRandomFromPool()
}

// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
func NewRandomFromReader(r io.Reader) (UUID, error) {
	var uuid UUID
	_, err := io.ReadFull(rander, uuid[:])
	_, err := io.ReadFull(r, uuid[:])
	if err != nil {
		return Nil, err
	}
@@ -36,3 +54,23 @@ func NewRandom() (UUID, error) {
	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
	return uuid, nil
}

func newRandomFromPool() (UUID, error) {
	var uuid UUID
	poolMu.Lock()
	if poolPos == randPoolSize {
		_, err := io.ReadFull(rander, pool[:])
		if err != nil {
			poolMu.Unlock()
			return Nil, err
		}
		poolPos = 0
	}
	copy(uuid[:], pool[poolPos:(poolPos+16)])
	poolPos += 16
	poolMu.Unlock()

	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
	return uuid, nil
}
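The bug fix in NewRandomFromReader above (reading from r instead of the package-level rander) is what makes caller-supplied readers actually take effect. A hedged sketch of why that matters, using a seeded, non-cryptographic reader for reproducible test fixtures:

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/google/uuid"
)

func main() {
	// A seeded math/rand source implements io.Reader and gives
	// reproducible (NOT secure) UUIDs, handy in deterministic tests.
	r := rand.New(rand.NewSource(42))
	u, err := uuid.NewRandomFromReader(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(u) // same output on every run
}
```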
24 pkg/metadata/vendor/github.com/pierrec/lz4/.travis.yml generated vendored
@@ -1,24 +0,0 @@
language: go

env:
  - GO111MODULE=off

go:
  - 1.9.x
  - 1.10.x
  - 1.11.x
  - 1.12.x
  - master

matrix:
  fast_finish: true
  allow_failures:
    - go: master

sudo: false

script:
  - go test -v -cpu=2
  - go test -v -cpu=2 -race
  - go test -v -cpu=2 -tags noasm
  - go test -v -cpu=2 -race -tags noasm
23 pkg/metadata/vendor/github.com/pierrec/lz4/debug.go generated vendored
@@ -1,23 +0,0 @@
// +build lz4debug

package lz4

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)

const debugFlag = true

func debug(args ...interface{}) {
	_, file, line, _ := runtime.Caller(1)
	file = filepath.Base(file)

	f := fmt.Sprintf("LZ4: %s:%d %s", file, line, args[0])
	if f[len(f)-1] != '\n' {
		f += "\n"
	}
	fmt.Fprintf(os.Stderr, f, args[1:]...)
}
7 pkg/metadata/vendor/github.com/pierrec/lz4/debug_stub.go generated vendored
@@ -1,7 +0,0 @@
// +build !lz4debug

package lz4

const debugFlag = false

func debug(args ...interface{}) {}
8 pkg/metadata/vendor/github.com/pierrec/lz4/decode_amd64.go generated vendored
@@ -1,8 +0,0 @@
// +build !appengine
// +build gc
// +build !noasm

package lz4

//go:noescape
func decodeBlock(dst, src []byte) int
98 pkg/metadata/vendor/github.com/pierrec/lz4/decode_other.go generated vendored
@@ -1,98 +0,0 @@
// +build !amd64 appengine !gc noasm

package lz4

func decodeBlock(dst, src []byte) (ret int) {
	const hasError = -2
	defer func() {
		if recover() != nil {
			ret = hasError
		}
	}()

	var si, di int
	for {
		// Literals and match lengths (token).
		b := int(src[si])
		si++

		// Literals.
		if lLen := b >> 4; lLen > 0 {
			switch {
			case lLen < 0xF && si+16 < len(src):
				// Shortcut 1
				// if we have enough room in src and dst, and the literals length
				// is small enough (0..14) then copy all 16 bytes, even if not all
				// are part of the literals.
				copy(dst[di:], src[si:si+16])
				si += lLen
				di += lLen
				if mLen := b & 0xF; mLen < 0xF {
					// Shortcut 2
					// if the match length (4..18) fits within the literals, then copy
					// all 18 bytes, even if not all are part of the literals.
					mLen += 4
					if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset {
						i := di - offset
						end := i + 18
						if end > len(dst) {
							// The remaining buffer may not hold 18 bytes.
							// See https://github.com/pierrec/lz4/issues/51.
							end = len(dst)
						}
						copy(dst[di:], dst[i:end])
						si += 2
						di += mLen
						continue
					}
				}
			case lLen == 0xF:
				for src[si] == 0xFF {
					lLen += 0xFF
					si++
				}
				lLen += int(src[si])
				si++
				fallthrough
			default:
				copy(dst[di:di+lLen], src[si:si+lLen])
				si += lLen
				di += lLen
			}
		}
		if si >= len(src) {
			return di
		}

		offset := int(src[si]) | int(src[si+1])<<8
		if offset == 0 {
			return hasError
		}
		si += 2

		// Match.
		mLen := b & 0xF
		if mLen == 0xF {
			for src[si] == 0xFF {
				mLen += 0xFF
				si++
			}
			mLen += int(src[si])
			si++
		}
		mLen += minMatch

		// Copy the match.
		expanded := dst[di-offset:]
		if mLen > offset {
			// Efficiently copy the match dst[di-offset:di] into the dst slice.
			bytesToCopy := offset * (mLen / offset)
			for n := offset; n <= bytesToCopy+offset; n *= 2 {
				copy(expanded[n:], expanded[:n])
			}
			di += bytesToCopy
			mLen -= bytesToCopy
		}
		di += copy(dst[di:di+mLen], expanded[:mLen])
	}
}
30 pkg/metadata/vendor/github.com/pierrec/lz4/errors.go generated vendored
@@ -1,30 +0,0 @@
package lz4

import (
	"errors"
	"fmt"
	"os"
	rdebug "runtime/debug"
)

var (
	// ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBLock when a compressed
	// block is corrupted or the destination buffer is not large enough for the uncompressed data.
	ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short")
	// ErrInvalid is returned when reading an invalid LZ4 archive.
	ErrInvalid = errors.New("lz4: bad magic number")
	// ErrBlockDependency is returned when attempting to decompress an archive created with block dependency.
	ErrBlockDependency = errors.New("lz4: block dependency not supported")
	// ErrUnsupportedSeek is returned when attempting to Seek any way but forward from the current position.
	ErrUnsupportedSeek = errors.New("lz4: can only seek forward from io.SeekCurrent")
)

func recoverBlock(e *error) {
	if r := recover(); r != nil && *e == nil {
		if debugFlag {
			fmt.Fprintln(os.Stderr, r)
			rdebug.PrintStack()
		}
		*e = ErrInvalidSourceShortBuffer
	}
}
66 pkg/metadata/vendor/github.com/pierrec/lz4/lz4.go generated vendored
@@ -1,66 +0,0 @@
// Package lz4 implements reading and writing lz4 compressed data (a frame),
// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html.
//
// Although the block level compression and decompression functions are exposed and are fully compatible
// with the lz4 block format definition, they are low level and should not be used directly.
// For a complete description of an lz4 compressed block, see:
// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html
//
// See https://github.com/Cyan4973/lz4 for the reference C implementation.
//
package lz4

const (
	// Extension is the LZ4 frame file name extension
	Extension = ".lz4"
	// Version is the LZ4 frame format version
	Version = 1

	frameMagic     uint32 = 0x184D2204
	frameSkipMagic uint32 = 0x184D2A50

	// The following constants are used to setup the compression algorithm.
	minMatch = 4  // the minimum size of the match sequence size (4 bytes)
	winSizeLog = 16 // LZ4 64Kb window size limit
	winSize    = 1 << winSizeLog
	winMask    = winSize - 1 // 64Kb window of previous data for dependent blocks
	compressedBlockFlag = 1 << 31
	compressedBlockMask = compressedBlockFlag - 1

	// hashLog determines the size of the hash table used to quickly find a previous match position.
	// Its value influences the compression speed and memory usage, the lower the faster,
	// but at the expense of the compression ratio.
	// 16 seems to be the best compromise for fast compression.
	hashLog = 16
	htSize  = 1 << hashLog

	mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes.
)

// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb.
const (
	blockSize64K  = 64 << 10
	blockSize256K = 256 << 10
	blockSize1M   = 1 << 20
	blockSize4M   = 4 << 20
)

var (
	bsMapID    = map[byte]int{4: blockSize64K, 5: blockSize256K, 6: blockSize1M, 7: blockSize4M}
	bsMapValue = map[int]byte{blockSize64K: 4, blockSize256K: 5, blockSize1M: 6, blockSize4M: 7}
)

// Header describes the various flags that can be set on a Writer or obtained from a Reader.
// The default values match those of the LZ4 frame format definition
// (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html).
//
// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls.
// It is the caller responsibility to check them if necessary.
type Header struct {
	BlockChecksum    bool   // Compressed blocks checksum flag.
	NoChecksum       bool   // Frame checksum flag.
	BlockMaxSize     int    // Size of the uncompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB.
	Size             uint64 // Frame total size. It is _not_ computed by the Writer.
	CompressionLevel int    // Compression level (higher is better, use 0 for fastest compression).
	done             bool   // Header processed flag (Read or Write and checked).
}
29 pkg/metadata/vendor/github.com/pierrec/lz4/lz4_go1.10.go generated vendored
@@ -1,29 +0,0 @@
//+build go1.10

package lz4

import (
	"fmt"
	"strings"
)

func (h Header) String() string {
	var s strings.Builder

	s.WriteString(fmt.Sprintf("%T{", h))
	if h.BlockChecksum {
		s.WriteString("BlockChecksum: true ")
	}
	if h.NoChecksum {
		s.WriteString("NoChecksum: true ")
	}
	if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 {
		s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs))
	}
	if l := h.CompressionLevel; l != 0 {
		s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l))
	}
	s.WriteByte('}')

	return s.String()
}
29 pkg/metadata/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go generated vendored
@@ -1,29 +0,0 @@
//+build !go1.10

package lz4

import (
	"bytes"
	"fmt"
)

func (h Header) String() string {
	var s bytes.Buffer

	s.WriteString(fmt.Sprintf("%T{", h))
	if h.BlockChecksum {
		s.WriteString("BlockChecksum: true ")
	}
	if h.NoChecksum {
		s.WriteString("NoChecksum: true ")
	}
	if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 {
		s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs))
	}
	if l := h.CompressionLevel; l != 0 {
		s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l))
	}
	s.WriteByte('}')

	return s.String()
}
335 pkg/metadata/vendor/github.com/pierrec/lz4/reader.go generated vendored
@@ -1,335 +0,0 @@
package lz4

import (
	"encoding/binary"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/pierrec/lz4/internal/xxh32"
)

// Reader implements the LZ4 frame decoder.
// The Header is set after the first call to Read().
// The Header may change between Read() calls in case of concatenated frames.
type Reader struct {
	Header
	// Handler called when a block has been successfully read.
	// It provides the number of bytes read.
	OnBlockDone func(size int)

	buf      [8]byte       // Scrap buffer.
	pos      int64         // Current position in src.
	src      io.Reader     // Source.
	zdata    []byte        // Compressed data.
	data     []byte        // Uncompressed data.
	idx      int           // Index of unread bytes into data.
	checksum xxh32.XXHZero // Frame hash.
	skip     int64         // Bytes to skip before next read.
	dpos     int64         // Position in dest
}

// NewReader returns a new LZ4 frame decoder.
// No access to the underlying io.Reader is performed.
func NewReader(src io.Reader) *Reader {
	r := &Reader{src: src}
	return r
}

// readHeader checks the frame magic number and parses the frame descriptoz.
// Skippable frames are supported even as a first frame although the LZ4
// specifications recommends skippable frames not to be used as first frames.
func (z *Reader) readHeader(first bool) error {
	defer z.checksum.Reset()

	buf := z.buf[:]
	for {
		magic, err := z.readUint32()
		if err != nil {
			z.pos += 4
			if !first && err == io.ErrUnexpectedEOF {
				return io.EOF
			}
			return err
		}
		if magic == frameMagic {
			break
		}
		if magic>>8 != frameSkipMagic>>8 {
			return ErrInvalid
		}
		skipSize, err := z.readUint32()
		if err != nil {
			return err
		}
		z.pos += 4
		m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize))
		if err != nil {
			return err
		}
		z.pos += m
	}

	// Header.
	if _, err := io.ReadFull(z.src, buf[:2]); err != nil {
		return err
	}
	z.pos += 8

	b := buf[0]
	if v := b >> 6; v != Version {
		return fmt.Errorf("lz4: invalid version: got %d; expected %d", v, Version)
	}
	if b>>5&1 == 0 {
		return ErrBlockDependency
	}
	z.BlockChecksum = b>>4&1 > 0
	frameSize := b>>3&1 > 0
	z.NoChecksum = b>>2&1 == 0

	bmsID := buf[1] >> 4 & 0x7
	bSize, ok := bsMapID[bmsID]
	if !ok {
		return fmt.Errorf("lz4: invalid block max size ID: %d", bmsID)
	}
	z.BlockMaxSize = bSize

	// Allocate the compressed/uncompressed buffers.
	// The compressed buffer cannot exceed the uncompressed one.
	if n := 2 * bSize; cap(z.zdata) < n {
		z.zdata = make([]byte, n, n)
	}
	if debugFlag {
		debug("header block max size id=%d size=%d", bmsID, bSize)
	}
	z.zdata = z.zdata[:bSize]
	z.data = z.zdata[:cap(z.zdata)][bSize:]
	z.idx = len(z.data)

	_, _ = z.checksum.Write(buf[0:2])

	if frameSize {
		buf := buf[:8]
		if _, err := io.ReadFull(z.src, buf); err != nil {
			return err
		}
		z.Size = binary.LittleEndian.Uint64(buf)
		z.pos += 8
		_, _ = z.checksum.Write(buf)
	}

	// Header checksum.
	if _, err := io.ReadFull(z.src, buf[:1]); err != nil {
		return err
	}
	z.pos++
	if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] {
		return fmt.Errorf("lz4: invalid header checksum: got %x; expected %x", buf[0], h)
	}

	z.Header.done = true
	if debugFlag {
		debug("header read: %v", z.Header)
	}

	return nil
}

// Read decompresses data from the underlying source into the supplied buffer.
//
// Since there can be multiple streams concatenated, Header values may
// change between calls to Read(). If that is the case, no data is actually read from
// the underlying io.Reader, to allow for potential input buffer resizing.
func (z *Reader) Read(buf []byte) (int, error) {
	if debugFlag {
		debug("Read buf len=%d", len(buf))
	}
	if !z.Header.done {
		if err := z.readHeader(true); err != nil {
			return 0, err
		}
		if debugFlag {
			debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d",
				len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx)
		}
	}

	if len(buf) == 0 {
		return 0, nil
	}

	if z.idx == len(z.data) {
		// No data ready for reading, process the next block.
		if debugFlag {
			debug("reading block from writer")
		}
		// Reset uncompressed buffer
		z.data = z.zdata[:cap(z.zdata)][len(z.zdata):]

		// Block length: 0 = end of frame, highest bit set: uncompressed.
		bLen, err := z.readUint32()
		if err != nil {
			return 0, err
		}
		z.pos += 4

		if bLen == 0 {
			// End of frame reached.
			if !z.NoChecksum {
				// Validate the frame checksum.
				checksum, err := z.readUint32()
				if err != nil {
					return 0, err
				}
				if debugFlag {
					debug("frame checksum got=%x / want=%x", z.checksum.Sum32(), checksum)
				}
				z.pos += 4
				if h := z.checksum.Sum32(); checksum != h {
					return 0, fmt.Errorf("lz4: invalid frame checksum: got %x; expected %x", h, checksum)
				}
			}

			// Get ready for the next concatenated frame and keep the position.
			pos := z.pos
			z.Reset(z.src)
			z.pos = pos

			// Since multiple frames can be concatenated, check for more.
			return 0, z.readHeader(false)
		}

		if debugFlag {
			debug("raw block size %d", bLen)
		}
		if bLen&compressedBlockFlag > 0 {
			// Uncompressed block.
			bLen &= compressedBlockMask
			if debugFlag {
				debug("uncompressed block size %d", bLen)
			}
			if int(bLen) > cap(z.data) {
				return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
			}
			z.data = z.data[:bLen]
			if _, err := io.ReadFull(z.src, z.data); err != nil {
				return 0, err
			}
			z.pos += int64(bLen)
			if z.OnBlockDone != nil {
				z.OnBlockDone(int(bLen))
			}

			if z.BlockChecksum {
				checksum, err := z.readUint32()
				if err != nil {
					return 0, err
				}
				z.pos += 4

				if h := xxh32.ChecksumZero(z.data); h != checksum {
					return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum)
				}
			}

		} else {
			// Compressed block.
			if debugFlag {
				debug("compressed block size %d", bLen)
			}
			if int(bLen) > cap(z.data) {
				return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
			}
			zdata := z.zdata[:bLen]
			if _, err := io.ReadFull(z.src, zdata); err != nil {
				return 0, err
			}
			z.pos += int64(bLen)

			if z.BlockChecksum {
				checksum, err := z.readUint32()
				if err != nil {
					return 0, err
				}
				z.pos += 4

				if h := xxh32.ChecksumZero(zdata); h != checksum {
					return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum)
				}
			}

			n, err := UncompressBlock(zdata, z.data)
			if err != nil {
				return 0, err
			}
			z.data = z.data[:n]
			if z.OnBlockDone != nil {
				z.OnBlockDone(n)
			}
		}

		if !z.NoChecksum {
			_, _ = z.checksum.Write(z.data)
			if debugFlag {
				debug("current frame checksum %x", z.checksum.Sum32())
			}
		}
		z.idx = 0
	}

	if z.skip > int64(len(z.data[z.idx:])) {
		z.skip -= int64(len(z.data[z.idx:]))
		z.dpos += int64(len(z.data[z.idx:]))
		z.idx = len(z.data)
		return 0, nil
	}

	z.idx += int(z.skip)
	z.dpos += z.skip
	z.skip = 0

	n := copy(buf, z.data[z.idx:])
	z.idx += n
	z.dpos += int64(n)
	if debugFlag {
		debug("copied %d bytes to input", n)
	}

	return n, nil
}

// Seek implements io.Seeker, but supports seeking forward from the current
// position only. Any other seek will return an error. Allows skipping output
// bytes which aren't needed, which in some scenarios is faster than reading
// and discarding them.
// Note this may cause future calls to Read() to read 0 bytes if all of the
// data they would have returned is skipped.
func (z *Reader) Seek(offset int64, whence int) (int64, error) {
	if offset < 0 || whence != io.SeekCurrent {
		return z.dpos + z.skip, ErrUnsupportedSeek
	}
	z.skip += offset
	return z.dpos + z.skip, nil
}

// Reset discards the Reader's state and makes it equivalent to the
// result of its original state from NewReader, but reading from r instead.
// This permits reusing a Reader rather than allocating a new one.
func (z *Reader) Reset(r io.Reader) {
	z.Header = Header{}
	z.pos = 0
	z.src = r
	z.zdata = z.zdata[:0]
	z.data = z.data[:0]
	z.idx = 0
	z.checksum.Reset()
}

// readUint32 reads an uint32 into the supplied buffer.
// The idea is to make use of the already allocated buffers avoiding additional allocations.
func (z *Reader) readUint32() (uint32, error) {
	buf := z.buf[:4]
	_, err := io.ReadFull(z.src, buf)
	x := binary.LittleEndian.Uint32(buf)
	return x, err
}
@@ -32,3 +32,5 @@ Temporary Items

cmd/*/*exe
.idea

fuzz/*.zip
@@ -1,7 +1,7 @@
# lz4 : LZ4 compression in pure Go

[](https://godoc.org/github.com/pierrec/lz4)
[](https://travis-ci.org/pierrec/lz4)
[](https://pkg.go.dev/github.com/pierrec/lz4/v4)
[](https://github.com/pierrec/lz4/actions)
[](https://goreportcard.com/report/github.com/pierrec/lz4)
[](https://github.com/pierrec/lz4/tags)

@@ -15,13 +15,13 @@ The implementation is based on the reference C [one](https://github.com/lz4/lz4)
Assuming you have the go toolchain installed:

```
go get github.com/pierrec/lz4
go get github.com/pierrec/lz4/v4
```

There is a command line interface tool to compress and decompress LZ4 files.

```
go install github.com/pierrec/lz4/cmd/lz4c
go install github.com/pierrec/lz4/v4/cmd/lz4c
```

Usage

@@ -83,24 +83,10 @@ Contributions are very welcome for bug fixing, performance improvements...!

## Contributors

Thanks to all contributors so far:
Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far!

- [@klauspost](https://github.com/klauspost)
- [@heidawei](https://github.com/heidawei)
- [@x4m](https://github.com/x4m)
- [@Zariel](https://github.com/Zariel)
- [@edwingeng](https://github.com/edwingeng)
- [@danielmoy-google](https://github.com/danielmoy-google)
- [@honda-tatsuya](https://github.com/honda-tatsuya)
- [@h8liu](https://github.com/h8liu)
- [@sbinet](https://github.com/sbinet)
- [@fingon](https://github.com/fingon)
- [@emfree](https://github.com/emfree)
- [@lhemala](https://github.com/lhemala)
- [@connor4312](https://github.com/connor4312)
- [@oov](https://github.com/oov)
- [@arya](https://github.com/arya)
- [@ikkeps](https://github.com/ikkeps)
Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder.

Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder
Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code
Special thanks to [@greatroar](https://github.com/greatroar) for his work on the asm implementations of the decoder for amd64 and arm64.

Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code.
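Since the vendored copy moves from pierrec/lz4 to the v4 module (the `go get` change in the README hunk above), a hedged round-trip sketch of the v4 frame API as I understand it; the sample input is invented, and errors are elided for brevity:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/pierrec/lz4/v4"
)

func main() {
	src := []byte("hello hello hello hello hello") // illustrative, repetitive input

	// Compress into an LZ4 frame.
	var frame bytes.Buffer
	zw := lz4.NewWriter(&frame)
	_, _ = zw.Write(src)
	_ = zw.Close() // flushes the final block and frame checksum

	// Decompress the frame back.
	zr := lz4.NewReader(&frame)
	out, _ := io.ReadAll(zr)
	fmt.Println(bytes.Equal(src, out)) // true
}
```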
3 pkg/metadata/vendor/github.com/pierrec/lz4/v4/go.mod generated vendored Normal file
@@ -0,0 +1,3 @@
module github.com/pierrec/lz4/v4

go 1.14

0 pkg/metadata/vendor/github.com/pierrec/lz4/v4/go.sum generated vendored Normal file
@@ -1,66 +1,126 @@
|
||||
package lz4
|
||||
package lz4block
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math/bits"
|
||||
"sync"
|
||||
|
||||
"github.com/pierrec/lz4/v4/internal/lz4errors"
|
||||
)
|
||||
|
||||
const (
|
||||
// The following constants are used to setup the compression algorithm.
|
||||
minMatch = 4 // the minimum size of the match sequence size (4 bytes)
|
||||
winSizeLog = 16 // LZ4 64Kb window size limit
|
||||
winSize = 1 << winSizeLog
|
||||
winMask = winSize - 1 // 64Kb window of previous data for dependent blocks
|
||||
|
||||
// hashLog determines the size of the hash table used to quickly find a previous match position.
|
||||
// Its value influences the compression speed and memory usage, the lower the faster,
|
||||
// but at the expense of the compression ratio.
|
||||
// 16 seems to be the best compromise for fast compression.
|
||||
hashLog = 16
|
||||
htSize = 1 << hashLog
|
||||
|
||||
mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes.
|
||||
)
|
||||
|
||||
func recoverBlock(e *error) {
|
||||
if r := recover(); r != nil && *e == nil {
|
||||
*e = lz4errors.ErrInvalidSourceShortBuffer
|
||||
}
|
||||
}
|
||||
|
||||
// blockHash hashes the lower 6 bytes into a value < htSize.
|
||||
func blockHash(x uint64) uint32 {
|
||||
const prime6bytes = 227718039650203
|
||||
return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog))
|
||||
}
|
||||
|
||||
// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible.
|
||||
func CompressBlockBound(n int) int {
|
||||
return n + n/255 + 16
|
||||
}
|
||||
|
||||
// UncompressBlock uncompresses the source buffer into the destination one,
|
||||
// and returns the uncompressed size.
|
||||
//
|
||||
// The destination buffer must be sized appropriately.
|
||||
//
|
||||
// An error is returned if the source data is invalid or the destination buffer is too small.
|
||||
func UncompressBlock(src, dst []byte) (int, error) {
|
||||
func UncompressBlock(src, dst, dict []byte) (int, error) {
|
||||
if len(src) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
if di := decodeBlock(dst, src); di >= 0 {
|
||||
if di := decodeBlock(dst, src, dict); di >= 0 {
|
||||
return di, nil
|
||||
}
|
||||
return 0, ErrInvalidSourceShortBuffer
|
||||
return 0, lz4errors.ErrInvalidSourceShortBuffer
|
||||
}
|
||||
|
||||
// CompressBlock compresses the source buffer into the destination one.
|
||||
// This is the fast version of LZ4 compression and also the default one.
|
||||
// The size of hashTable must be at least 64Kb.
|
||||
//
|
||||
// The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible.
|
||||
//
|
||||
// An error is returned if the destination buffer is too small.
|
||||
func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) {
|
||||
defer recoverBlock(&err)
|
||||
type Compressor struct {
|
||||
// Offsets are at most 64kiB, so we can store only the lower 16 bits of
|
||||
// match positions: effectively, an offset from some 64kiB block boundary.
|
||||
//
|
||||
// When we retrieve such an offset, we interpret it as relative to the last
|
||||
// block boundary si &^ 0xffff, or the one before, (si &^ 0xffff) - 0x10000,
|
||||
// depending on which of these is inside the current window. If a table
|
||||
// entry was generated more than 64kiB back in the input, we find out by
|
||||
// inspecting the input stream.
|
||||
table [htSize]uint16
|
||||
|
||||
// Bitmap indicating which positions in the table are in use.
|
||||
// This allows us to quickly reset the table for reuse,
|
||||
// without having to zero everything.
|
||||
inUse [htSize / 32]uint32
|
||||
}
|
||||
|
||||
// Get returns the position of a presumptive match for the hash h.
|
||||
// The match may be a false positive due to a hash collision or an old entry.
|
||||
// If si < winSize, the return value may be negative.
|
||||
func (c *Compressor) get(h uint32, si int) int {
|
||||
h &= htSize - 1
|
||||
i := 0
|
||||
if c.inUse[h/32]&(1<<(h%32)) != 0 {
|
||||
i = int(c.table[h])
|
||||
}
|
||||
i += si &^ winMask
|
||||
if i >= si {
|
||||
// Try previous 64kiB block (negative when in first block).
|
||||
i -= winSize
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
func (c *Compressor) put(h uint32, si int) {
|
||||
h &= htSize - 1
|
||||
c.table[h] = uint16(si)
|
||||
c.inUse[h/32] |= 1 << (h % 32)
|
||||
}
|
||||
|
||||
func (c *Compressor) reset() { c.inUse = [htSize / 32]uint32{} }
|
||||
|
||||
var compressorPool = sync.Pool{New: func() interface{} { return new(Compressor) }}
|
||||
|
||||
func CompressBlock(src, dst []byte) (int, error) {
|
||||
c := compressorPool.Get().(*Compressor)
|
||||
n, err := c.CompressBlock(src, dst)
|
||||
compressorPool.Put(c)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (c *Compressor) CompressBlock(src, dst []byte) (int, error) {
|
||||
// Zero out reused table to avoid non-deterministic output (issue #65).
|
||||
c.reset()
|
||||
|
||||
// Return 0, nil only if the destination buffer size is < CompressBlockBound.
|
||||
isNotCompressible := len(dst) < CompressBlockBound(len(src))
|
||||
|
||||
// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
|
||||
// This significantly speeds up incompressible data and usually has very small impact on compresssion.
|
||||
// This significantly speeds up incompressible data and usually has very small impact on compression.
|
||||
// bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
|
||||
const adaptSkipLog = 7
|
||||
sn, dn := len(src)-mfLimit, len(dst)
|
||||
if sn <= 0 || dn == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
if len(hashTable) < htSize {
|
||||
return 0, fmt.Errorf("hash table too small, should be at least %d in size", htSize)
|
||||
}
|
||||
// Prove to the compiler the table has at least htSize elements.
|
||||
// The compiler can see that "uint32() >> hashShift" cannot be out of bounds.
|
||||
hashTable = hashTable[:htSize]
|
||||
|
||||
// si: Current position of the search.
|
||||
// anchor: Position of the current literals.
|
||||
var si, anchor int
|
||||
var si, di, anchor int
|
||||
sn := len(src) - mfLimit
|
||||
if sn <= 0 {
|
||||
goto lastLiterals
|
||||
}
|
||||
|
||||
// Fast scan strategy: the hash table only stores the last 4 bytes sequences.
|
||||
for si < sn {
|
||||
@@ -71,33 +131,30 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) {
|
||||
|
||||
// We check a match at s, s+1 and s+2 and pick the first one we get.
|
||||
// Checking 3 only requires us to load the source one.
|
||||
ref := hashTable[h]
|
||||
ref2 := hashTable[h2]
|
||||
hashTable[h] = si
|
||||
hashTable[h2] = si + 1
|
||||
ref := c.get(h, si)
|
||||
ref2 := c.get(h2, si+1)
|
||||
c.put(h, si)
|
||||
c.put(h2, si+1)
|
||||
|
||||
offset := si - ref
|
||||
|
||||
// If offset <= 0 we got an old entry in the hash table.
|
||||
if offset <= 0 || offset >= winSize || // Out of window.
|
||||
uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches.
|
||||
if offset <= 0 || offset >= winSize || uint32(match) != binary.LittleEndian.Uint32(src[ref:]) {
|
||||
// No match. Start calculating another hash.
|
||||
// The processor can usually do this out-of-order.
|
||||
h = blockHash(match >> 16)
|
||||
ref = hashTable[h]
|
||||
ref3 := c.get(h, si+2)
|
||||
|
||||
// Check the second match at si+1
|
||||
si += 1
|
||||
offset = si - ref2
|
||||
|
||||
if offset <= 0 || offset >= winSize ||
|
||||
uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) {
|
||||
if offset <= 0 || offset >= winSize || uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) {
|
||||
// No match. Check the third match at si+2
|
||||
si += 1
|
||||
offset = si - ref
|
||||
hashTable[h] = si
|
||||
offset = si - ref3
|
||||
c.put(h, si)
|
||||
|
||||
if offset <= 0 || offset >= winSize ||
|
||||
uint32(match>>16) != binary.LittleEndian.Uint32(src[ref:]) {
|
||||
if offset <= 0 || offset >= winSize || uint32(match>>16) != binary.LittleEndian.Uint32(src[ref3:]) {
|
||||
// Skip one extra byte (at si+3) before we check 3 matches again.
|
||||
si += 2 + (si-anchor)>>adaptSkipLog
|
||||
continue
|
||||
@@ -124,7 +181,7 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) {
|
||||
si, mLen = si+mLen, si+minMatch
|
||||
|
||||
// Find the longest match by looking by batches of 8 bytes.
|
||||
for si < sn {
|
||||
for si+8 <= sn {
|
||||
x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:])
|
||||
if x == 0 {
|
||||
si += 8
|
||||
@@ -136,6 +193,9 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) {
|
||||
}
|
||||
|
||||
mLen = si - mLen
|
||||
if di >= len(dst) {
|
||||
return 0, lz4errors.ErrInvalidSourceShortBuffer
|
||||
}
|
||||
if mLen < 0xF {
|
||||
dst[di] = byte(mLen)
|
||||
} else {
|
||||
@@ -149,29 +209,40 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) {
|
||||
dst[di] |= 0xF0
|
||||
di++
|
||||
l := lLen - 0xF
|
||||
for ; l >= 0xFF; l -= 0xFF {
|
||||
for ; l >= 0xFF && di < len(dst); l -= 0xFF {
|
||||
dst[di] = 0xFF
|
||||
di++
|
||||
}
|
||||
if di >= len(dst) {
|
||||
return 0, lz4errors.ErrInvalidSourceShortBuffer
|
||||
}
|
||||
dst[di] = byte(l)
|
||||
}
|
||||
di++
|
||||
|
||||
// Literals.
|
||||
if di+lLen > len(dst) {
|
||||
return 0, lz4errors.ErrInvalidSourceShortBuffer
|
||||
}
|
||||
copy(dst[di:di+lLen], src[anchor:anchor+lLen])
|
||||
di += lLen + 2
|
||||
anchor = si
|
||||
|
||||
// Encode offset.
|
||||
_ = dst[di] // Bound check elimination.
|
||||
if di > len(dst) {
|
||||
return 0, lz4errors.ErrInvalidSourceShortBuffer
|
||||
}
|
||||
dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
|
||||
|
||||
// Encode match length part 2.
|
||||
if mLen >= 0xF {
|
||||
for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
|
||||
for mLen -= 0xF; mLen >= 0xFF && di < len(dst); mLen -= 0xFF {
|
||||
dst[di] = 0xFF
|
||||
di++
|
||||
}
|
||||
if di >= len(dst) {
|
||||
return 0, lz4errors.ErrInvalidSourceShortBuffer
|
||||
}
|
||||
dst[di] = byte(mLen)
|
||||
di++
|
||||
}
|
||||
@@ -181,34 +252,44 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) {
|
||||
}
|
||||
// Hash match end-2
|
||||
h = blockHash(binary.LittleEndian.Uint64(src[si-2:]))
|
||||
hashTable[h] = si - 2
|
||||
c.put(h, si-2)
|
||||
}
|
||||
|
||||
if anchor == 0 {
|
||||
lastLiterals:
|
||||
if isNotCompressible && anchor == 0 {
|
||||
// Incompressible.
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Last literals.
|
||||
if di >= len(dst) {
|
||||
return 0, lz4errors.ErrInvalidSourceShortBuffer
|
||||
}
|
||||
lLen := len(src) - anchor
|
||||
if lLen < 0xF {
|
||||
dst[di] = byte(lLen << 4)
|
||||
} else {
|
||||
dst[di] = 0xF0
|
||||
di++
|
||||
for lLen -= 0xF; lLen >= 0xFF; lLen -= 0xFF {
|
||||
for lLen -= 0xF; lLen >= 0xFF && di < len(dst); lLen -= 0xFF {
|
||||
dst[di] = 0xFF
|
||||
di++
|
||||
}
|
||||
if di >= len(dst) {
|
||||
return 0, lz4errors.ErrInvalidSourceShortBuffer
|
||||
}
|
||||
dst[di] = byte(lLen)
|
||||
}
|
||||
di++
|
||||
|
||||
// Write the last literals.
|
||||
if di >= anchor {
|
||||
if isNotCompressible && di >= anchor {
|
||||
// Incompressible.
|
||||
return 0, nil
|
||||
}
|
||||
if di+len(src)-anchor > len(dst) {
|
||||
return 0, lz4errors.ErrInvalidSourceShortBuffer
|
||||
}
|
||||
di += copy(dst[di:di+len(src)-anchor], src[anchor:])
|
||||
return di, nil
|
||||
}
|
||||
@@ -219,37 +300,50 @@ func blockHashHC(x uint32) uint32 {
|
||||
return x * hasher >> (32 - winSizeLog)
|
||||
}
|
||||
|
||||
// CompressBlockHC compresses the source buffer src into the destination dst
|
||||
// with max search depth (use 0 or negative value for no max).
|
||||
//
|
||||
// CompressBlockHC compression ratio is better than CompressBlock but it is also slower.
|
||||
//
|
||||
// The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible.
|
||||
//
|
||||
// An error is returned if the destination buffer is too small.
|
||||
func CompressBlockHC(src, dst []byte, depth int) (di int, err error) {
|
||||
type CompressorHC struct {
|
||||
// hashTable: stores the last position found for a given hash
|
||||
// chainTable: stores previous positions for a given hash
|
||||
hashTable, chainTable [htSize]int
|
||||
needsReset bool
|
||||
}
|
||||
|
||||
var compressorHCPool = sync.Pool{New: func() interface{} { return new(CompressorHC) }}
|
||||
|
||||
func CompressBlockHC(src, dst []byte, depth CompressionLevel) (int, error) {
|
||||
c := compressorHCPool.Get().(*CompressorHC)
|
||||
n, err := c.CompressBlock(src, dst, depth)
|
||||
compressorHCPool.Put(c)
|
||||
return n, err
|
||||
}
|
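The sync.Pool wrapper above lets the package-level CompressBlockHC keep a stateless call signature while amortizing the two [htSize]int tables across invocations; needsReset then zeroes the reused tables so output stays deterministic (issue #65). A stripped-down sketch of the same borrow/run/return pattern, with hypothetical names and assuming a worker type carrying large scratch state:

    var workerPool = sync.Pool{New: func() interface{} { return new(worker) }}

    func process(src, dst []byte) (int, error) {
        w := workerPool.Get().(*worker)
        n, err := w.run(src, dst) // uses w's scratch tables
        workerPool.Put(w)         // hand the scratch state back for reuse
        return n, err
    }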
||||
|
||||
func (c *CompressorHC) CompressBlock(src, dst []byte, depth CompressionLevel) (_ int, err error) {
|
||||
if c.needsReset {
|
||||
// Zero out reused table to avoid non-deterministic output (issue #65).
|
||||
c.hashTable = [htSize]int{}
|
||||
c.chainTable = [htSize]int{}
|
||||
}
|
||||
c.needsReset = true // Only false on first call.
|
||||
|
||||
defer recoverBlock(&err)
|
||||
|
||||
// Return 0, nil only if the destination buffer size is < CompressBlockBound.
|
||||
isNotCompressible := len(dst) < CompressBlockBound(len(src))
|
||||
|
||||
// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
|
||||
// This significantly speeds up incompressible data and usually has very small impact on compresssion.
|
||||
// This significantly speeds up incompressible data and usually has very small impact on compression.
|
||||
// bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
|
||||
const adaptSkipLog = 7
|
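Concretely, with adaptSkipLog = 7 the scanner advances one byte at a time for the first 128 bytes since the last match, two bytes at a time for the next 128, and so on, so runs of incompressible input are skimmed rather than fully hashed. The step computation is just (hypothetical helper mirroring the formula above):

    // step returns how far to advance the scan position while no
    // match is being found.
    func step(bytesSinceLastMatch int) int {
        const adaptSkipLog = 7
        return 1 + bytesSinceLastMatch>>adaptSkipLog
    }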
||||
|
||||
sn, dn := len(src)-mfLimit, len(dst)
|
||||
if sn <= 0 || dn == 0 {
|
||||
return 0, nil
|
||||
var si, di, anchor int
|
||||
sn := len(src) - mfLimit
|
||||
if sn <= 0 {
|
||||
goto lastLiterals
|
||||
}
|
||||
var si int
|
||||
|
||||
// hashTable: stores the last position found for a given hash
|
||||
// chainTable: stores previous positions for a given hash
|
||||
var hashTable, chainTable [winSize]int
|
||||
|
||||
if depth <= 0 {
|
||||
if depth == 0 {
|
||||
depth = winSize
|
||||
}
|
||||
|
||||
anchor := si
|
||||
for si < sn {
|
||||
// Hash the next 4 bytes (sequence).
|
||||
match := binary.LittleEndian.Uint32(src[si:])
|
||||
@@ -258,7 +352,7 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) {
|
||||
// Follow the chain until out of window and give the longest match.
|
||||
mLen := 0
|
||||
offset := 0
|
||||
for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] {
|
||||
for next, try := c.hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next, try = c.chainTable[next&winMask], try-1 {
|
||||
// The first (mLen==0) or next byte (mLen>=minMatch) at current match length
|
||||
// must match to improve on the match length.
|
||||
if src[next+mLen] != src[si+mLen] {
|
||||
@@ -284,10 +378,9 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) {
|
||||
mLen = ml
|
||||
offset = si - next
|
||||
// Try another previous position with the same hash.
|
||||
try--
|
||||
}
|
||||
chainTable[si&winMask] = hashTable[h]
|
||||
hashTable[h] = si
|
||||
c.chainTable[si&winMask] = c.hashTable[h]
|
||||
c.hashTable[h] = si
|
||||
|
||||
// No match found.
|
||||
if mLen == 0 {
|
||||
@@ -306,8 +399,8 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) {
|
||||
match >>= 8
|
||||
match |= uint32(src[si+3]) << 24
|
||||
h := blockHashHC(match)
|
||||
chainTable[si&winMask] = hashTable[h]
|
||||
hashTable[h] = si
|
||||
c.chainTable[si&winMask] = c.hashTable[h]
|
||||
c.hashTable[h] = si
|
||||
si++
|
||||
}
|
||||
|
||||
@@ -356,12 +449,13 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) {
|
||||
}
|
||||
}
|
||||
|
||||
if anchor == 0 {
|
||||
if isNotCompressible && anchor == 0 {
|
||||
// Incompressible.
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Last literals.
|
||||
lastLiterals:
|
||||
lLen := len(src) - anchor
|
||||
if lLen < 0xF {
|
||||
dst[di] = byte(lLen << 4)
|
||||
@@ -378,7 +472,7 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) {
|
||||
di++
|
||||
|
||||
// Write the last literals.
|
||||
if di >= anchor {
|
||||
if isNotCompressible && di >= anchor {
|
||||
// Incompressible.
|
||||
return 0, nil
|
||||
}
|
pkg/metadata/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go (generated, vendored, new file, 90 lines)
@@ -0,0 +1,90 @@
|
||||
// Package lz4block provides LZ4 BlockSize types and pools of buffers.
|
||||
package lz4block
|
||||
|
||||
import "sync"
|
||||
|
||||
const (
|
||||
Block64Kb uint32 = 1 << (16 + iota*2)
|
||||
Block256Kb
|
||||
Block1Mb
|
||||
Block4Mb
|
||||
)
|
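The iota expression quadruples each size: Block64Kb = 1<<16 = 65536, Block256Kb = 1<<18 = 262144, Block1Mb = 1<<20 = 1048576, and Block4Mb = 1<<22 = 4194304, matching the four block maximum sizes defined by the LZ4 frame format.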
||||
|
||||
// In legacy mode all blocks are compressed regardless
|
||||
// of the compressed size: use the bound size.
|
||||
var Block8Mb = uint32(CompressBlockBound(8 << 20))
|
||||
|
||||
var (
|
||||
BlockPool64K = sync.Pool{New: func() interface{} { return make([]byte, Block64Kb) }}
|
||||
BlockPool256K = sync.Pool{New: func() interface{} { return make([]byte, Block256Kb) }}
|
||||
BlockPool1M = sync.Pool{New: func() interface{} { return make([]byte, Block1Mb) }}
|
||||
BlockPool4M = sync.Pool{New: func() interface{} { return make([]byte, Block4Mb) }}
|
||||
BlockPool8M = sync.Pool{New: func() interface{} { return make([]byte, Block8Mb) }}
|
||||
)
|
||||
|
||||
func Index(b uint32) BlockSizeIndex {
|
||||
switch b {
|
||||
case Block64Kb:
|
||||
return 4
|
||||
case Block256Kb:
|
||||
return 5
|
||||
case Block1Mb:
|
||||
return 6
|
||||
case Block4Mb:
|
||||
return 7
|
||||
case Block8Mb: // only valid in legacy mode
|
||||
return 3
|
||||
}
|
||||
return 0
|
||||
}
|
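The returned indices 4-7 are exactly the block-size codes used in the frame descriptor's BD byte (3 is repurposed for the non-standard legacy 8 Mb bound, as the comment above notes). A typical borrow/return cycle with the helpers in this file:

    idx := Index(Block1Mb) // 6
    buf := idx.Get()       // 1 MiB buffer from BlockPool1M
    defer Put(buf)         // size-checked return to the right pool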
||||
|
||||
func IsValid(b uint32) bool {
|
||||
return Index(b) > 0
|
||||
}
|
||||
|
||||
type BlockSizeIndex uint8
|
||||
|
||||
func (b BlockSizeIndex) IsValid() bool {
|
||||
switch b {
|
||||
case 4, 5, 6, 7:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (b BlockSizeIndex) Get() []byte {
|
||||
var buf interface{}
|
||||
switch b {
|
||||
case 4:
|
||||
buf = BlockPool64K.Get()
|
||||
case 5:
|
||||
buf = BlockPool256K.Get()
|
||||
case 6:
|
||||
buf = BlockPool1M.Get()
|
||||
case 7:
|
||||
buf = BlockPool4M.Get()
|
||||
case 3:
|
||||
buf = BlockPool8M.Get()
|
||||
}
|
||||
return buf.([]byte)
|
||||
}
|
||||
|
||||
func Put(buf []byte) {
|
||||
// Safeguard: do not allow invalid buffers.
|
||||
switch c := cap(buf); uint32(c) {
|
||||
case Block64Kb:
|
||||
BlockPool64K.Put(buf[:c])
|
||||
case Block256Kb:
|
||||
BlockPool256K.Put(buf[:c])
|
||||
case Block1Mb:
|
||||
BlockPool1M.Put(buf[:c])
|
||||
case Block4Mb:
|
||||
BlockPool4M.Put(buf[:c])
|
||||
case Block8Mb:
|
||||
BlockPool8M.Put(buf[:c])
|
||||
}
|
||||
}
|
||||
|
||||
type CompressionLevel uint32
|
||||
|
||||
const Fast CompressionLevel = 0
|
@@ -2,12 +2,13 @@
|
||||
// +build gc
|
||||
// +build !noasm
|
||||
|
||||
#include "go_asm.h"
|
||||
#include "textflag.h"
|
||||
|
||||
// AX scratch
|
||||
// BX scratch
|
||||
// CX scratch
|
||||
// DX token
|
||||
// CX literal and match lengths
|
||||
// DX token, match offset
|
||||
//
|
||||
// DI &dst
|
||||
// SI &src
|
||||
@@ -16,9 +17,11 @@
|
||||
// R11 &dst
|
||||
// R12 short output end
|
||||
// R13 short input end
|
||||
// func decodeBlock(dst, src []byte) int
|
||||
// using 50 bytes of stack currently
|
||||
TEXT ·decodeBlock(SB), NOSPLIT, $64-56
|
||||
// R14 &dict
|
||||
// R15 len(dict)
|
||||
|
||||
// func decodeBlock(dst, src, dict []byte) int
|
||||
TEXT ·decodeBlock(SB), NOSPLIT, $48-80
|
||||
MOVQ dst_base+0(FP), DI
|
||||
MOVQ DI, R11
|
||||
MOVQ dst_len+8(FP), R8
|
||||
@@ -26,8 +29,13 @@ TEXT ·decodeBlock(SB), NOSPLIT, $64-56
|
||||
|
||||
MOVQ src_base+24(FP), SI
|
||||
MOVQ src_len+32(FP), R9
|
||||
CMPQ R9, $0
|
||||
JE err_corrupt
|
||||
ADDQ SI, R9
|
||||
|
||||
MOVQ dict_base+48(FP), R14
|
||||
MOVQ dict_len+56(FP), R15
|
||||
|
||||
// shortcut ends
|
||||
// short output end
|
||||
MOVQ R8, R12
|
||||
@@ -36,28 +44,26 @@ TEXT ·decodeBlock(SB), NOSPLIT, $64-56
|
||||
MOVQ R9, R13
|
||||
SUBQ $16, R13
|
||||
|
||||
loop:
|
||||
// for si < len(src)
|
||||
CMPQ SI, R9
|
||||
JGE end
|
||||
XORL CX, CX
|
||||
|
||||
loop:
|
||||
// token := uint32(src[si])
|
||||
MOVBQZX (SI), DX
|
||||
MOVBLZX (SI), DX
|
||||
INCQ SI
|
||||
|
||||
// lit_len = token >> 4
|
||||
// if lit_len > 0
|
||||
// CX = lit_len
|
||||
MOVQ DX, CX
|
||||
SHRQ $4, CX
|
||||
MOVL DX, CX
|
||||
SHRL $4, CX
|
||||
|
||||
// if lit_len != 0xF
|
||||
CMPQ CX, $0xF
|
||||
JEQ lit_len_loop_pre
|
||||
CMPL CX, $0xF
|
||||
JEQ lit_len_loop
|
||||
CMPQ DI, R12
|
||||
JGE lit_len_loop_pre
|
||||
JAE copy_literal
|
||||
CMPQ SI, R13
|
||||
JGE lit_len_loop_pre
|
||||
JAE copy_literal
|
||||
|
||||
// copy shortcut
|
||||
|
||||
@@ -76,28 +82,32 @@ loop:
|
||||
ADDQ CX, DI
|
||||
ADDQ CX, SI
|
||||
|
||||
MOVQ DX, CX
|
||||
ANDQ $0xF, CX
|
||||
MOVL DX, CX
|
||||
ANDL $0xF, CX
|
||||
|
||||
// The second stage: prepare for match copying, decode full info.
|
||||
// If it doesn't work out, the info won't be wasted.
|
||||
// offset := uint16(data[:2])
|
||||
MOVWQZX (SI), DX
|
||||
MOVWLZX (SI), DX
|
||||
TESTL DX, DX
|
||||
JE err_corrupt
|
||||
ADDQ $2, SI
|
||||
JC err_short_buf
|
||||
|
||||
MOVQ DI, AX
|
||||
SUBQ DX, AX
|
||||
JC err_corrupt
|
||||
CMPQ AX, DI
|
||||
JGT err_short_buf
|
||||
JA err_short_buf
|
||||
|
||||
// if we can't do the second stage then jump straight to read the
|
||||
// match length, we already have the offset.
|
||||
CMPQ CX, $0xF
|
||||
CMPL CX, $0xF
|
||||
JEQ match_len_loop_pre
|
||||
CMPQ DX, $8
|
||||
CMPL DX, $8
|
||||
JLT match_len_loop_pre
|
||||
CMPQ AX, R11
|
||||
JLT err_short_buf
|
||||
JB match_len_loop_pre
|
||||
|
||||
// memcpy(op + 0, match + 0, 8);
|
||||
MOVQ (AX), BX
|
||||
@@ -109,72 +119,63 @@ loop:
|
||||
MOVW 16(AX), BX
|
||||
MOVW BX, 16(DI)
|
||||
|
||||
ADDQ $4, DI // minmatch
|
||||
ADDQ CX, DI
|
||||
LEAQ const_minMatch(DI)(CX*1), DI
|
||||
|
||||
// shortcut complete, load next token
|
||||
JMP loop
|
||||
|
||||
lit_len_loop_pre:
|
||||
// if lit_len > 0
|
||||
CMPQ CX, $0
|
||||
JEQ offset
|
||||
CMPQ CX, $0xF
|
||||
JNE copy_literal
|
||||
JMP loopcheck
|
||||
|
||||
// Read the rest of the literal length:
|
||||
// do { BX = src[si++]; lit_len += BX } while (BX == 0xFF).
|
||||
lit_len_loop:
|
||||
// for src[si] == 0xFF
|
||||
CMPB (SI), $0xFF
|
||||
JNE lit_len_finalise
|
||||
CMPQ SI, R9
|
||||
JAE err_short_buf
|
||||
|
||||
// bounds check src[si+1]
|
||||
MOVQ SI, AX
|
||||
ADDQ $1, AX
|
||||
CMPQ AX, R9
|
||||
JGT err_short_buf
|
||||
|
||||
// lit_len += 0xFF
|
||||
ADDQ $0xFF, CX
|
||||
MOVBLZX (SI), BX
|
||||
INCQ SI
|
||||
JMP lit_len_loop
|
||||
ADDQ BX, CX
|
||||
|
||||
lit_len_finalise:
|
||||
// lit_len += int(src[si])
|
||||
// si++
|
||||
MOVBQZX (SI), AX
|
||||
ADDQ AX, CX
|
||||
INCQ SI
|
||||
CMPB BX, $0xFF
|
||||
JE lit_len_loop
|
||||
|
||||
copy_literal:
|
||||
// bounds check src and dst
|
||||
MOVQ SI, AX
|
||||
ADDQ CX, AX
|
||||
JC err_short_buf
|
||||
CMPQ AX, R9
|
||||
JGT err_short_buf
|
||||
JA err_short_buf
|
||||
|
||||
MOVQ DI, AX
|
||||
ADDQ CX, AX
|
||||
CMPQ AX, R8
|
||||
JGT err_short_buf
|
||||
MOVQ DI, BX
|
||||
ADDQ CX, BX
|
||||
JC err_short_buf
|
||||
CMPQ BX, R8
|
||||
JA err_short_buf
|
||||
|
||||
// whats a good cut off to call memmove?
|
||||
CMPQ CX, $16
|
||||
// Copy literals of <=48 bytes through the XMM registers.
|
||||
CMPQ CX, $48
|
||||
JGT memmove_lit
|
||||
|
||||
// if len(dst[di:]) < 16
|
||||
// if len(dst[di:]) < 48
|
||||
MOVQ R8, AX
|
||||
SUBQ DI, AX
|
||||
CMPQ AX, $16
|
||||
CMPQ AX, $48
|
||||
JLT memmove_lit
|
||||
|
||||
// if len(src[si:]) < 16
|
||||
MOVQ R9, AX
|
||||
SUBQ SI, AX
|
||||
CMPQ AX, $16
|
||||
// if len(src[si:]) < 48
|
||||
MOVQ R9, BX
|
||||
SUBQ SI, BX
|
||||
CMPQ BX, $48
|
||||
JLT memmove_lit
|
||||
|
||||
MOVOU (SI), X0
|
||||
MOVOU 16(SI), X1
|
||||
MOVOU 32(SI), X2
|
||||
MOVOU X0, (DI)
|
||||
MOVOU X1, 16(DI)
|
||||
MOVOU X2, 32(DI)
|
||||
|
||||
ADDQ CX, SI
|
||||
ADDQ CX, DI
|
||||
|
||||
JMP finish_lit_copy
|
||||
|
||||
@@ -183,18 +184,20 @@ memmove_lit:
|
||||
MOVQ DI, 0(SP)
|
||||
MOVQ SI, 8(SP)
|
||||
MOVQ CX, 16(SP)
|
||||
// spill
|
||||
|
||||
// Spill registers. Increment SI, DI now so we don't need to save CX.
|
||||
ADDQ CX, DI
|
||||
ADDQ CX, SI
|
||||
MOVQ DI, 24(SP)
|
||||
MOVQ SI, 32(SP)
|
||||
MOVQ CX, 40(SP) // need len to inc SI, DI after
|
||||
MOVB DX, 48(SP)
|
||||
MOVL DX, 40(SP)
|
||||
|
||||
CALL runtime·memmove(SB)
|
||||
|
||||
// restore registers
|
||||
MOVQ 24(SP), DI
|
||||
MOVQ 32(SP), SI
|
||||
MOVQ 40(SP), CX
|
||||
MOVB 48(SP), DX
|
||||
MOVL 40(SP), DX
|
||||
|
||||
// recalc initial values
|
||||
MOVQ dst_base+0(FP), R8
|
||||
@@ -202,77 +205,62 @@ memmove_lit:
|
||||
ADDQ dst_len+8(FP), R8
|
||||
MOVQ src_base+24(FP), R9
|
||||
ADDQ src_len+32(FP), R9
|
||||
MOVQ dict_base+48(FP), R14
|
||||
MOVQ dict_len+56(FP), R15
|
||||
MOVQ R8, R12
|
||||
SUBQ $32, R12
|
||||
MOVQ R9, R13
|
||||
SUBQ $16, R13
|
||||
|
||||
finish_lit_copy:
|
||||
ADDQ CX, SI
|
||||
ADDQ CX, DI
|
||||
|
||||
CMPQ SI, R9
|
||||
JGE end
|
||||
|
||||
offset:
|
||||
// CX := mLen
|
||||
// free up DX to use for offset
|
||||
MOVQ DX, CX
|
||||
MOVL DX, CX
|
||||
ANDL $0xF, CX
|
||||
|
||||
MOVQ SI, AX
|
||||
ADDQ $2, AX
|
||||
CMPQ AX, R9
|
||||
JGT err_short_buf
|
||||
CMPQ SI, R9
|
||||
JAE end
|
||||
|
||||
// offset
|
||||
// DX := int(src[si]) | int(src[si+1])<<8
|
||||
MOVWQZX (SI), DX
|
||||
// si += 2
|
||||
// DX := int(src[si-2]) | int(src[si-1])<<8
|
||||
ADDQ $2, SI
|
||||
JC err_short_buf
|
||||
CMPQ SI, R9
|
||||
JA err_short_buf
|
||||
MOVWQZX -2(SI), DX
|
||||
|
||||
// 0 offset is invalid
|
||||
CMPQ DX, $0
|
||||
JEQ err_corrupt
|
||||
|
||||
ANDB $0xF, CX
|
||||
TESTL DX, DX
|
||||
JEQ err_corrupt
|
||||
|
||||
match_len_loop_pre:
|
||||
// if mlen != 0xF
|
||||
CMPB CX, $0xF
|
||||
JNE copy_match
|
||||
|
||||
// do { BX = src[si++]; mlen += BX } while (BX == 0xFF).
|
||||
match_len_loop:
|
||||
// for src[si] == 0xFF
|
||||
// lit_len += 0xFF
|
||||
CMPB (SI), $0xFF
|
||||
JNE match_len_finalise
|
||||
CMPQ SI, R9
|
||||
JAE err_short_buf
|
||||
|
||||
// bounds check src[si+1]
|
||||
MOVQ SI, AX
|
||||
ADDQ $1, AX
|
||||
CMPQ AX, R9
|
||||
JGT err_short_buf
|
||||
|
||||
ADDQ $0xFF, CX
|
||||
MOVBLZX (SI), BX
|
||||
INCQ SI
|
||||
JMP match_len_loop
|
||||
ADDQ BX, CX
|
||||
|
||||
match_len_finalise:
|
||||
// lit_len += int(src[si])
|
||||
// si++
|
||||
MOVBQZX (SI), AX
|
||||
ADDQ AX, CX
|
||||
INCQ SI
|
||||
CMPB BX, $0xFF
|
||||
JE match_len_loop
|
||||
|
||||
copy_match:
|
||||
// mLen += minMatch
|
||||
ADDQ $4, CX
|
||||
ADDQ $const_minMatch, CX
|
||||
|
||||
// check we have match_len bytes left in dst
|
||||
// di+match_len < len(dst)
|
||||
MOVQ DI, AX
|
||||
ADDQ CX, AX
|
||||
JC err_short_buf
|
||||
CMPQ AX, R8
|
||||
JGT err_short_buf
|
||||
JA err_short_buf
|
||||
|
||||
// DX = offset
|
||||
// CX = match_len
|
||||
@@ -282,14 +270,14 @@ copy_match:
|
||||
|
||||
// check BX is within dst
|
||||
// if BX < &dst
|
||||
JC copy_match_from_dict
|
||||
CMPQ BX, R11
|
||||
JLT err_short_buf
|
||||
JBE copy_match_from_dict
|
||||
|
||||
// if offset + match_len < di
|
||||
MOVQ BX, AX
|
||||
ADDQ CX, AX
|
||||
LEAQ (BX)(CX*1), AX
|
||||
CMPQ DI, AX
|
||||
JGT copy_interior_match
|
||||
JA copy_interior_match
|
||||
|
||||
// AX := len(dst[:di])
|
||||
// MOVQ DI, AX
|
||||
@@ -309,11 +297,9 @@ copy_match_loop:
|
||||
INCQ DI
|
||||
INCQ BX
|
||||
DECQ CX
|
||||
JNZ copy_match_loop
|
||||
|
||||
CMPQ CX, $0
|
||||
JGT copy_match_loop
|
||||
|
||||
JMP loop
|
||||
JMP loopcheck
|
||||
|
||||
copy_interior_match:
|
||||
CMPQ CX, $16
|
||||
@@ -329,23 +315,97 @@ copy_interior_match:
|
||||
MOVOU X0, (DI)
|
||||
|
||||
ADDQ CX, DI
|
||||
JMP loop
|
||||
XORL CX, CX
|
||||
JMP loopcheck
|
||||
|
||||
copy_match_from_dict:
|
||||
// CX = match_len
|
||||
// BX = &dst + (di - offset)
|
||||
|
||||
// AX = offset - di = dict_bytes_available => count of bytes potentially covered by the dictionary
|
||||
MOVQ R11, AX
|
||||
SUBQ BX, AX
|
||||
|
||||
// BX = len(dict) - dict_bytes_available
|
||||
MOVQ R15, BX
|
||||
SUBQ AX, BX
|
||||
JS err_short_dict
|
||||
|
||||
ADDQ R14, BX
|
||||
|
||||
// if match_len > dict_bytes_available, match fits entirely within external dictionary : just copy
|
||||
CMPQ CX, AX
|
||||
JLT memmove_match
|
||||
|
||||
// The match stretches over the dictionary and our block
|
||||
// 1) copy what comes from the dictionary
|
||||
// AX = dict_bytes_available = copy_size
|
||||
// BX = &dict_end - copy_size
|
||||
// CX = match_len
|
||||
|
||||
// memmove(to, from, len)
|
||||
MOVQ DI, 0(SP)
|
||||
MOVQ BX, 8(SP)
|
||||
MOVQ AX, 16(SP)
|
||||
// store extra stuff we want to recover
|
||||
// spill
|
||||
MOVQ DI, 24(SP)
|
||||
MOVQ SI, 32(SP)
|
||||
MOVQ CX, 40(SP)
|
||||
CALL runtime·memmove(SB)
|
||||
|
||||
// restore registers
|
||||
MOVQ 16(SP), AX // copy_size
|
||||
MOVQ 24(SP), DI
|
||||
MOVQ 32(SP), SI
|
||||
MOVQ 40(SP), CX // match_len
|
||||
|
||||
// recalc initial values
|
||||
MOVQ dst_base+0(FP), R8
|
||||
MOVQ R8, R11 // TODO: make these sensible numbers
|
||||
ADDQ dst_len+8(FP), R8
|
||||
MOVQ src_base+24(FP), R9
|
||||
ADDQ src_len+32(FP), R9
|
||||
MOVQ dict_base+48(FP), R14
|
||||
MOVQ dict_len+56(FP), R15
|
||||
MOVQ R8, R12
|
||||
SUBQ $32, R12
|
||||
MOVQ R9, R13
|
||||
SUBQ $16, R13
|
||||
|
||||
// di+=copy_size
|
||||
ADDQ AX, DI
|
||||
|
||||
// 2) copy the rest from the current block
|
||||
// CX = match_len - copy_size = rest_size
|
||||
SUBQ AX, CX
|
||||
MOVQ R11, BX
|
||||
|
||||
// check if we have a copy overlap
|
||||
// AX = &dst + rest_size
|
||||
MOVQ CX, AX
|
||||
ADDQ BX, AX
|
||||
// if &dst + rest_size > di, copy byte by byte
|
||||
CMPQ AX, DI
|
||||
|
||||
JA copy_match_loop
|
||||
|
||||
memmove_match:
|
||||
// memmove(to, from, len)
|
||||
MOVQ DI, 0(SP)
|
||||
MOVQ BX, 8(SP)
|
||||
MOVQ CX, 16(SP)
|
||||
// spill
|
||||
|
||||
// Spill registers. Increment DI now so we don't need to save CX.
|
||||
ADDQ CX, DI
|
||||
MOVQ DI, 24(SP)
|
||||
MOVQ SI, 32(SP)
|
||||
MOVQ CX, 40(SP) // need len to inc SI, DI after
|
||||
|
||||
CALL runtime·memmove(SB)
|
||||
|
||||
// restore registers
|
||||
MOVQ 24(SP), DI
|
||||
MOVQ 32(SP), SI
|
||||
MOVQ 40(SP), CX
|
||||
|
||||
// recalc initial values
|
||||
MOVQ dst_base+0(FP), R8
|
||||
@@ -357,19 +417,32 @@ memmove_match:
|
||||
SUBQ $32, R12
|
||||
MOVQ R9, R13
|
||||
SUBQ $16, R13
|
||||
MOVQ dict_base+48(FP), R14
|
||||
MOVQ dict_len+56(FP), R15
|
||||
XORL CX, CX
|
||||
|
||||
ADDQ CX, DI
|
||||
JMP loop
|
||||
loopcheck:
|
||||
// for si < len(src)
|
||||
CMPQ SI, R9
|
||||
JB loop
|
||||
|
||||
end:
|
||||
// Remaining length must be zero.
|
||||
TESTQ CX, CX
|
||||
JNE err_corrupt
|
||||
|
||||
SUBQ R11, DI
|
||||
MOVQ DI, ret+72(FP)
|
||||
RET
|
||||
|
||||
err_corrupt:
|
||||
MOVQ $-1, ret+48(FP)
|
||||
MOVQ $-1, ret+72(FP)
|
||||
RET
|
||||
|
||||
err_short_buf:
|
||||
MOVQ $-2, ret+48(FP)
|
||||
MOVQ $-2, ret+72(FP)
|
||||
RET
|
||||
|
||||
end:
|
||||
SUBQ R11, DI
|
||||
MOVQ DI, ret+48(FP)
|
||||
err_short_dict:
|
||||
MOVQ $-3, ret+72(FP)
|
||||
RET
|
pkg/metadata/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s (generated, vendored, new file, 231 lines)
@@ -0,0 +1,231 @@
|
||||
// +build gc
|
||||
// +build !noasm
|
||||
|
||||
#include "go_asm.h"
|
||||
#include "textflag.h"
|
||||
|
||||
// Register allocation.
|
||||
#define dst R0
|
||||
#define dstorig R1
|
||||
#define src R2
|
||||
#define dstend R3
|
||||
#define srcend R4
|
||||
#define match R5 // Match address.
|
||||
#define dictend R6
|
||||
#define token R7
|
||||
#define len R8 // Literal and match lengths.
|
||||
#define offset R7 // Match offset; overlaps with token.
|
||||
#define tmp1 R9
|
||||
#define tmp2 R11
|
||||
#define tmp3 R12
|
||||
|
||||
// func decodeBlock(dst, src, dict []byte) int
|
||||
TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $-4-40
|
||||
MOVW dst_base +0(FP), dst
|
||||
MOVW dst_len +4(FP), dstend
|
||||
MOVW src_base +12(FP), src
|
||||
MOVW src_len +16(FP), srcend
|
||||
|
||||
CMP $0, srcend
|
||||
BEQ shortSrc
|
||||
|
||||
ADD dst, dstend
|
||||
ADD src, srcend
|
||||
|
||||
MOVW dst, dstorig
|
||||
|
||||
loop:
|
||||
// Read token. Extract literal length.
|
||||
MOVBU.P 1(src), token
|
||||
MOVW token >> 4, len
|
||||
CMP $15, len
|
||||
BNE readLitlenDone
|
||||
|
||||
readLitlenLoop:
|
||||
CMP src, srcend
|
||||
BEQ shortSrc
|
||||
MOVBU.P 1(src), tmp1
|
||||
ADD.S tmp1, len
|
||||
BVS shortDst
|
||||
CMP $255, tmp1
|
||||
BEQ readLitlenLoop
|
||||
|
||||
readLitlenDone:
|
||||
CMP $0, len
|
||||
BEQ copyLiteralDone
|
||||
|
||||
// Bounds check dst+len and src+len.
|
||||
ADD.S dst, len, tmp1
|
||||
ADD.CC.S src, len, tmp2
|
||||
BCS shortSrc
|
||||
CMP dstend, tmp1
|
||||
//BHI shortDst // Uncomment for distinct error codes.
|
||||
CMP.LS srcend, tmp2
|
||||
BHI shortSrc
|
||||
|
||||
// Copy literal.
|
||||
CMP $4, len
|
||||
BLO copyLiteralFinish
|
||||
|
||||
// Copy 0-3 bytes until src is aligned.
|
||||
TST $1, src
|
||||
MOVBU.NE.P 1(src), tmp1
|
||||
MOVB.NE.P tmp1, 1(dst)
|
||||
SUB.NE $1, len
|
||||
|
||||
TST $2, src
|
||||
MOVHU.NE.P 2(src), tmp2
|
||||
MOVB.NE.P tmp2, 1(dst)
|
||||
MOVW.NE tmp2 >> 8, tmp1
|
||||
MOVB.NE.P tmp1, 1(dst)
|
||||
SUB.NE $2, len
|
||||
|
||||
B copyLiteralLoopCond
|
||||
|
||||
copyLiteralLoop:
|
||||
// Aligned load, unaligned write.
|
||||
MOVW.P 4(src), tmp1
|
||||
MOVW tmp1 >> 8, tmp2
|
||||
MOVB tmp2, 1(dst)
|
||||
MOVW tmp1 >> 16, tmp3
|
||||
MOVB tmp3, 2(dst)
|
||||
MOVW tmp1 >> 24, tmp2
|
||||
MOVB tmp2, 3(dst)
|
||||
MOVB.P tmp1, 4(dst)
|
||||
copyLiteralLoopCond:
|
||||
// Loop until len-4 < 0.
|
||||
SUB.S $4, len
|
||||
BPL copyLiteralLoop
|
||||
|
||||
copyLiteralFinish:
|
||||
// Copy remaining 0-3 bytes.
|
||||
// At this point, len may be < 0, but len&3 is still accurate.
|
||||
TST $1, len
|
||||
MOVB.NE.P 1(src), tmp3
|
||||
MOVB.NE.P tmp3, 1(dst)
|
||||
TST $2, len
|
||||
MOVB.NE.P 2(src), tmp1
|
||||
MOVB.NE.P tmp1, 2(dst)
|
||||
MOVB.NE -1(src), tmp2
|
||||
MOVB.NE tmp2, -1(dst)
|
||||
|
||||
copyLiteralDone:
|
||||
// Initial part of match length.
|
||||
// This frees up the token register for reuse as offset.
|
||||
AND $15, token, len
|
||||
|
||||
CMP src, srcend
|
||||
BEQ end
|
||||
|
||||
// Read offset.
|
||||
ADD.S $2, src
|
||||
BCS shortSrc
|
||||
CMP srcend, src
|
||||
BHI shortSrc
|
||||
MOVBU -2(src), offset
|
||||
MOVBU -1(src), tmp1
|
||||
ORR.S tmp1 << 8, offset
|
||||
BEQ corrupt
|
||||
|
||||
// Read rest of match length.
|
||||
CMP $15, len
|
||||
BNE readMatchlenDone
|
||||
|
||||
readMatchlenLoop:
|
||||
CMP src, srcend
|
||||
BEQ shortSrc
|
||||
MOVBU.P 1(src), tmp1
|
||||
ADD.S tmp1, len
|
||||
BVS shortDst
|
||||
CMP $255, tmp1
|
||||
BEQ readMatchlenLoop
|
||||
|
||||
readMatchlenDone:
|
||||
// Bounds check dst+len+minMatch.
|
||||
ADD.S dst, len, tmp1
|
||||
ADD.CC.S $const_minMatch, tmp1
|
||||
BCS shortDst
|
||||
CMP dstend, tmp1
|
||||
BHI shortDst
|
||||
|
||||
RSB dst, offset, match
|
||||
CMP dstorig, match
|
||||
BGE copyMatch4
|
||||
|
||||
// match < dstorig means the match starts in the dictionary,
|
||||
// at len(dict) - offset + (dst - dstorig).
|
||||
MOVW dict_base+24(FP), match
|
||||
MOVW dict_len +28(FP), dictend
|
||||
|
||||
ADD $const_minMatch, len
|
||||
|
||||
RSB dst, dstorig, tmp1
|
||||
RSB dictend, offset, tmp2
|
||||
ADD.S tmp2, tmp1
|
||||
BMI shortDict
|
||||
ADD match, dictend
|
||||
ADD tmp1, match
|
||||
|
||||
copyDict:
|
||||
MOVBU.P 1(match), tmp1
|
||||
MOVB.P tmp1, 1(dst)
|
||||
SUB.S $1, len
|
||||
CMP.NE match, dictend
|
||||
BNE copyDict
|
||||
|
||||
// If the match extends beyond the dictionary, the rest is at dstorig.
|
||||
CMP $0, len
|
||||
BEQ copyMatchDone
|
||||
MOVW dstorig, match
|
||||
B copyMatch
|
||||
|
||||
// Copy a regular match.
|
||||
// Since len+minMatch is at least four, we can do a 4× unrolled
|
||||
// byte copy loop. Using MOVW instead of four byte loads is faster,
|
||||
// but to remain portable we'd have to align match first, which is
|
||||
// too expensive. By alternating loads and stores, we also handle
|
||||
// the case offset < 4.
|
||||
copyMatch4:
|
||||
SUB.S $4, len
|
||||
MOVBU.P 4(match), tmp1
|
||||
MOVB.P tmp1, 4(dst)
|
||||
MOVBU -3(match), tmp2
|
||||
MOVB tmp2, -3(dst)
|
||||
MOVBU -2(match), tmp3
|
||||
MOVB tmp3, -2(dst)
|
||||
MOVBU -1(match), tmp1
|
||||
MOVB tmp1, -1(dst)
|
||||
BPL copyMatch4
|
||||
|
||||
// Restore len, which is now negative.
|
||||
ADD.S $4, len
|
||||
BEQ copyMatchDone
|
||||
|
||||
copyMatch:
|
||||
// Finish with a byte-at-a-time copy.
|
||||
SUB.S $1, len
|
||||
MOVBU.P 1(match), tmp2
|
||||
MOVB.P tmp2, 1(dst)
|
||||
BNE copyMatch
|
||||
|
||||
copyMatchDone:
|
||||
CMP src, srcend
|
||||
BNE loop
|
||||
|
||||
end:
|
||||
CMP $0, len
|
||||
BNE corrupt
|
||||
SUB dstorig, dst, tmp1
|
||||
MOVW tmp1, ret+36(FP)
|
||||
RET
|
||||
|
||||
// The error cases have distinct labels so we can put different
|
||||
// return codes here when debugging, or if the error returns need to
|
||||
// be changed.
|
||||
shortDict:
|
||||
shortDst:
|
||||
shortSrc:
|
||||
corrupt:
|
||||
MOVW $-1, tmp1
|
||||
MOVW tmp1, ret+36(FP)
|
||||
RET
|
pkg/metadata/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s (generated, vendored, new file, 230 lines)
@@ -0,0 +1,230 @@
|
||||
// +build gc
|
||||
// +build !noasm
|
||||
|
||||
// This implementation assumes that strict alignment checking is turned off.
|
||||
// The Go compiler makes the same assumption.
|
||||
|
||||
#include "go_asm.h"
|
||||
#include "textflag.h"
|
||||
|
||||
// Register allocation.
|
||||
#define dst R0
|
||||
#define dstorig R1
|
||||
#define src R2
|
||||
#define dstend R3
|
||||
#define dstend16 R4 // dstend - 16
|
||||
#define srcend R5
|
||||
#define srcend16 R6 // srcend - 16
|
||||
#define match R7 // Match address.
|
||||
#define dict R8
|
||||
#define dictlen R9
|
||||
#define dictend R10
|
||||
#define token R11
|
||||
#define len R12 // Literal and match lengths.
|
||||
#define lenRem R13
|
||||
#define offset R14 // Match offset.
|
||||
#define tmp1 R15
|
||||
#define tmp2 R16
|
||||
#define tmp3 R17
|
||||
#define tmp4 R19
|
||||
|
||||
// func decodeBlock(dst, src, dict []byte) int
|
||||
TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $0-80
|
||||
LDP dst_base+0(FP), (dst, dstend)
|
||||
ADD dst, dstend
|
||||
MOVD dst, dstorig
|
||||
|
||||
LDP src_base+24(FP), (src, srcend)
|
||||
CBZ srcend, shortSrc
|
||||
ADD src, srcend
|
||||
|
||||
// dstend16 = max(dstend-16, 0) and similarly for srcend16.
|
||||
SUBS $16, dstend, dstend16
|
||||
CSEL LO, ZR, dstend16, dstend16
|
||||
SUBS $16, srcend, srcend16
|
||||
CSEL LO, ZR, srcend16, srcend16
|
||||
|
||||
LDP dict_base+48(FP), (dict, dictlen)
|
||||
ADD dict, dictlen, dictend
|
||||
|
||||
loop:
|
||||
// Read token. Extract literal length.
|
||||
MOVBU.P 1(src), token
|
||||
LSR $4, token, len
|
||||
CMP $15, len
|
||||
BNE readLitlenDone
|
||||
|
||||
readLitlenLoop:
|
||||
CMP src, srcend
|
||||
BEQ shortSrc
|
||||
MOVBU.P 1(src), tmp1
|
||||
ADDS tmp1, len
|
||||
BVS shortDst
|
||||
CMP $255, tmp1
|
||||
BEQ readLitlenLoop
|
||||
|
||||
readLitlenDone:
|
||||
CBZ len, copyLiteralDone
|
||||
|
||||
// Bounds check dst+len and src+len.
|
||||
ADDS dst, len, tmp1
|
||||
BCS shortSrc
|
||||
ADDS src, len, tmp2
|
||||
BCS shortSrc
|
||||
CMP dstend, tmp1
|
||||
BHI shortDst
|
||||
CMP srcend, tmp2
|
||||
BHI shortSrc
|
||||
|
||||
// Copy literal.
|
||||
SUBS $16, len
|
||||
BLO copyLiteralShort
|
||||
|
||||
copyLiteralLoop:
|
||||
LDP.P 16(src), (tmp1, tmp2)
|
||||
STP.P (tmp1, tmp2), 16(dst)
|
||||
SUBS $16, len
|
||||
BPL copyLiteralLoop
|
||||
|
||||
// Copy (final part of) literal of length 0-15.
|
||||
// If we have >=16 bytes left in src and dst, just copy 16 bytes.
|
||||
copyLiteralShort:
|
||||
CMP dstend16, dst
|
||||
CCMP LO, src, srcend16, $0b0010 // 0010 = preserve carry (LO).
|
||||
BHS copyLiteralShortEnd
|
||||
|
||||
AND $15, len
|
||||
|
||||
LDP (src), (tmp1, tmp2)
|
||||
ADD len, src
|
||||
STP (tmp1, tmp2), (dst)
|
||||
ADD len, dst
|
||||
|
||||
B copyLiteralDone
|
||||
|
||||
// Safe but slow copy near the end of src, dst.
|
||||
copyLiteralShortEnd:
|
||||
TBZ $3, len, 3(PC)
|
||||
MOVD.P 8(src), tmp1
|
||||
MOVD.P tmp1, 8(dst)
|
||||
TBZ $2, len, 3(PC)
|
||||
MOVW.P 4(src), tmp2
|
||||
MOVW.P tmp2, 4(dst)
|
||||
TBZ $1, len, 3(PC)
|
||||
MOVH.P 2(src), tmp3
|
||||
MOVH.P tmp3, 2(dst)
|
||||
TBZ $0, len, 3(PC)
|
||||
MOVBU.P 1(src), tmp4
|
||||
MOVB.P tmp4, 1(dst)
|
||||
|
||||
copyLiteralDone:
|
||||
// Initial part of match length.
|
||||
AND $15, token, len
|
||||
|
||||
CMP src, srcend
|
||||
BEQ end
|
||||
|
||||
// Read offset.
|
||||
ADDS $2, src
|
||||
BCS shortSrc
|
||||
CMP srcend, src
|
||||
BHI shortSrc
|
||||
MOVHU -2(src), offset
|
||||
CBZ offset, corrupt
|
||||
|
||||
// Read rest of match length.
|
||||
CMP $15, len
|
||||
BNE readMatchlenDone
|
||||
|
||||
readMatchlenLoop:
|
||||
CMP src, srcend
|
||||
BEQ shortSrc
|
||||
MOVBU.P 1(src), tmp1
|
||||
ADDS tmp1, len
|
||||
BVS shortDst
|
||||
CMP $255, tmp1
|
||||
BEQ readMatchlenLoop
|
||||
|
||||
readMatchlenDone:
|
||||
ADD $const_minMatch, len
|
||||
|
||||
// Bounds check dst+len.
|
||||
ADDS dst, len, tmp2
|
||||
BCS shortDst
|
||||
CMP dstend, tmp2
|
||||
BHI shortDst
|
||||
|
||||
SUB offset, dst, match
|
||||
CMP dstorig, match
|
||||
BHS copyMatchTry8
|
||||
|
||||
// match < dstorig means the match starts in the dictionary,
|
||||
// at len(dict) - offset + (dst - dstorig).
|
||||
SUB dstorig, dst, tmp1
|
||||
SUB offset, dictlen, tmp2
|
||||
ADDS tmp2, tmp1
|
||||
BMI shortDict
|
||||
ADD dict, tmp1, match
|
||||
|
||||
copyDict:
|
||||
MOVBU.P 1(match), tmp3
|
||||
MOVB.P tmp3, 1(dst)
|
||||
SUBS $1, len
|
||||
CCMP NE, dictend, match, $0b0100 // 0100 sets the Z (EQ) flag.
|
||||
BNE copyDict
|
||||
|
||||
CBZ len, copyMatchDone
|
||||
|
||||
// If the match extends beyond the dictionary, the rest is at dstorig.
|
||||
// Recompute the offset for the next check.
|
||||
MOVD dstorig, match
|
||||
SUB dstorig, dst, offset
|
||||
|
||||
copyMatchTry8:
|
||||
// Copy doublewords if both len and offset are at least eight.
|
||||
// A 16-at-a-time loop doesn't provide a further speedup.
|
||||
CMP $8, len
|
||||
CCMP HS, offset, $8, $0
|
||||
BLO copyMatchLoop1
|
||||
|
||||
AND $7, len, lenRem
|
||||
SUB $8, len
|
||||
copyMatchLoop8:
|
||||
MOVD.P 8(match), tmp1
|
||||
MOVD.P tmp1, 8(dst)
|
||||
SUBS $8, len
|
||||
BPL copyMatchLoop8
|
||||
|
||||
MOVD (match)(len), tmp2 // match+len == match+lenRem-8.
|
||||
ADD lenRem, dst
|
||||
MOVD $0, len
|
||||
MOVD tmp2, -8(dst)
|
||||
B copyMatchDone
|
||||
|
||||
copyMatchLoop1:
|
||||
// Byte-at-a-time copy for small offsets.
|
||||
MOVBU.P 1(match), tmp2
|
||||
MOVB.P tmp2, 1(dst)
|
||||
SUBS $1, len
|
||||
BNE copyMatchLoop1
|
||||
|
||||
copyMatchDone:
|
||||
CMP src, srcend
|
||||
BNE loop
|
||||
|
||||
end:
|
||||
CBNZ len, corrupt
|
||||
SUB dstorig, dst, tmp1
|
||||
MOVD tmp1, ret+72(FP)
|
||||
RET
|
||||
|
||||
// The error cases have distinct labels so we can put different
|
||||
// return codes here when debugging, or if the error returns need to
|
||||
// be changed.
|
||||
shortDict:
|
||||
shortDst:
|
||||
shortSrc:
|
||||
corrupt:
|
||||
MOVD $-1, tmp1
|
||||
MOVD tmp1, ret+72(FP)
|
||||
RET
|
pkg/metadata/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go (generated, vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
|
||||
//go:build (amd64 || arm || arm64) && !appengine && gc && !noasm
|
||||
// +build amd64 arm arm64
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !noasm
|
||||
|
||||
package lz4block
|
||||
|
||||
//go:noescape
|
||||
func decodeBlock(dst, src, dict []byte) int
|
pkg/metadata/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go (generated, vendored, new file, 139 lines)
@@ -0,0 +1,139 @@
|
||||
//go:build (!amd64 && !arm && !arm64) || appengine || !gc || noasm
|
||||
// +build !amd64,!arm,!arm64 appengine !gc noasm
|
||||
|
||||
package lz4block
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
func decodeBlock(dst, src, dict []byte) (ret int) {
|
||||
// Restrict capacities so we don't read or write out of bounds.
|
||||
dst = dst[:len(dst):len(dst)]
|
||||
src = src[:len(src):len(src)]
|
||||
|
||||
const hasError = -2
|
||||
|
||||
if len(src) == 0 {
|
||||
return hasError
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if recover() != nil {
|
||||
ret = hasError
|
||||
}
|
||||
}()
|
||||
|
||||
var si, di uint
|
||||
for si < uint(len(src)) {
|
||||
// Literals and match lengths (token).
|
||||
b := uint(src[si])
|
||||
si++
|
||||
|
||||
// Literals.
|
||||
if lLen := b >> 4; lLen > 0 {
|
||||
switch {
|
||||
case lLen < 0xF && si+16 < uint(len(src)):
|
||||
// Shortcut 1
|
||||
// if we have enough room in src and dst, and the literals length
|
||||
// is small enough (0..14) then copy all 16 bytes, even if not all
|
||||
// are part of the literals.
|
||||
copy(dst[di:], src[si:si+16])
|
||||
si += lLen
|
||||
di += lLen
|
||||
if mLen := b & 0xF; mLen < 0xF {
|
||||
// Shortcut 2
|
||||
// if the match length (4..18) fits within the literals, then copy
|
||||
// all 18 bytes, even if not all are part of the literals.
|
||||
mLen += 4
|
||||
if offset := u16(src[si:]); mLen <= offset && offset < di {
|
||||
i := di - offset
|
||||
// The remaining buffer may not hold 18 bytes.
|
||||
// See https://github.com/pierrec/lz4/issues/51.
|
||||
if end := i + 18; end <= uint(len(dst)) {
|
||||
copy(dst[di:], dst[i:end])
|
||||
si += 2
|
||||
di += mLen
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
case lLen == 0xF:
|
||||
for {
|
||||
x := uint(src[si])
|
||||
if lLen += x; int(lLen) < 0 {
|
||||
return hasError
|
||||
}
|
||||
si++
|
||||
if x != 0xFF {
|
||||
break
|
||||
}
|
||||
}
|
||||
fallthrough
|
||||
default:
|
||||
copy(dst[di:di+lLen], src[si:si+lLen])
|
||||
si += lLen
|
||||
di += lLen
|
||||
}
|
||||
}
|
||||
|
||||
mLen := b & 0xF
|
||||
if si == uint(len(src)) && mLen == 0 {
|
||||
break
|
||||
} else if si >= uint(len(src)) {
|
||||
return hasError
|
||||
}
|
||||
|
||||
offset := u16(src[si:])
|
||||
if offset == 0 {
|
||||
return hasError
|
||||
}
|
||||
si += 2
|
||||
|
||||
// Match.
|
||||
mLen += minMatch
|
||||
if mLen == minMatch+0xF {
|
||||
for {
|
||||
x := uint(src[si])
|
||||
if mLen += x; int(mLen) < 0 {
|
||||
return hasError
|
||||
}
|
||||
si++
|
||||
if x != 0xFF {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Copy the match.
|
||||
if di < offset {
|
||||
// The match is beyond our block, meaning the first part
|
||||
// is in the dictionary.
|
||||
fromDict := dict[uint(len(dict))+di-offset:]
|
||||
n := uint(copy(dst[di:di+mLen], fromDict))
|
||||
di += n
|
||||
if mLen -= n; mLen == 0 {
|
||||
continue
|
||||
}
|
||||
// We copied n = offset-di bytes from the dictionary,
|
||||
// then set di = di+n = offset, so the following code
|
||||
// copies from dst[di-offset:] = dst[0:].
|
||||
}
|
||||
|
||||
expanded := dst[di-offset:]
|
||||
if mLen > offset {
|
||||
// Efficiently copy the match dst[di-offset:di] into the dst slice.
|
||||
bytesToCopy := offset * (mLen / offset)
|
||||
for n := offset; n <= bytesToCopy+offset; n *= 2 {
|
||||
copy(expanded[n:], expanded[:n])
|
||||
}
|
||||
di += bytesToCopy
|
||||
mLen -= bytesToCopy
|
||||
}
|
||||
di += uint(copy(dst[di:di+mLen], expanded[:mLen]))
|
||||
}
|
||||
|
||||
return int(di)
|
||||
}
|
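The doubling loop above handles overlapping matches (offset < mLen, an RLE-like repetition) in O(log n) copies instead of byte by byte. With offset = 3 and mLen = 10, bytesToCopy = 3*(10/3) = 9: the passes copy 3, 6, then 12 bytes (deliberately overshooting the match end inside dst), di then advances by 9, and the final copy writes the one remaining byte. The core trick in isolation (hypothetical helper, not the vendored code):

    // expandOverlap fills b by repeating its first period bytes,
    // doubling the copied span on every pass.
    func expandOverlap(b []byte, period int) {
        for n := period; n < len(b); n *= 2 {
            copy(b[n:], b[:n])
        }
    }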
||||
|
||||
func u16(p []byte) uint { return uint(binary.LittleEndian.Uint16(p)) }
|
pkg/metadata/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go (generated, vendored, new file, 19 lines)
@@ -0,0 +1,19 @@
|
||||
package lz4errors
|
||||
|
||||
type Error string
|
||||
|
||||
func (e Error) Error() string { return string(e) }
|
||||
|
||||
const (
|
||||
ErrInvalidSourceShortBuffer Error = "lz4: invalid source or destination buffer too short"
|
||||
ErrInvalidFrame Error = "lz4: bad magic number"
|
||||
ErrInternalUnhandledState Error = "lz4: unhandled state"
|
||||
ErrInvalidHeaderChecksum Error = "lz4: invalid header checksum"
|
||||
ErrInvalidBlockChecksum Error = "lz4: invalid block checksum"
|
||||
ErrInvalidFrameChecksum Error = "lz4: invalid frame checksum"
|
||||
ErrOptionInvalidCompressionLevel Error = "lz4: invalid compression level"
|
||||
ErrOptionClosedOrError Error = "lz4: cannot apply options on closed or in error object"
|
||||
ErrOptionInvalidBlockSize Error = "lz4: invalid block size"
|
||||
ErrOptionNotApplicable Error = "lz4: option not applicable"
|
||||
ErrWriterNotClosed Error = "lz4: writer not closed"
|
||||
)
|
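Declaring Error as a string type makes each value above a true constant that supports equality, so callers elsewhere in the module can match with errors.Is without allocating:

    if errors.Is(err, lz4errors.ErrInvalidSourceShortBuffer) {
        // grow dst and retry the compression
    }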
pkg/metadata/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go (generated, vendored, new file, 350 lines)
@@ -0,0 +1,350 @@
|
||||
package lz4stream
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/pierrec/lz4/v4/internal/lz4block"
|
||||
"github.com/pierrec/lz4/v4/internal/lz4errors"
|
||||
"github.com/pierrec/lz4/v4/internal/xxh32"
|
||||
)
|
||||
|
||||
type Blocks struct {
|
||||
Block *FrameDataBlock
|
||||
Blocks chan chan *FrameDataBlock
|
||||
mu sync.Mutex
|
||||
err error
|
||||
}
|
||||
|
||||
func (b *Blocks) initW(f *Frame, dst io.Writer, num int) {
|
||||
if num == 1 {
|
||||
b.Blocks = nil
|
||||
b.Block = NewFrameDataBlock(f)
|
||||
return
|
||||
}
|
||||
b.Block = nil
|
||||
if cap(b.Blocks) != num {
|
||||
b.Blocks = make(chan chan *FrameDataBlock, num)
|
||||
}
|
||||
// goroutine managing concurrent block compression goroutines.
|
||||
go func() {
|
||||
// Process next block compression item.
|
||||
for c := range b.Blocks {
|
||||
// Read the next compressed block result.
|
||||
// Waiting here ensures that the blocks are output in the order they were sent.
|
||||
// The incoming channel is always closed as it indicates to the caller that
|
||||
// the block has been processed.
|
||||
block := <-c
|
||||
if block == nil {
|
||||
// Notify the block compression routine that we are done with its result.
|
||||
// This is used when a sentinel block is sent to terminate the compression.
|
||||
close(c)
|
||||
return
|
||||
}
|
||||
// Do not attempt to write the block upon any previous failure.
|
||||
if b.err == nil {
|
||||
// Write the block.
|
||||
if err := block.Write(f, dst); err != nil {
|
||||
// Keep the first error.
|
||||
b.err = err
|
||||
// All pending compression goroutines need to shut down, so we need to keep going.
|
||||
}
|
||||
}
|
||||
close(c)
|
||||
}
|
||||
}()
|
||||
}
|
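The channel-of-channels queue above is what keeps concurrent compression ordered: each block enqueues a fresh result channel in submission order, workers fill them in parallel, and the single goroutine here drains them strictly in queue order. A minimal runnable sketch of the pattern (hypothetical names; the real code adds error tracking and a nil sentinel for shutdown):

    package main

    import "fmt"

    func main() {
        queue := make(chan chan int, 4)
        done := make(chan struct{})
        go func() { // single consumer preserves submission order
            for c := range queue {
                fmt.Println(<-c)
            }
            close(done)
        }()
        for i := 0; i < 4; i++ {
            c := make(chan int, 1)
            queue <- c // enqueue before the work finishes
            go func(i int, c chan int) { c <- i * i }(i, c)
        }
        close(queue)
        <-done // prints 0 1 4 9 in order
    }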
||||
|
||||
func (b *Blocks) close(f *Frame, num int) error {
|
||||
if num == 1 {
|
||||
if b.Block != nil {
|
||||
b.Block.Close(f)
|
||||
}
|
||||
err := b.err
|
||||
b.err = nil
|
||||
return err
|
||||
}
|
||||
if b.Blocks == nil {
|
||||
err := b.err
|
||||
b.err = nil
|
||||
return err
|
||||
}
|
||||
c := make(chan *FrameDataBlock)
|
||||
b.Blocks <- c
|
||||
c <- nil
|
||||
<-c
|
||||
err := b.err
|
||||
b.err = nil
|
||||
return err
|
||||
}
|
||||
|
||||
// ErrorR returns any error set while uncompressing a stream.
|
||||
func (b *Blocks) ErrorR() error {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.err
|
||||
}
|
||||
|
||||
// initR returns a channel that streams the uncompressed blocks if in concurrent
|
||||
// mode and no error. When the channel is closed, check for any error with b.ErrorR.
|
||||
//
|
||||
// If not in concurrent mode, the uncompressed block is b.Block and the returned error
|
||||
// needs to be checked.
|
||||
func (b *Blocks) initR(f *Frame, num int, src io.Reader) (chan []byte, error) {
|
||||
size := f.Descriptor.Flags.BlockSizeIndex()
|
||||
if num == 1 {
|
||||
b.Blocks = nil
|
||||
b.Block = NewFrameDataBlock(f)
|
||||
return nil, nil
|
||||
}
|
||||
b.Block = nil
|
||||
blocks := make(chan chan []byte, num)
|
||||
// data receives the uncompressed blocks.
|
||||
data := make(chan []byte)
|
||||
// Read blocks from the source sequentially
|
||||
// and uncompress them concurrently.
|
||||
|
||||
// In legacy mode, accrue the uncompressed sizes in cum.
|
||||
var cum uint32
|
||||
go func() {
|
||||
var cumx uint32
|
||||
var err error
|
||||
for b.ErrorR() == nil {
|
||||
block := NewFrameDataBlock(f)
|
||||
cumx, err = block.Read(f, src, 0)
|
||||
if err != nil {
|
||||
block.Close(f)
|
||||
break
|
||||
}
|
||||
// Recheck for an error as reading may be slow and uncompressing is expensive.
|
||||
if b.ErrorR() != nil {
|
||||
block.Close(f)
|
||||
break
|
||||
}
|
||||
c := make(chan []byte)
|
||||
blocks <- c
|
||||
go func() {
|
||||
defer block.Close(f)
|
||||
data, err := block.Uncompress(f, size.Get(), nil, false)
|
||||
if err != nil {
|
||||
b.closeR(err)
|
||||
// Close the block channel to indicate an error.
|
||||
close(c)
|
||||
} else {
|
||||
c <- data
|
||||
}
|
||||
}()
|
||||
}
|
||||
// End the collection loop and the data channel.
|
||||
c := make(chan []byte)
|
||||
blocks <- c
|
||||
c <- nil // signal the collection loop that we are done
|
||||
<-c // wait for the collect loop to complete
|
||||
if f.isLegacy() && cum == cumx {
|
||||
err = io.EOF
|
||||
}
|
||||
b.closeR(err)
|
||||
close(data)
|
||||
}()
|
||||
// Collect the uncompressed blocks and make them available
|
||||
// on the returned channel.
|
||||
go func(leg bool) {
|
||||
defer close(blocks)
|
||||
skipBlocks := false
|
||||
for c := range blocks {
|
||||
buf, ok := <-c
|
||||
if !ok {
|
||||
// A closed channel indicates an error.
|
||||
// All remaining channels should be discarded.
|
||||
skipBlocks = true
|
||||
continue
|
||||
}
|
||||
if buf == nil {
|
||||
// Signal to end the loop.
|
||||
close(c)
|
||||
return
|
||||
}
|
||||
if skipBlocks {
|
||||
// A previous error has occurred, skipping remaining channels.
|
||||
continue
|
||||
}
|
||||
// Perform checksum now as the blocks are received in order.
|
||||
if f.Descriptor.Flags.ContentChecksum() {
|
||||
_, _ = f.checksum.Write(buf)
|
||||
}
|
||||
if leg {
|
||||
cum += uint32(len(buf))
|
||||
}
|
||||
data <- buf
|
||||
close(c)
|
||||
}
|
||||
}(f.isLegacy())
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// closeR safely sets the error on b if not already set.
|
||||
func (b *Blocks) closeR(err error) {
|
||||
b.mu.Lock()
|
||||
if b.err == nil {
|
||||
b.err = err
|
||||
}
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
func NewFrameDataBlock(f *Frame) *FrameDataBlock {
|
||||
buf := f.Descriptor.Flags.BlockSizeIndex().Get()
|
||||
return &FrameDataBlock{Data: buf, data: buf}
|
||||
}
|
||||
|
||||
type FrameDataBlock struct {
|
||||
Size DataBlockSize
|
||||
Data []byte // compressed or uncompressed data (.data or .src)
|
||||
Checksum uint32
|
||||
data []byte // buffer for compressed data
|
||||
src []byte // uncompressed data
|
||||
err error // used in concurrent mode
|
||||
}
|
||||
|
||||
func (b *FrameDataBlock) Close(f *Frame) {
|
||||
b.Size = 0
|
||||
b.Checksum = 0
|
||||
b.err = nil
|
||||
if b.data != nil {
|
||||
// Block was not already closed.
|
||||
lz4block.Put(b.data)
|
||||
b.Data = nil
|
||||
b.data = nil
|
||||
b.src = nil
|
||||
}
|
||||
}
|
||||
|
||||
// Block compression errors are ignored since the buffer is sized appropriately.
|
||||
func (b *FrameDataBlock) Compress(f *Frame, src []byte, level lz4block.CompressionLevel) *FrameDataBlock {
|
||||
data := b.data
|
||||
if f.isLegacy() {
|
||||
// In legacy mode, the buffer is sized according to CompressBlockBound,
|
||||
// but only 8Mb is buffered for compression.
|
||||
src = src[:8<<20]
|
||||
} else {
|
||||
data = data[:len(src)] // trigger the incompressible flag in CompressBlock
|
||||
}
|
||||
var n int
|
||||
switch level {
|
||||
case lz4block.Fast:
|
||||
n, _ = lz4block.CompressBlock(src, data)
|
||||
default:
|
||||
n, _ = lz4block.CompressBlockHC(src, data, level)
|
||||
}
|
||||
if n == 0 {
|
||||
b.Size.UncompressedSet(true)
|
||||
b.Data = src
|
||||
} else {
|
||||
b.Size.UncompressedSet(false)
|
||||
b.Data = data[:n]
|
||||
}
|
||||
b.Size.sizeSet(len(b.Data))
|
||||
b.src = src // keep track of the source for content checksum
|
||||
|
||||
if f.Descriptor.Flags.BlockChecksum() {
|
||||
b.Checksum = xxh32.ChecksumZero(src)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *FrameDataBlock) Write(f *Frame, dst io.Writer) error {
|
||||
// Write is called in the same order as blocks are compressed,
|
||||
// so content checksum must be done here.
|
||||
if f.Descriptor.Flags.ContentChecksum() {
|
||||
_, _ = f.checksum.Write(b.src)
|
||||
}
|
||||
buf := f.buf[:]
|
||||
binary.LittleEndian.PutUint32(buf, uint32(b.Size))
|
||||
if _, err := dst.Write(buf[:4]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := dst.Write(b.Data); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if b.Checksum == 0 {
|
||||
return nil
|
||||
}
|
||||
binary.LittleEndian.PutUint32(buf, b.Checksum)
|
||||
_, err := dst.Write(buf[:4])
|
||||
return err
|
||||
}
|
||||
|
||||
// Read updates b with the next block data, size and checksum if available.
|
||||
func (b *FrameDataBlock) Read(f *Frame, src io.Reader, cum uint32) (uint32, error) {
|
||||
x, err := f.readUint32(src)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if f.isLegacy() {
|
||||
switch x {
|
||||
case frameMagicLegacy:
|
||||
// Concatenated legacy frame.
|
||||
return b.Read(f, src, cum)
|
||||
case cum:
|
||||
// Only works in non concurrent mode, for concurrent mode
|
||||
// it is handled separately.
|
||||
// Linux kernel format appends the total uncompressed size at the end.
|
||||
return 0, io.EOF
|
||||
}
|
||||
} else if x == 0 {
|
||||
// Marker for end of stream.
|
||||
return 0, io.EOF
|
||||
}
|
||||
b.Size = DataBlockSize(x)
|
||||
|
||||
size := b.Size.size()
|
||||
if size > cap(b.data) {
|
||||
return x, lz4errors.ErrOptionInvalidBlockSize
|
||||
}
|
||||
b.data = b.data[:size]
|
||||
if _, err := io.ReadFull(src, b.data); err != nil {
|
||||
return x, err
|
||||
}
|
||||
if f.Descriptor.Flags.BlockChecksum() {
|
||||
sum, err := f.readUint32(src)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
b.Checksum = sum
|
||||
}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
func (b *FrameDataBlock) Uncompress(f *Frame, dst, dict []byte, sum bool) ([]byte, error) {
|
||||
if b.Size.Uncompressed() {
|
||||
n := copy(dst, b.data)
|
||||
dst = dst[:n]
|
||||
} else {
|
||||
n, err := lz4block.UncompressBlock(b.data, dst, dict)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dst = dst[:n]
|
||||
}
|
||||
if f.Descriptor.Flags.BlockChecksum() {
|
||||
if c := xxh32.ChecksumZero(dst); c != b.Checksum {
|
||||
err := fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidBlockChecksum, c, b.Checksum)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if sum && f.Descriptor.Flags.ContentChecksum() {
|
||||
_, _ = f.checksum.Write(dst)
|
||||
}
|
||||
return dst, nil
|
||||
}
|
||||
|
||||
func (f *Frame) readUint32(r io.Reader) (x uint32, err error) {
|
||||
if _, err = io.ReadFull(r, f.buf[:4]); err != nil {
|
||||
return
|
||||
}
|
||||
x = binary.LittleEndian.Uint32(f.buf[:4])
|
||||
return
|
||||
}
|
pkg/metadata/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go (generated, vendored, new file, 204 lines)
@@ -0,0 +1,204 @@
|
||||
// Package lz4stream provides the types that support reading and writing LZ4 data streams.
|
||||
package lz4stream
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/pierrec/lz4/v4/internal/lz4block"
|
||||
"github.com/pierrec/lz4/v4/internal/lz4errors"
|
||||
"github.com/pierrec/lz4/v4/internal/xxh32"
|
||||
)
|
||||
|
||||
//go:generate go run gen.go
|
||||
|
||||
const (
|
||||
frameMagic uint32 = 0x184D2204
|
||||
frameSkipMagic uint32 = 0x184D2A50
|
||||
frameMagicLegacy uint32 = 0x184C2102
|
||||
)
|
||||
|
||||
func NewFrame() *Frame {
|
||||
return &Frame{}
|
||||
}
|
||||
|
||||
type Frame struct {
|
||||
buf [15]byte // frame descriptor needs at most 4(magic)+2(flags)+8(size)+1(checksum) = 15 bytes
|
||||
Magic uint32
|
||||
Descriptor FrameDescriptor
|
||||
Blocks Blocks
|
||||
Checksum uint32
|
||||
checksum xxh32.XXHZero
|
||||
}
|
||||
|
||||
// Reset allows reusing the Frame.
|
||||
// The Descriptor configuration is not modified.
|
||||
func (f *Frame) Reset(num int) {
|
||||
f.Magic = 0
|
||||
f.Descriptor.Checksum = 0
|
||||
f.Descriptor.ContentSize = 0
|
||||
_ = f.Blocks.close(f, num)
|
||||
f.Checksum = 0
|
||||
}
|
||||
|
||||
func (f *Frame) InitW(dst io.Writer, num int, legacy bool) {
|
||||
if legacy {
|
||||
f.Magic = frameMagicLegacy
|
||||
idx := lz4block.Index(lz4block.Block8Mb)
|
||||
f.Descriptor.Flags.BlockSizeIndexSet(idx)
|
||||
} else {
|
||||
f.Magic = frameMagic
|
||||
f.Descriptor.initW()
|
||||
}
|
||||
f.Blocks.initW(f, dst, num)
|
||||
f.checksum.Reset()
|
||||
}
|
||||
|
||||
func (f *Frame) CloseW(dst io.Writer, num int) error {
|
||||
if err := f.Blocks.close(f, num); err != nil {
|
||||
return err
|
||||
}
|
||||
if f.isLegacy() {
|
||||
return nil
|
||||
}
|
||||
buf := f.buf[:0]
|
||||
// End mark (data block size of uint32(0)).
|
||||
buf = append(buf, 0, 0, 0, 0)
|
||||
if f.Descriptor.Flags.ContentChecksum() {
|
||||
buf = f.checksum.Sum(buf)
|
||||
}
|
||||
_, err := dst.Write(buf)
|
||||
return err
|
||||
}
|
||||
|
||||
func (f *Frame) isLegacy() bool {
|
||||
return f.Magic == frameMagicLegacy
|
||||
}
|
||||
|
||||
func (f *Frame) ParseHeaders(src io.Reader) error {
|
||||
if f.Magic > 0 {
|
||||
// Header already read.
|
||||
return nil
|
||||
}
|
||||
|
||||
newFrame:
|
||||
var err error
|
||||
if f.Magic, err = f.readUint32(src); err != nil {
|
||||
return err
|
||||
}
|
||||
switch m := f.Magic; {
|
||||
case m == frameMagic || m == frameMagicLegacy:
|
||||
// All 16 values of frameSkipMagic are valid.
|
||||
case m>>8 == frameSkipMagic>>8:
|
||||
skip, err := f.readUint32(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.CopyN(ioutil.Discard, src, int64(skip)); err != nil {
|
||||
return err
|
||||
}
|
||||
goto newFrame
|
||||
default:
|
||||
return lz4errors.ErrInvalidFrame
|
||||
}
|
||||
if err := f.Descriptor.initR(f, src); err != nil {
|
||||
return err
|
||||
}
|
||||
f.checksum.Reset()
|
||||
return nil
|
||||
}
|
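The m>>8 comparison ignores the low byte of the magic so that every skippable-frame magic (0x184D2A50 through 0x184D2A5F in the LZ4 frame format) is accepted at once; the following four bytes give a little-endian length to discard, after which ParseHeaders retries on the next frame via the goto.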
||||
|
||||
func (f *Frame) InitR(src io.Reader, num int) (chan []byte, error) {
|
||||
return f.Blocks.initR(f, num, src)
|
||||
}
|
||||
|
||||
func (f *Frame) CloseR(src io.Reader) (err error) {
|
||||
if f.isLegacy() {
|
||||
return nil
|
||||
}
|
||||
if !f.Descriptor.Flags.ContentChecksum() {
|
||||
return nil
|
||||
}
|
||||
if f.Checksum, err = f.readUint32(src); err != nil {
|
||||
return err
|
||||
}
|
||||
if c := f.checksum.Sum32(); c != f.Checksum {
|
||||
return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidFrameChecksum, c, f.Checksum)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type FrameDescriptor struct {
|
||||
Flags DescriptorFlags
|
||||
ContentSize uint64
|
||||
Checksum uint8
|
||||
}
|
||||
|
||||
func (fd *FrameDescriptor) initW() {
|
||||
fd.Flags.VersionSet(1)
|
||||
fd.Flags.BlockIndependenceSet(true)
|
||||
}
|
||||
|
||||
func (fd *FrameDescriptor) Write(f *Frame, dst io.Writer) error {
|
||||
if fd.Checksum > 0 {
|
||||
// Header already written.
|
||||
return nil
|
||||
}
|
||||
|
||||
buf := f.buf[:4]
|
||||
// Write the magic number here even though it belongs to the Frame.
|
||||
binary.LittleEndian.PutUint32(buf, f.Magic)
|
||||
if !f.isLegacy() {
|
||||
buf = buf[:4+2]
|
||||
binary.LittleEndian.PutUint16(buf[4:], uint16(fd.Flags))
|
||||
|
||||
if fd.Flags.Size() {
|
||||
buf = buf[:4+2+8]
|
||||
binary.LittleEndian.PutUint64(buf[4+2:], fd.ContentSize)
|
||||
}
|
||||
fd.Checksum = descriptorChecksum(buf[4:])
|
||||
buf = append(buf, fd.Checksum)
|
||||
}
|
||||
|
||||
_, err := dst.Write(buf)
|
||||
return err
|
||||
}
|
||||
|
||||
func (fd *FrameDescriptor) initR(f *Frame, src io.Reader) error {
|
||||
if f.isLegacy() {
|
||||
idx := lz4block.Index(lz4block.Block8Mb)
|
||||
f.Descriptor.Flags.BlockSizeIndexSet(idx)
|
||||
return nil
|
||||
}
|
||||
// Read the flags and the checksum, hoping that there is no content size.
|
||||
buf := f.buf[:3]
|
||||
if _, err := io.ReadFull(src, buf); err != nil {
|
||||
return err
|
||||
}
|
||||
descr := binary.LittleEndian.Uint16(buf)
|
||||
fd.Flags = DescriptorFlags(descr)
|
||||
if fd.Flags.Size() {
|
||||
// Append the 8 missing bytes.
|
||||
buf = buf[:3+8]
|
||||
if _, err := io.ReadFull(src, buf[3:]); err != nil {
|
||||
return err
|
||||
}
|
||||
fd.ContentSize = binary.LittleEndian.Uint64(buf[2:])
|
||||
}
|
||||
fd.Checksum = buf[len(buf)-1] // the checksum is the last byte
|
||||
buf = buf[:len(buf)-1] // all descriptor fields except checksum
|
||||
if c := descriptorChecksum(buf); fd.Checksum != c {
|
||||
return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidHeaderChecksum, c, fd.Checksum)
|
||||
}
|
||||
// Validate the elements that can be.
|
||||
if idx := fd.Flags.BlockSizeIndex(); !idx.IsValid() {
|
||||
return lz4errors.ErrOptionInvalidBlockSize
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func descriptorChecksum(buf []byte) byte {
|
||||
return byte(xxh32.ChecksumZero(buf) >> 8)
|
||||
}
|
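This matches the frame format's HC byte: the header checksum is the second byte of xxh32 over the descriptor fields, i.e. (xxh32(descriptor) >> 8) & 0xFF.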
pkg/metadata/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go (generated, vendored, new file, 103 lines)
@@ -0,0 +1,103 @@
|
||||
// Code generated by `gen.exe`. DO NOT EDIT.
|
||||
|
||||
package lz4stream
|
||||
|
||||
import "github.com/pierrec/lz4/v4/internal/lz4block"
|
||||
|
||||
// DescriptorFlags is defined as follows:
|
||||
// field bits
|
||||
// ----- ----
|
||||
// _ 2
|
||||
// ContentChecksum 1
|
||||
// Size 1
|
||||
// BlockChecksum 1
|
||||
// BlockIndependence 1
|
||||
// Version 2
|
||||
// _ 4
|
||||
// BlockSizeIndex 3
|
||||
// _ 1
|
||||
type DescriptorFlags uint16
|
||||
|
||||
// Getters.
|
||||
func (x DescriptorFlags) ContentChecksum() bool { return x>>2&1 != 0 }
|
||||
func (x DescriptorFlags) Size() bool { return x>>3&1 != 0 }
|
||||
func (x DescriptorFlags) BlockChecksum() bool { return x>>4&1 != 0 }
|
||||
func (x DescriptorFlags) BlockIndependence() bool { return x>>5&1 != 0 }
|
||||
func (x DescriptorFlags) Version() uint16 { return uint16(x >> 6 & 0x3) }
|
||||
func (x DescriptorFlags) BlockSizeIndex() lz4block.BlockSizeIndex {
|
||||
return lz4block.BlockSizeIndex(x >> 12 & 0x7)
|
||||
}
|
||||
|
||||
// Setters.
|
||||
func (x *DescriptorFlags) ContentChecksumSet(v bool) *DescriptorFlags {
|
||||
const b = 1 << 2
|
||||
if v {
|
||||
*x = *x&^b | b
|
||||
} else {
|
||||
*x &^= b
|
||||
}
|
||||
return x
|
||||
}
|
||||
func (x *DescriptorFlags) SizeSet(v bool) *DescriptorFlags {
|
||||
const b = 1 << 3
|
||||
if v {
|
||||
*x = *x&^b | b
|
||||
} else {
|
||||
*x &^= b
|
||||
}
|
||||
return x
|
||||
}
|
||||
func (x *DescriptorFlags) BlockChecksumSet(v bool) *DescriptorFlags {
|
||||
const b = 1 << 4
|
||||
if v {
|
||||
*x = *x&^b | b
|
||||
} else {
|
||||
*x &^= b
|
||||
}
|
||||
return x
|
||||
}
|
||||
func (x *DescriptorFlags) BlockIndependenceSet(v bool) *DescriptorFlags {
|
||||
const b = 1 << 5
|
||||
if v {
|
||||
*x = *x&^b | b
|
||||
} else {
|
||||
*x &^= b
|
||||
}
|
||||
return x
|
||||
}
|
||||
func (x *DescriptorFlags) VersionSet(v uint16) *DescriptorFlags {
|
||||
*x = *x&^(0x3<<6) | (DescriptorFlags(v) & 0x3 << 6)
|
||||
return x
|
||||
}
|
||||
func (x *DescriptorFlags) BlockSizeIndexSet(v lz4block.BlockSizeIndex) *DescriptorFlags {
|
||||
*x = *x&^(0x7<<12) | (DescriptorFlags(v) & 0x7 << 12)
|
||||
return x
|
||||
}
|
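Reading the layout table above bottom-up gives the bit positions the accessors use: bit 2 ContentChecksum, bit 3 Size, bit 4 BlockChecksum, bit 5 BlockIndependence, bits 6-7 Version, bits 12-14 BlockSizeIndex. As a worked example, a version-1 descriptor with independent 4 Mb blocks comes out as:

    var f DescriptorFlags
    f.VersionSet(1).BlockIndependenceSet(true).BlockSizeIndexSet(7)
    // 1<<6 | 1<<5 | 7<<12 == 0x7060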
||||
|
||||
// Code generated by `gen.exe`. DO NOT EDIT.
|
||||
|
||||
// DataBlockSize is defined as follows:
|
||||
// field bits
|
||||
// ----- ----
|
||||
// size 31
|
||||
// Uncompressed 1
|
||||
type DataBlockSize uint32
|
||||
|
||||
// Getters.
|
||||
func (x DataBlockSize) size() int { return int(x & 0x7FFFFFFF) }
|
||||
func (x DataBlockSize) Uncompressed() bool { return x>>31&1 != 0 }
|
||||
|
||||
// Setters.
|
||||
func (x *DataBlockSize) sizeSet(v int) *DataBlockSize {
|
||||
*x = *x&^0x7FFFFFFF | DataBlockSize(v)&0x7FFFFFFF
|
||||
return x
|
||||
}
|
||||
func (x *DataBlockSize) UncompressedSet(v bool) *DataBlockSize {
|
||||
const b = 1 << 31
|
||||
if v {
|
||||
*x = *x&^b | b
|
||||
} else {
|
||||
*x &^= b
|
||||
}
|
||||
return x
|
||||
}
|
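The generated setters above return their receiver, so descriptor flags can be composed fluently. A minimal sketch of how the getters and setters round-trip (illustrative only: lz4stream is an internal package, so this only compiles from inside the lz4 module, and 7 is the frame-format index for 4 MB blocks):

package lz4stream_test // hypothetical in-module test file, for illustration

import (
	"testing"

	"github.com/pierrec/lz4/v4/internal/lz4stream"
)

func TestDescriptorFlagsSketch(t *testing.T) {
	var flags lz4stream.DescriptorFlags
	// Chain the generated setters; each returns *DescriptorFlags.
	flags.VersionSet(1).BlockIndependenceSet(true).ContentChecksumSet(true)
	flags.BlockSizeIndexSet(7) // 7 encodes 4 MB blocks in the frame format
	if flags.Version() != 1 || !flags.BlockIndependence() || flags.BlockSizeIndex() != 7 {
		t.Fatalf("unexpected flags: %016b", flags)
	}
}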
@@ -1,5 +1,5 @@
|
||||
// Package xxh32 implements the very fast XXH hashing algorithm (32 bits version).
|
||||
// (https://github.com/Cyan4973/XXH/)
|
||||
// (ported from the reference implementation https://github.com/Cyan4973/xxHash/)
|
||||
package xxh32
|
||||
|
||||
import (
|
||||
@@ -20,10 +20,7 @@ const (
|
||||
|
||||
// XXHZero represents an xxhash32 object with seed 0.
|
||||
type XXHZero struct {
|
||||
v1 uint32
|
||||
v2 uint32
|
||||
v3 uint32
|
||||
v4 uint32
|
||||
v [4]uint32
|
||||
totalLen uint64
|
||||
buf [16]byte
|
||||
bufused int
|
||||
@@ -38,10 +35,10 @@ func (xxh XXHZero) Sum(b []byte) []byte {
|
||||
|
||||
// Reset resets the Hash to its initial state.
|
||||
func (xxh *XXHZero) Reset() {
|
||||
xxh.v1 = prime1plus2
|
||||
xxh.v2 = prime2
|
||||
xxh.v3 = 0
|
||||
xxh.v4 = prime1minus
|
||||
xxh.v[0] = prime1plus2
|
||||
xxh.v[1] = prime2
|
||||
xxh.v[2] = 0
|
||||
xxh.v[3] = prime1minus
|
||||
xxh.totalLen = 0
|
||||
xxh.bufused = 0
|
||||
}
|
||||
@@ -51,7 +48,7 @@ func (xxh *XXHZero) Size() int {
|
||||
return 4
|
||||
}
|
||||
|
||||
// BlockSize gives the minimum number of bytes accepted by Write().
|
||||
// BlockSize gives the minimum number of bytes accepted by Write().
|
||||
func (xxh *XXHZero) BlockSize() int {
|
||||
return 1
|
||||
}
|
||||
@@ -74,44 +71,48 @@ func (xxh *XXHZero) Write(input []byte) (int, error) {
|
||||
return n, nil
|
||||
}
|
||||
|
||||
p := 0
|
||||
// Causes compiler to work directly from registers instead of stack:
|
||||
v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4
|
||||
if m > 0 {
|
||||
var buf *[16]byte
|
||||
if m != 0 {
|
||||
// some data left from previous update
|
||||
copy(xxh.buf[xxh.bufused:], input[:r])
|
||||
xxh.bufused += len(input) - r
|
||||
buf = &xxh.buf
|
||||
c := copy(buf[m:], input)
|
||||
n -= c
|
||||
input = input[c:]
|
||||
}
|
||||
update(&xxh.v, buf, input)
|
||||
xxh.bufused = copy(xxh.buf[:], input[n-n%16:])
|
||||
|
||||
// fast rotl(13)
|
||||
buf := xxh.buf[:16] // BCE hint.
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Portable version of update. This updates v by processing all of buf
|
||||
// (if not nil) and all full 16-byte blocks of input.
|
||||
func updateGo(v *[4]uint32, buf *[16]byte, input []byte) {
|
||||
// Causes compiler to work directly from registers instead of stack:
|
||||
v1, v2, v3, v4 := v[0], v[1], v[2], v[3]
|
||||
|
||||
if buf != nil {
|
||||
v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1
|
||||
v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1
|
||||
v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1
|
||||
v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1
|
||||
p = r
|
||||
xxh.bufused = 0
|
||||
}
|
||||
|
||||
for n := n - 16; p <= n; p += 16 {
|
||||
sub := input[p:][:16] //BCE hint for compiler
|
||||
for ; len(input) >= 16; input = input[16:] {
|
||||
sub := input[:16] //BCE hint for compiler
|
||||
v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
|
||||
v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
|
||||
v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
|
||||
v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
|
||||
}
|
||||
xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4
|
||||
|
||||
copy(xxh.buf[xxh.bufused:], input[p:])
|
||||
xxh.bufused += len(input) - p
|
||||
|
||||
return n, nil
|
||||
v[0], v[1], v[2], v[3] = v1, v2, v3, v4
|
||||
}
|
||||
|
||||
// Sum32 returns the 32 bits Hash value.
|
||||
func (xxh *XXHZero) Sum32() uint32 {
|
||||
h32 := uint32(xxh.totalLen)
|
||||
if h32 >= 16 {
|
||||
h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4)
|
||||
h32 += rol1(xxh.v[0]) + rol7(xxh.v[1]) + rol12(xxh.v[2]) + rol18(xxh.v[3])
|
||||
} else {
|
||||
h32 += prime5
|
||||
}
|
||||
@@ -137,8 +138,8 @@ func (xxh *XXHZero) Sum32() uint32 {
|
||||
return h32
|
||||
}
|
||||
|
||||
// ChecksumZero returns the 32bits Hash value.
|
||||
func ChecksumZero(input []byte) uint32 {
|
||||
// Portable version of ChecksumZero.
|
||||
func checksumZeroGo(input []byte) uint32 {
|
||||
n := len(input)
|
||||
h32 := uint32(n)
|
||||
|
||||
@@ -182,18 +183,6 @@ func ChecksumZero(input []byte) uint32 {
|
||||
return h32
|
||||
}
|
||||
|
||||
// Uint32Zero hashes x with seed 0.
|
||||
func Uint32Zero(x uint32) uint32 {
|
||||
h := prime5 + 4 + x*prime3
|
||||
h = rol17(h) * prime4
|
||||
h ^= h >> 15
|
||||
h *= prime2
|
||||
h ^= h >> 13
|
||||
h *= prime3
|
||||
h ^= h >> 16
|
||||
return h
|
||||
}
|
||||
|
||||
func rol1(u uint32) uint32 {
|
||||
return u<<1 | u>>31
|
||||
}
|
11 pkg/metadata/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go generated vendored Normal file
@@ -0,0 +1,11 @@
// +build !noasm

package xxh32

// ChecksumZero returns the 32-bit hash of input.
//
//go:noescape
func ChecksumZero(input []byte) uint32

//go:noescape
func update(v *[4]uint32, buf *[16]byte, input []byte)
251 pkg/metadata/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s generated vendored Normal file
@@ -0,0 +1,251 @@
|
||||
// +build !noasm
|
||||
|
||||
#include "go_asm.h"
|
||||
#include "textflag.h"
|
||||
|
||||
// Register allocation.
|
||||
#define p R0
|
||||
#define n R1
|
||||
#define h R2
|
||||
#define v1 R2 // Alias for h.
|
||||
#define v2 R3
|
||||
#define v3 R4
|
||||
#define v4 R5
|
||||
#define x1 R6
|
||||
#define x2 R7
|
||||
#define x3 R8
|
||||
#define x4 R9
|
||||
|
||||
// We need the primes in registers. The 16-byte loop only uses prime{1,2}.
|
||||
#define prime1r R11
|
||||
#define prime2r R12
|
||||
#define prime3r R3 // The rest can alias v{2-4}.
|
||||
#define prime4r R4
|
||||
#define prime5r R5
|
||||
|
||||
// Update round macros. These read from and increment p.
|
||||
|
||||
#define round16aligned \
|
||||
MOVM.IA.W (p), [x1, x2, x3, x4] \
|
||||
\
|
||||
MULA x1, prime2r, v1, v1 \
|
||||
MULA x2, prime2r, v2, v2 \
|
||||
MULA x3, prime2r, v3, v3 \
|
||||
MULA x4, prime2r, v4, v4 \
|
||||
\
|
||||
MOVW v1 @> 19, v1 \
|
||||
MOVW v2 @> 19, v2 \
|
||||
MOVW v3 @> 19, v3 \
|
||||
MOVW v4 @> 19, v4 \
|
||||
\
|
||||
MUL prime1r, v1 \
|
||||
MUL prime1r, v2 \
|
||||
MUL prime1r, v3 \
|
||||
MUL prime1r, v4 \
|
||||
|
||||
#define round16unaligned \
|
||||
MOVBU.P 16(p), x1 \
|
||||
MOVBU -15(p), x2 \
|
||||
ORR x2 << 8, x1 \
|
||||
MOVBU -14(p), x3 \
|
||||
MOVBU -13(p), x4 \
|
||||
ORR x4 << 8, x3 \
|
||||
ORR x3 << 16, x1 \
|
||||
\
|
||||
MULA x1, prime2r, v1, v1 \
|
||||
MOVW v1 @> 19, v1 \
|
||||
MUL prime1r, v1 \
|
||||
\
|
||||
MOVBU -12(p), x1 \
|
||||
MOVBU -11(p), x2 \
|
||||
ORR x2 << 8, x1 \
|
||||
MOVBU -10(p), x3 \
|
||||
MOVBU -9(p), x4 \
|
||||
ORR x4 << 8, x3 \
|
||||
ORR x3 << 16, x1 \
|
||||
\
|
||||
MULA x1, prime2r, v2, v2 \
|
||||
MOVW v2 @> 19, v2 \
|
||||
MUL prime1r, v2 \
|
||||
\
|
||||
MOVBU -8(p), x1 \
|
||||
MOVBU -7(p), x2 \
|
||||
ORR x2 << 8, x1 \
|
||||
MOVBU -6(p), x3 \
|
||||
MOVBU -5(p), x4 \
|
||||
ORR x4 << 8, x3 \
|
||||
ORR x3 << 16, x1 \
|
||||
\
|
||||
MULA x1, prime2r, v3, v3 \
|
||||
MOVW v3 @> 19, v3 \
|
||||
MUL prime1r, v3 \
|
||||
\
|
||||
MOVBU -4(p), x1 \
|
||||
MOVBU -3(p), x2 \
|
||||
ORR x2 << 8, x1 \
|
||||
MOVBU -2(p), x3 \
|
||||
MOVBU -1(p), x4 \
|
||||
ORR x4 << 8, x3 \
|
||||
ORR x3 << 16, x1 \
|
||||
\
|
||||
MULA x1, prime2r, v4, v4 \
|
||||
MOVW v4 @> 19, v4 \
|
||||
MUL prime1r, v4 \
|
||||
|
||||
|
||||
// func ChecksumZero([]byte) uint32
|
||||
TEXT ·ChecksumZero(SB), NOFRAME|NOSPLIT, $-4-16
|
||||
MOVW input_base+0(FP), p
|
||||
MOVW input_len+4(FP), n
|
||||
|
||||
MOVW $const_prime1, prime1r
|
||||
MOVW $const_prime2, prime2r
|
||||
|
||||
// Set up h for n < 16. It's tempting to say {ADD prime5, n, h}
|
||||
// here, but that's a pseudo-op that generates a load through R11.
|
||||
MOVW $const_prime5, prime5r
|
||||
ADD prime5r, n, h
|
||||
CMP $0, n
|
||||
BEQ end
|
||||
|
||||
// We let n go negative so we can do comparisons with SUB.S
|
||||
// instead of separate CMP.
|
||||
SUB.S $16, n
|
||||
BMI loop16done
|
||||
|
||||
ADD prime1r, prime2r, v1
|
||||
MOVW prime2r, v2
|
||||
MOVW $0, v3
|
||||
RSB $0, prime1r, v4
|
||||
|
||||
TST $3, p
|
||||
BNE loop16unaligned
|
||||
|
||||
loop16aligned:
|
||||
SUB.S $16, n
|
||||
round16aligned
|
||||
BPL loop16aligned
|
||||
B loop16finish
|
||||
|
||||
loop16unaligned:
|
||||
SUB.S $16, n
|
||||
round16unaligned
|
||||
BPL loop16unaligned
|
||||
|
||||
loop16finish:
|
||||
MOVW v1 @> 31, h
|
||||
ADD v2 @> 25, h
|
||||
ADD v3 @> 20, h
|
||||
ADD v4 @> 14, h
|
||||
|
||||
// h += len(input) with v2 as temporary.
|
||||
MOVW input_len+4(FP), v2
|
||||
ADD v2, h
|
||||
|
||||
loop16done:
|
||||
ADD $16, n // Restore number of bytes left.
|
||||
|
||||
SUB.S $4, n
|
||||
MOVW $const_prime3, prime3r
|
||||
BMI loop4done
|
||||
MOVW $const_prime4, prime4r
|
||||
|
||||
TST $3, p
|
||||
BNE loop4unaligned
|
||||
|
||||
loop4aligned:
|
||||
SUB.S $4, n
|
||||
|
||||
MOVW.P 4(p), x1
|
||||
MULA prime3r, x1, h, h
|
||||
MOVW h @> 15, h
|
||||
MUL prime4r, h
|
||||
|
||||
BPL loop4aligned
|
||||
B loop4done
|
||||
|
||||
loop4unaligned:
|
||||
SUB.S $4, n
|
||||
|
||||
MOVBU.P 4(p), x1
|
||||
MOVBU -3(p), x2
|
||||
ORR x2 << 8, x1
|
||||
MOVBU -2(p), x3
|
||||
ORR x3 << 16, x1
|
||||
MOVBU -1(p), x4
|
||||
ORR x4 << 24, x1
|
||||
|
||||
MULA prime3r, x1, h, h
|
||||
MOVW h @> 15, h
|
||||
MUL prime4r, h
|
||||
|
||||
BPL loop4unaligned
|
||||
|
||||
loop4done:
|
||||
ADD.S $4, n // Restore number of bytes left.
|
||||
BEQ end
|
||||
|
||||
MOVW $const_prime5, prime5r
|
||||
|
||||
loop1:
|
||||
SUB.S $1, n
|
||||
|
||||
MOVBU.P 1(p), x1
|
||||
MULA prime5r, x1, h, h
|
||||
MOVW h @> 21, h
|
||||
MUL prime1r, h
|
||||
|
||||
BNE loop1
|
||||
|
||||
end:
|
||||
MOVW $const_prime3, prime3r
|
||||
EOR h >> 15, h
|
||||
MUL prime2r, h
|
||||
EOR h >> 13, h
|
||||
MUL prime3r, h
|
||||
EOR h >> 16, h
|
||||
|
||||
MOVW h, ret+12(FP)
|
||||
RET
|
||||
|
||||
|
||||
// func update(v *[4]uint32, buf *[16]byte, p []byte)
|
||||
TEXT ·update(SB), NOFRAME|NOSPLIT, $-4-20
|
||||
MOVW v+0(FP), p
|
||||
MOVM.IA (p), [v1, v2, v3, v4]
|
||||
|
||||
MOVW $const_prime1, prime1r
|
||||
MOVW $const_prime2, prime2r
|
||||
|
||||
// Process buf, if not nil.
|
||||
MOVW buf+4(FP), p
|
||||
CMP $0, p
|
||||
BEQ noBuffered
|
||||
|
||||
round16aligned
|
||||
|
||||
noBuffered:
|
||||
	MOVW input_base+8(FP), p
	MOVW input_len+12(FP), n
|
||||
|
||||
SUB.S $16, n
|
||||
BMI end
|
||||
|
||||
TST $3, p
|
||||
BNE loop16unaligned
|
||||
|
||||
loop16aligned:
|
||||
SUB.S $16, n
|
||||
round16aligned
|
||||
BPL loop16aligned
|
||||
B end
|
||||
|
||||
loop16unaligned:
|
||||
SUB.S $16, n
|
||||
round16unaligned
|
||||
BPL loop16unaligned
|
||||
|
||||
end:
|
||||
MOVW v+0(FP), p
|
||||
MOVM.IA [v1, v2, v3, v4], (p)
|
||||
RET
|
10 pkg/metadata/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go generated vendored Normal file
@@ -0,0 +1,10 @@
// +build !arm noasm

package xxh32

// ChecksumZero returns the 32-bit hash of input.
func ChecksumZero(input []byte) uint32 { return checksumZeroGo(input) }

func update(v *[4]uint32, buf *[16]byte, input []byte) {
	updateGo(v, buf, input)
}
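For context on the checksum math above: frame.go derives the one-byte header checksum from the 32-bit xxh32 hash. A minimal sketch of that relationship, with a hypothetical helper name (xxh32 is internal to the lz4 module, so this only compiles from inside it):

package lz4stream // hypothetical placement inside the module, for illustration

import "github.com/pierrec/lz4/v4/internal/xxh32"

// headerChecksumSketch mirrors descriptorChecksum in frame.go:
// hash the descriptor bytes with seed 0, then keep byte 1 of the result.
func headerChecksumSketch(descriptor []byte) byte {
	return byte(xxh32.ChecksumZero(descriptor) >> 8)
}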
157 pkg/metadata/vendor/github.com/pierrec/lz4/v4/lz4.go generated vendored Normal file
@@ -0,0 +1,157 @@
|
||||
// Package lz4 implements reading and writing lz4 compressed data.
|
||||
//
|
||||
// The package supports both the LZ4 stream format,
|
||||
// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html,
|
||||
// and the LZ4 block format, defined at
|
||||
// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html.
|
||||
//
|
||||
// See https://github.com/lz4/lz4 for the reference C implementation.
|
||||
package lz4
|
||||
|
||||
import (
|
||||
"github.com/pierrec/lz4/v4/internal/lz4block"
|
||||
"github.com/pierrec/lz4/v4/internal/lz4errors"
|
||||
)
|
||||
|
||||
func _() {
|
||||
// Safety checks for duplicated elements.
|
||||
var x [1]struct{}
|
||||
_ = x[lz4block.CompressionLevel(Fast)-lz4block.Fast]
|
||||
_ = x[Block64Kb-BlockSize(lz4block.Block64Kb)]
|
||||
_ = x[Block256Kb-BlockSize(lz4block.Block256Kb)]
|
||||
_ = x[Block1Mb-BlockSize(lz4block.Block1Mb)]
|
||||
_ = x[Block4Mb-BlockSize(lz4block.Block4Mb)]
|
||||
}
|
||||
|
||||
// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible.
|
||||
func CompressBlockBound(n int) int {
|
||||
return lz4block.CompressBlockBound(n)
|
||||
}
|
||||
|
||||
// UncompressBlock uncompresses the source buffer into the destination one,
|
||||
// and returns the uncompressed size.
|
||||
//
|
||||
// The destination buffer must be sized appropriately.
|
||||
//
|
||||
// An error is returned if the source data is invalid or the destination buffer is too small.
|
||||
func UncompressBlock(src, dst []byte) (int, error) {
|
||||
return lz4block.UncompressBlock(src, dst, nil)
|
||||
}
|
||||
|
||||
// UncompressBlockWithDict uncompresses the source buffer into the destination one using a
|
||||
// dictionary, and returns the uncompressed size.
|
||||
//
|
||||
// The destination buffer must be sized appropriately.
|
||||
//
|
||||
// An error is returned if the source data is invalid or the destination buffer is too small.
|
||||
func UncompressBlockWithDict(src, dst, dict []byte) (int, error) {
|
||||
return lz4block.UncompressBlock(src, dst, dict)
|
||||
}
|
||||
|
||||
// A Compressor compresses data into the LZ4 block format.
|
||||
// It uses a fast compression algorithm.
|
||||
//
|
||||
// A Compressor is not safe for concurrent use by multiple goroutines.
|
||||
//
|
||||
// Use a Writer to compress into the LZ4 stream format.
|
||||
type Compressor struct{ c lz4block.Compressor }
|
||||
|
||||
// CompressBlock compresses the source buffer src into the destination dst.
|
||||
//
|
||||
// If compression is successful, the first return value is the size of the
|
||||
// compressed data, which is always >0.
|
||||
//
|
||||
// If dst has length at least CompressBlockBound(len(src)), compression always
|
||||
// succeeds. Otherwise, the first return value is zero. The error return is
|
||||
// non-nil if the compressed data does not fit in dst, but it might fit in a
|
||||
// larger buffer that is still smaller than CompressBlockBound(len(src)). The
|
||||
// return value (0, nil) means the data is likely incompressible and a buffer
|
||||
// of length CompressBlockBound(len(src)) should be passed in.
|
||||
func (c *Compressor) CompressBlock(src, dst []byte) (int, error) {
|
||||
return c.c.CompressBlock(src, dst)
|
||||
}
|
||||
|
||||
// CompressBlock compresses the source buffer into the destination one.
|
||||
// This is the fast version of LZ4 compression and also the default one.
|
||||
//
|
||||
// The argument hashTable is scratch space for a hash table used by the
|
||||
// compressor. If provided, it should have length at least 1<<16. If it is
|
||||
// shorter (or nil), CompressBlock allocates its own hash table.
|
||||
//
|
||||
// The size of the compressed data is returned.
|
||||
//
|
||||
// If the destination buffer size is lower than CompressBlockBound and
|
||||
// the compressed size is 0 and no error, then the data is incompressible.
|
||||
//
|
||||
// An error is returned if the destination buffer is too small.
|
||||
|
||||
// CompressBlock is equivalent to Compressor.CompressBlock.
|
||||
// The final argument is ignored and should be set to nil.
|
||||
//
|
||||
// This function is deprecated. Use a Compressor instead.
|
||||
func CompressBlock(src, dst []byte, _ []int) (int, error) {
|
||||
return lz4block.CompressBlock(src, dst)
|
||||
}
|
||||
|
||||
// A CompressorHC compresses data into the LZ4 block format.
|
||||
// Its compression ratio is potentially better than that of a Compressor,
|
||||
// but it is also slower and requires more memory.
|
||||
//
|
||||
// A Compressor is not safe for concurrent use by multiple goroutines.
|
||||
//
|
||||
// Use a Writer to compress into the LZ4 stream format.
|
||||
type CompressorHC struct {
|
||||
// Level is the maximum search depth for compression.
|
||||
// Values <= 0 mean no maximum.
|
||||
Level CompressionLevel
|
||||
c lz4block.CompressorHC
|
||||
}
|
||||
|
||||
// CompressBlock compresses the source buffer src into the destination dst.
|
||||
//
|
||||
// If compression is successful, the first return value is the size of the
|
||||
// compressed data, which is always >0.
|
||||
//
|
||||
// If dst has length at least CompressBlockBound(len(src)), compression always
|
||||
// succeeds. Otherwise, the first return value is zero. The error return is
|
||||
// non-nil if the compressed data does not fit in dst, but it might fit in a
|
||||
// larger buffer that is still smaller than CompressBlockBound(len(src)). The
|
||||
// return value (0, nil) means the data is likely incompressible and a buffer
|
||||
// of length CompressBlockBound(len(src)) should be passed in.
|
||||
func (c *CompressorHC) CompressBlock(src, dst []byte) (int, error) {
|
||||
return c.c.CompressBlock(src, dst, lz4block.CompressionLevel(c.Level))
|
||||
}
|
||||
|
||||
// CompressBlockHC is equivalent to CompressorHC.CompressBlock.
|
||||
// The final two arguments are ignored and should be set to nil.
|
||||
//
|
||||
// This function is deprecated. Use a CompressorHC instead.
|
||||
func CompressBlockHC(src, dst []byte, depth CompressionLevel, _, _ []int) (int, error) {
|
||||
return lz4block.CompressBlockHC(src, dst, lz4block.CompressionLevel(depth))
|
||||
}
|
||||
|
||||
const (
|
||||
// ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBlock when a compressed
|
||||
// block is corrupted or the destination buffer is not large enough for the uncompressed data.
|
||||
ErrInvalidSourceShortBuffer = lz4errors.ErrInvalidSourceShortBuffer
|
||||
// ErrInvalidFrame is returned when reading an invalid LZ4 archive.
|
||||
ErrInvalidFrame = lz4errors.ErrInvalidFrame
|
||||
// ErrInternalUnhandledState is an internal error.
|
||||
ErrInternalUnhandledState = lz4errors.ErrInternalUnhandledState
|
||||
// ErrInvalidHeaderChecksum is returned when reading a frame.
|
||||
ErrInvalidHeaderChecksum = lz4errors.ErrInvalidHeaderChecksum
|
||||
// ErrInvalidBlockChecksum is returned when reading a frame.
|
||||
ErrInvalidBlockChecksum = lz4errors.ErrInvalidBlockChecksum
|
||||
// ErrInvalidFrameChecksum is returned when reading a frame.
|
||||
ErrInvalidFrameChecksum = lz4errors.ErrInvalidFrameChecksum
|
||||
// ErrOptionInvalidCompressionLevel is returned when the supplied compression level is invalid.
|
||||
ErrOptionInvalidCompressionLevel = lz4errors.ErrOptionInvalidCompressionLevel
|
||||
// ErrOptionClosedOrError is returned when an option is applied to a closed or in error object.
|
||||
ErrOptionClosedOrError = lz4errors.ErrOptionClosedOrError
|
||||
// ErrOptionInvalidBlockSize is returned when an invalid block size is provided.
|
||||
ErrOptionInvalidBlockSize = lz4errors.ErrOptionInvalidBlockSize
|
||||
// ErrOptionNotApplicable is returned when trying to apply an option to an object not supporting it.
|
||||
ErrOptionNotApplicable = lz4errors.ErrOptionNotApplicable
|
||||
// ErrWriterNotClosed is returned when attempting to reset an unclosed writer.
|
||||
ErrWriterNotClosed = lz4errors.ErrWriterNotClosed
|
||||
)
|
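A minimal round-trip sketch of the block API declared in this file, using only the exported names above (the sample data and buffer sizes are illustrative):

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/pierrec/lz4/v4"
)

func main() {
	src := bytes.Repeat([]byte("hello lz4 "), 100)

	// Compress with the fast block compressor into a bound-sized buffer,
	// so compression is guaranteed to succeed.
	var c lz4.Compressor
	dst := make([]byte, lz4.CompressBlockBound(len(src)))
	n, err := c.CompressBlock(src, dst)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("compressed %d -> %d bytes\n", len(src), n)

	// Round-trip: the destination must be sized for the uncompressed data.
	out := make([]byte, len(src))
	m, err := lz4.UncompressBlock(dst[:n], out)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(src, out[:m])) // true
}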
214 pkg/metadata/vendor/github.com/pierrec/lz4/v4/options.go generated vendored Normal file
@@ -0,0 +1,214 @@
|
||||
package lz4
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"runtime"
|
||||
|
||||
"github.com/pierrec/lz4/v4/internal/lz4block"
|
||||
"github.com/pierrec/lz4/v4/internal/lz4errors"
|
||||
)
|
||||
|
||||
//go:generate go run golang.org/x/tools/cmd/stringer -type=BlockSize,CompressionLevel -output options_gen.go
|
||||
|
||||
type (
|
||||
applier interface {
|
||||
Apply(...Option) error
|
||||
private()
|
||||
}
|
||||
// Option defines the parameters to setup an LZ4 Writer or Reader.
|
||||
Option func(applier) error
|
||||
)
|
||||
|
||||
// String returns a string representation of the option with its parameter(s).
|
||||
func (o Option) String() string {
|
||||
return o(nil).Error()
|
||||
}
|
||||
|
||||
// Default options.
|
||||
var (
|
||||
DefaultBlockSizeOption = BlockSizeOption(Block4Mb)
|
||||
DefaultChecksumOption = ChecksumOption(true)
|
||||
DefaultConcurrency = ConcurrencyOption(1)
|
||||
defaultOnBlockDone = OnBlockDoneOption(nil)
|
||||
)
|
||||
|
||||
const (
|
||||
Block64Kb BlockSize = 1 << (16 + iota*2)
|
||||
Block256Kb
|
||||
Block1Mb
|
||||
Block4Mb
|
||||
)
|
||||
|
||||
// BlockSize defines the size of the blocks to be compressed.
|
||||
type BlockSize uint32
|
||||
|
||||
// BlockSizeOption defines the maximum size of compressed blocks (default=Block4Mb).
|
||||
func BlockSizeOption(size BlockSize) Option {
|
||||
return func(a applier) error {
|
||||
switch w := a.(type) {
|
||||
case nil:
|
||||
s := fmt.Sprintf("BlockSizeOption(%s)", size)
|
||||
return lz4errors.Error(s)
|
||||
case *Writer:
|
||||
size := uint32(size)
|
||||
if !lz4block.IsValid(size) {
|
||||
return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size)
|
||||
}
|
||||
w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size))
|
||||
return nil
|
||||
}
|
||||
return lz4errors.ErrOptionNotApplicable
|
||||
}
|
||||
}
|
||||
|
||||
// BlockChecksumOption enables or disables block checksum (default=false).
|
||||
func BlockChecksumOption(flag bool) Option {
|
||||
return func(a applier) error {
|
||||
switch w := a.(type) {
|
||||
case nil:
|
||||
s := fmt.Sprintf("BlockChecksumOption(%v)", flag)
|
||||
return lz4errors.Error(s)
|
||||
case *Writer:
|
||||
w.frame.Descriptor.Flags.BlockChecksumSet(flag)
|
||||
return nil
|
||||
}
|
||||
return lz4errors.ErrOptionNotApplicable
|
||||
}
|
||||
}
|
||||
|
||||
// ChecksumOption enables/disables all blocks or content checksum (default=true).
|
||||
func ChecksumOption(flag bool) Option {
|
||||
return func(a applier) error {
|
||||
switch w := a.(type) {
|
||||
case nil:
|
||||
s := fmt.Sprintf("ChecksumOption(%v)", flag)
|
||||
return lz4errors.Error(s)
|
||||
case *Writer:
|
||||
w.frame.Descriptor.Flags.ContentChecksumSet(flag)
|
||||
return nil
|
||||
}
|
||||
return lz4errors.ErrOptionNotApplicable
|
||||
}
|
||||
}
|
||||
|
||||
// SizeOption sets the size of the original uncompressed data (default=0). It is useful to know the size of the
|
||||
// whole uncompressed data stream.
|
||||
func SizeOption(size uint64) Option {
|
||||
return func(a applier) error {
|
||||
switch w := a.(type) {
|
||||
case nil:
|
||||
s := fmt.Sprintf("SizeOption(%d)", size)
|
||||
return lz4errors.Error(s)
|
||||
case *Writer:
|
||||
w.frame.Descriptor.Flags.SizeSet(size > 0)
|
||||
w.frame.Descriptor.ContentSize = size
|
||||
return nil
|
||||
}
|
||||
return lz4errors.ErrOptionNotApplicable
|
||||
}
|
||||
}
|
||||
|
||||
// ConcurrencyOption sets the number of go routines used for compression.
|
||||
// If n <= 0, then the output of runtime.GOMAXPROCS(0) is used.
|
||||
func ConcurrencyOption(n int) Option {
|
||||
if n <= 0 {
|
||||
n = runtime.GOMAXPROCS(0)
|
||||
}
|
||||
return func(a applier) error {
|
||||
switch rw := a.(type) {
|
||||
case nil:
|
||||
s := fmt.Sprintf("ConcurrencyOption(%d)", n)
|
||||
return lz4errors.Error(s)
|
||||
case *Writer:
|
||||
rw.num = n
|
||||
return nil
|
||||
case *Reader:
|
||||
rw.num = n
|
||||
return nil
|
||||
}
|
||||
return lz4errors.ErrOptionNotApplicable
|
||||
}
|
||||
}
|
||||
|
||||
// CompressionLevel defines the level of compression to use. The higher the better, but slower, compression.
|
||||
type CompressionLevel uint32
|
||||
|
||||
const (
|
||||
Fast CompressionLevel = 0
|
||||
Level1 CompressionLevel = 1 << (8 + iota)
|
||||
Level2
|
||||
Level3
|
||||
Level4
|
||||
Level5
|
||||
Level6
|
||||
Level7
|
||||
Level8
|
||||
Level9
|
||||
)
|
||||
|
||||
// CompressionLevelOption defines the compression level (default=Fast).
|
||||
func CompressionLevelOption(level CompressionLevel) Option {
|
||||
return func(a applier) error {
|
||||
switch w := a.(type) {
|
||||
case nil:
|
||||
s := fmt.Sprintf("CompressionLevelOption(%s)", level)
|
||||
return lz4errors.Error(s)
|
||||
case *Writer:
|
||||
switch level {
|
||||
case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9:
|
||||
default:
|
||||
return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level)
|
||||
}
|
||||
w.level = lz4block.CompressionLevel(level)
|
||||
return nil
|
||||
}
|
||||
return lz4errors.ErrOptionNotApplicable
|
||||
}
|
||||
}
|
||||
|
||||
func onBlockDone(int) {}
|
||||
|
||||
// OnBlockDoneOption is triggered when a block has been processed. For a Writer, it is when it has been compressed,
// for a Reader, it is when it has been uncompressed.
|
||||
func OnBlockDoneOption(handler func(size int)) Option {
|
||||
if handler == nil {
|
||||
handler = onBlockDone
|
||||
}
|
||||
return func(a applier) error {
|
||||
switch rw := a.(type) {
|
||||
case nil:
|
||||
s := fmt.Sprintf("OnBlockDoneOption(%s)", reflect.TypeOf(handler).String())
|
||||
return lz4errors.Error(s)
|
||||
case *Writer:
|
||||
rw.handler = handler
|
||||
return nil
|
||||
case *Reader:
|
||||
rw.handler = handler
|
||||
return nil
|
||||
}
|
||||
return lz4errors.ErrOptionNotApplicable
|
||||
}
|
||||
}
|
||||
|
||||
// LegacyOption provides support for writing LZ4 frames in the legacy format.
|
||||
//
|
||||
// See https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame.
|
||||
//
|
||||
// NB. compressed Linux kernel images use a tweaked LZ4 legacy format where
|
||||
// the compressed stream is followed by the original (uncompressed) size of
|
||||
// the kernel (https://events.static.linuxfound.org/sites/events/files/lcjpcojp13_klee.pdf).
|
||||
// This is also supported as a special case.
|
||||
func LegacyOption(legacy bool) Option {
|
||||
return func(a applier) error {
|
||||
switch rw := a.(type) {
|
||||
case nil:
|
||||
s := fmt.Sprintf("LegacyOption(%v)", legacy)
|
||||
return lz4errors.Error(s)
|
||||
case *Writer:
|
||||
rw.legacy = legacy
|
||||
return nil
|
||||
}
|
||||
return lz4errors.ErrOptionNotApplicable
|
||||
}
|
||||
}
|
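A minimal sketch of applying these options to a Writer (the chosen values are illustrative; Apply only succeeds while the stream is still new):

package main

import (
	"bytes"
	"log"

	"github.com/pierrec/lz4/v4"
)

func main() {
	var buf bytes.Buffer
	w := lz4.NewWriter(&buf)
	// Each Option validates its argument and fails on a closed or errored stream.
	if err := w.Apply(
		lz4.BlockSizeOption(lz4.Block256Kb),
		lz4.CompressionLevelOption(lz4.Level5),
		lz4.BlockChecksumOption(true),
		lz4.ConcurrencyOption(4),
	); err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte("some data to compress")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("frame is %d bytes", buf.Len())
}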
92 pkg/metadata/vendor/github.com/pierrec/lz4/v4/options_gen.go generated vendored Normal file
@@ -0,0 +1,92 @@
|
||||
// Code generated by "stringer -type=BlockSize,CompressionLevel -output options_gen.go"; DO NOT EDIT.
|
||||
|
||||
package lz4
|
||||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[Block64Kb-65536]
|
||||
_ = x[Block256Kb-262144]
|
||||
_ = x[Block1Mb-1048576]
|
||||
_ = x[Block4Mb-4194304]
|
||||
}
|
||||
|
||||
const (
|
||||
_BlockSize_name_0 = "Block64Kb"
|
||||
_BlockSize_name_1 = "Block256Kb"
|
||||
_BlockSize_name_2 = "Block1Mb"
|
||||
_BlockSize_name_3 = "Block4Mb"
|
||||
)
|
||||
|
||||
func (i BlockSize) String() string {
|
||||
switch {
|
||||
case i == 65536:
|
||||
return _BlockSize_name_0
|
||||
case i == 262144:
|
||||
return _BlockSize_name_1
|
||||
case i == 1048576:
|
||||
return _BlockSize_name_2
|
||||
case i == 4194304:
|
||||
return _BlockSize_name_3
|
||||
default:
|
||||
return "BlockSize(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
}
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[Fast-0]
|
||||
_ = x[Level1-512]
|
||||
_ = x[Level2-1024]
|
||||
_ = x[Level3-2048]
|
||||
_ = x[Level4-4096]
|
||||
_ = x[Level5-8192]
|
||||
_ = x[Level6-16384]
|
||||
_ = x[Level7-32768]
|
||||
_ = x[Level8-65536]
|
||||
_ = x[Level9-131072]
|
||||
}
|
||||
|
||||
const (
|
||||
_CompressionLevel_name_0 = "Fast"
|
||||
_CompressionLevel_name_1 = "Level1"
|
||||
_CompressionLevel_name_2 = "Level2"
|
||||
_CompressionLevel_name_3 = "Level3"
|
||||
_CompressionLevel_name_4 = "Level4"
|
||||
_CompressionLevel_name_5 = "Level5"
|
||||
_CompressionLevel_name_6 = "Level6"
|
||||
_CompressionLevel_name_7 = "Level7"
|
||||
_CompressionLevel_name_8 = "Level8"
|
||||
_CompressionLevel_name_9 = "Level9"
|
||||
)
|
||||
|
||||
func (i CompressionLevel) String() string {
|
||||
switch {
|
||||
case i == 0:
|
||||
return _CompressionLevel_name_0
|
||||
case i == 512:
|
||||
return _CompressionLevel_name_1
|
||||
case i == 1024:
|
||||
return _CompressionLevel_name_2
|
||||
case i == 2048:
|
||||
return _CompressionLevel_name_3
|
||||
case i == 4096:
|
||||
return _CompressionLevel_name_4
|
||||
case i == 8192:
|
||||
return _CompressionLevel_name_5
|
||||
case i == 16384:
|
||||
return _CompressionLevel_name_6
|
||||
case i == 32768:
|
||||
return _CompressionLevel_name_7
|
||||
case i == 65536:
|
||||
return _CompressionLevel_name_8
|
||||
case i == 131072:
|
||||
return _CompressionLevel_name_9
|
||||
default:
|
||||
return "CompressionLevel(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
}
|
275 pkg/metadata/vendor/github.com/pierrec/lz4/v4/reader.go generated vendored Normal file
@@ -0,0 +1,275 @@
|
||||
package lz4
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
"github.com/pierrec/lz4/v4/internal/lz4block"
|
||||
"github.com/pierrec/lz4/v4/internal/lz4errors"
|
||||
"github.com/pierrec/lz4/v4/internal/lz4stream"
|
||||
)
|
||||
|
||||
var readerStates = []aState{
|
||||
noState: newState,
|
||||
errorState: newState,
|
||||
newState: readState,
|
||||
readState: closedState,
|
||||
closedState: newState,
|
||||
}
|
||||
|
||||
// NewReader returns a new LZ4 frame decoder.
|
||||
func NewReader(r io.Reader) *Reader {
|
||||
return newReader(r, false)
|
||||
}
|
||||
|
||||
func newReader(r io.Reader, legacy bool) *Reader {
|
||||
zr := &Reader{frame: lz4stream.NewFrame()}
|
||||
zr.state.init(readerStates)
|
||||
_ = zr.Apply(DefaultConcurrency, defaultOnBlockDone)
|
||||
zr.Reset(r)
|
||||
return zr
|
||||
}
|
||||
|
||||
// Reader allows reading an LZ4 stream.
|
||||
type Reader struct {
|
||||
state _State
|
||||
src io.Reader // source reader
|
||||
num int // concurrency level
|
||||
frame *lz4stream.Frame // frame being read
|
||||
data []byte // block buffer allocated in non concurrent mode
|
||||
reads chan []byte // pending data
|
||||
idx int // size of pending data
|
||||
handler func(int)
|
||||
cum uint32
|
||||
dict []byte
|
||||
}
|
||||
|
||||
func (*Reader) private() {}
|
||||
|
||||
func (r *Reader) Apply(options ...Option) (err error) {
|
||||
defer r.state.check(&err)
|
||||
switch r.state.state {
|
||||
case newState:
|
||||
case errorState:
|
||||
return r.state.err
|
||||
default:
|
||||
return lz4errors.ErrOptionClosedOrError
|
||||
}
|
||||
for _, o := range options {
|
||||
if err = o(r); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Size returns the size of the underlying uncompressed data, if set in the stream.
|
||||
func (r *Reader) Size() int {
|
||||
switch r.state.state {
|
||||
case readState, closedState:
|
||||
if r.frame.Descriptor.Flags.Size() {
|
||||
return int(r.frame.Descriptor.ContentSize)
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *Reader) isNotConcurrent() bool {
|
||||
return r.num == 1
|
||||
}
|
||||
|
||||
func (r *Reader) init() error {
|
||||
err := r.frame.ParseHeaders(r.src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !r.frame.Descriptor.Flags.BlockIndependence() {
|
||||
// We can't decompress dependent blocks concurrently.
|
||||
// Instead of throwing an error to the user, silently drop concurrency
|
||||
r.num = 1
|
||||
}
|
||||
data, err := r.frame.InitR(r.src, r.num)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.reads = data
|
||||
r.idx = 0
|
||||
size := r.frame.Descriptor.Flags.BlockSizeIndex()
|
||||
r.data = size.Get()
|
||||
r.cum = 0
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Reader) Read(buf []byte) (n int, err error) {
|
||||
defer r.state.check(&err)
|
||||
switch r.state.state {
|
||||
case readState:
|
||||
case closedState, errorState:
|
||||
return 0, r.state.err
|
||||
case newState:
|
||||
// First initialization.
|
||||
if err = r.init(); r.state.next(err) {
|
||||
return
|
||||
}
|
||||
default:
|
||||
return 0, r.state.fail()
|
||||
}
|
||||
for len(buf) > 0 {
|
||||
var bn int
|
||||
if r.idx == 0 {
|
||||
if r.isNotConcurrent() {
|
||||
bn, err = r.read(buf)
|
||||
} else {
|
||||
lz4block.Put(r.data)
|
||||
r.data = <-r.reads
|
||||
if len(r.data) == 0 {
|
||||
// No uncompressed data: something went wrong or we are done.
|
||||
err = r.frame.Blocks.ErrorR()
|
||||
}
|
||||
}
|
||||
switch err {
|
||||
case nil:
|
||||
case io.EOF:
|
||||
if er := r.frame.CloseR(r.src); er != nil {
|
||||
err = er
|
||||
}
|
||||
lz4block.Put(r.data)
|
||||
r.data = nil
|
||||
return
|
||||
default:
|
||||
return
|
||||
}
|
||||
}
|
||||
if bn == 0 {
|
||||
// Fill buf with buffered data.
|
||||
bn = copy(buf, r.data[r.idx:])
|
||||
r.idx += bn
|
||||
if r.idx == len(r.data) {
|
||||
// All data read, get ready for the next Read.
|
||||
r.idx = 0
|
||||
}
|
||||
}
|
||||
buf = buf[bn:]
|
||||
n += bn
|
||||
r.handler(bn)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// read uncompresses the next block as follows:
// - if buf has enough room, the block is uncompressed into it directly
//   and the length of used space is returned
// - else, the uncompressed data is stored in r.data and 0 is returned
|
||||
func (r *Reader) read(buf []byte) (int, error) {
|
||||
block := r.frame.Blocks.Block
|
||||
_, err := block.Read(r.frame, r.src, r.cum)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
var direct bool
|
||||
dst := r.data[:cap(r.data)]
|
||||
if len(buf) >= len(dst) {
|
||||
// Uncompress directly into buf.
|
||||
direct = true
|
||||
dst = buf
|
||||
}
|
||||
dst, err = block.Uncompress(r.frame, dst, r.dict, true)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if !r.frame.Descriptor.Flags.BlockIndependence() {
|
||||
if len(r.dict)+len(dst) > 128*1024 {
|
||||
preserveSize := 64*1024 - len(dst)
|
||||
if preserveSize < 0 {
|
||||
preserveSize = 0
|
||||
}
|
||||
r.dict = r.dict[len(r.dict)-preserveSize:]
|
||||
}
|
||||
r.dict = append(r.dict, dst...)
|
||||
}
|
||||
r.cum += uint32(len(dst))
|
||||
if direct {
|
||||
return len(dst), nil
|
||||
}
|
||||
r.data = dst
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Reset clears the state of the Reader r such that it is equivalent to its
|
||||
// initial state from NewReader, but instead reading from reader.
|
||||
// No access to reader is performed.
|
||||
func (r *Reader) Reset(reader io.Reader) {
|
||||
if r.data != nil {
|
||||
lz4block.Put(r.data)
|
||||
r.data = nil
|
||||
}
|
||||
r.frame.Reset(r.num)
|
||||
r.state.reset()
|
||||
r.src = reader
|
||||
r.reads = nil
|
||||
}
|
||||
|
||||
// WriteTo efficiently uncompresses the data from the Reader underlying source to w.
|
||||
func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
|
||||
switch r.state.state {
|
||||
case closedState, errorState:
|
||||
return 0, r.state.err
|
||||
case newState:
|
||||
if err = r.init(); r.state.next(err) {
|
||||
return
|
||||
}
|
||||
default:
|
||||
return 0, r.state.fail()
|
||||
}
|
||||
defer r.state.nextd(&err)
|
||||
|
||||
var data []byte
|
||||
if r.isNotConcurrent() {
|
||||
size := r.frame.Descriptor.Flags.BlockSizeIndex()
|
||||
data = size.Get()
|
||||
defer lz4block.Put(data)
|
||||
}
|
||||
for {
|
||||
var bn int
|
||||
var dst []byte
|
||||
if r.isNotConcurrent() {
|
||||
bn, err = r.read(data)
|
||||
dst = data[:bn]
|
||||
} else {
|
||||
lz4block.Put(dst)
|
||||
dst = <-r.reads
|
||||
bn = len(dst)
|
||||
if bn == 0 {
|
||||
// No uncompressed data: something went wrong or we are done.
|
||||
err = r.frame.Blocks.ErrorR()
|
||||
}
|
||||
}
|
||||
switch err {
|
||||
case nil:
|
||||
case io.EOF:
|
||||
err = r.frame.CloseR(r.src)
|
||||
return
|
||||
default:
|
||||
return
|
||||
}
|
||||
r.handler(bn)
|
||||
bn, err = w.Write(dst)
|
||||
n += int64(bn)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ValidFrameHeader returns a bool indicating if the given bytes slice matches a LZ4 header.
|
||||
func ValidFrameHeader(in []byte) (bool, error) {
|
||||
f := lz4stream.NewFrame()
|
||||
err := f.ParseHeaders(bytes.NewReader(in))
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
if err == lz4errors.ErrInvalidFrame {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
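A minimal sketch of reading a frame back with the Reader above (in-memory buffers are illustrative; io.Copy picks up the WriteTo fast path automatically):

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/pierrec/lz4/v4"
)

func main() {
	// Produce a small frame to read back.
	var frame bytes.Buffer
	zw := lz4.NewWriter(&frame)
	if _, err := zw.Write([]byte("hello, lz4 frame")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	zr := lz4.NewReader(&frame)
	var out bytes.Buffer
	if _, err := io.Copy(&out, zr); err != nil { // uses Reader.WriteTo internally
		log.Fatal(err)
	}
	fmt.Println(out.String())
}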
75 pkg/metadata/vendor/github.com/pierrec/lz4/v4/state.go generated vendored Normal file
@@ -0,0 +1,75 @@
|
||||
package lz4
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/pierrec/lz4/v4/internal/lz4errors"
|
||||
)
|
||||
|
||||
//go:generate go run golang.org/x/tools/cmd/stringer -type=aState -output state_gen.go
|
||||
|
||||
const (
|
||||
noState aState = iota // uninitialized reader
|
||||
errorState // unrecoverable error encountered
|
||||
newState // instantiated object
|
||||
readState // reading data
|
||||
writeState // writing data
|
||||
closedState // all done
|
||||
)
|
||||
|
||||
type (
|
||||
aState uint8
|
||||
_State struct {
|
||||
states []aState
|
||||
state aState
|
||||
err error
|
||||
}
|
||||
)
|
||||
|
||||
func (s *_State) init(states []aState) {
|
||||
s.states = states
|
||||
s.state = states[0]
|
||||
}
|
||||
|
||||
func (s *_State) reset() {
|
||||
s.state = s.states[0]
|
||||
s.err = nil
|
||||
}
|
||||
|
||||
// next sets the state to the next one unless it is passed a non nil error.
|
||||
// It returns whether or not it is in error.
|
||||
func (s *_State) next(err error) bool {
|
||||
if err != nil {
|
||||
s.err = fmt.Errorf("%s: %w", s.state, err)
|
||||
s.state = errorState
|
||||
return true
|
||||
}
|
||||
s.state = s.states[s.state]
|
||||
return false
|
||||
}
|
||||
|
||||
// nextd is like next but for defers.
|
||||
func (s *_State) nextd(errp *error) bool {
|
||||
return errp != nil && s.next(*errp)
|
||||
}
|
||||
|
||||
// check sets s in error if not already in error and if the error is not nil or io.EOF.
|
||||
func (s *_State) check(errp *error) {
|
||||
if s.state == errorState || errp == nil {
|
||||
return
|
||||
}
|
||||
if err := *errp; err != nil {
|
||||
s.err = fmt.Errorf("%w[%s]", err, s.state)
|
||||
if !errors.Is(err, io.EOF) {
|
||||
s.state = errorState
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *_State) fail() error {
|
||||
s.state = errorState
|
||||
s.err = fmt.Errorf("%w[%s]", lz4errors.ErrInternalUnhandledState, s.state)
|
||||
return s.err
|
||||
}
|
28 pkg/metadata/vendor/github.com/pierrec/lz4/v4/state_gen.go generated vendored Normal file
@@ -0,0 +1,28 @@
// Code generated by "stringer -type=aState -output state_gen.go"; DO NOT EDIT.

package lz4

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[noState-0]
	_ = x[errorState-1]
	_ = x[newState-2]
	_ = x[readState-3]
	_ = x[writeState-4]
	_ = x[closedState-5]
}

const _aState_name = "noStateerrorStatenewStatereadStatewriteStateclosedState"

var _aState_index = [...]uint8{0, 7, 17, 25, 34, 44, 55}

func (i aState) String() string {
	if i >= aState(len(_aState_index)-1) {
		return "aState(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _aState_name[_aState_index[i]:_aState_index[i+1]]
}
238 pkg/metadata/vendor/github.com/pierrec/lz4/v4/writer.go generated vendored Normal file
@@ -0,0 +1,238 @@
|
||||
package lz4
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/pierrec/lz4/v4/internal/lz4block"
|
||||
"github.com/pierrec/lz4/v4/internal/lz4errors"
|
||||
"github.com/pierrec/lz4/v4/internal/lz4stream"
|
||||
)
|
||||
|
||||
var writerStates = []aState{
|
||||
noState: newState,
|
||||
newState: writeState,
|
||||
writeState: closedState,
|
||||
closedState: newState,
|
||||
errorState: newState,
|
||||
}
|
||||
|
||||
// NewWriter returns a new LZ4 frame encoder.
|
||||
func NewWriter(w io.Writer) *Writer {
|
||||
zw := &Writer{frame: lz4stream.NewFrame()}
|
||||
zw.state.init(writerStates)
|
||||
_ = zw.Apply(DefaultBlockSizeOption, DefaultChecksumOption, DefaultConcurrency, defaultOnBlockDone)
|
||||
zw.Reset(w)
|
||||
return zw
|
||||
}
|
||||
|
||||
// Writer allows writing an LZ4 stream.
|
||||
type Writer struct {
|
||||
state _State
|
||||
src io.Writer // destination writer
|
||||
level lz4block.CompressionLevel // how hard to try
|
||||
num int // concurrency level
|
||||
frame *lz4stream.Frame // frame being built
|
||||
data []byte // pending data
|
||||
idx int // size of pending data
|
||||
handler func(int)
|
||||
legacy bool
|
||||
}
|
||||
|
||||
func (*Writer) private() {}
|
||||
|
||||
func (w *Writer) Apply(options ...Option) (err error) {
|
||||
defer w.state.check(&err)
|
||||
switch w.state.state {
|
||||
case newState:
|
||||
case errorState:
|
||||
return w.state.err
|
||||
default:
|
||||
return lz4errors.ErrOptionClosedOrError
|
||||
}
|
||||
w.Reset(w.src)
|
||||
for _, o := range options {
|
||||
if err = o(w); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (w *Writer) isNotConcurrent() bool {
|
||||
return w.num == 1
|
||||
}
|
||||
|
||||
// init sets up the Writer when in newState. It does not change the Writer state.
|
||||
func (w *Writer) init() error {
|
||||
w.frame.InitW(w.src, w.num, w.legacy)
|
||||
size := w.frame.Descriptor.Flags.BlockSizeIndex()
|
||||
w.data = size.Get()
|
||||
w.idx = 0
|
||||
return w.frame.Descriptor.Write(w.frame, w.src)
|
||||
}
|
||||
|
||||
func (w *Writer) Write(buf []byte) (n int, err error) {
|
||||
defer w.state.check(&err)
|
||||
switch w.state.state {
|
||||
case writeState:
|
||||
case closedState, errorState:
|
||||
return 0, w.state.err
|
||||
case newState:
|
||||
if err = w.init(); w.state.next(err) {
|
||||
return
|
||||
}
|
||||
default:
|
||||
return 0, w.state.fail()
|
||||
}
|
||||
|
||||
zn := len(w.data)
|
||||
for len(buf) > 0 {
|
||||
if w.isNotConcurrent() && w.idx == 0 && len(buf) >= zn {
|
||||
// Avoid a copy as there is enough data for a block.
|
||||
if err = w.write(buf[:zn], false); err != nil {
|
||||
return
|
||||
}
|
||||
n += zn
|
||||
buf = buf[zn:]
|
||||
continue
|
||||
}
|
||||
// Accumulate the data to be compressed.
|
||||
m := copy(w.data[w.idx:], buf)
|
||||
n += m
|
||||
w.idx += m
|
||||
buf = buf[m:]
|
||||
|
||||
if w.idx < len(w.data) {
|
||||
// Buffer not filled.
|
||||
return
|
||||
}
|
||||
|
||||
// Buffer full.
|
||||
if err = w.write(w.data, true); err != nil {
|
||||
return
|
||||
}
|
||||
if !w.isNotConcurrent() {
|
||||
size := w.frame.Descriptor.Flags.BlockSizeIndex()
|
||||
w.data = size.Get()
|
||||
}
|
||||
w.idx = 0
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (w *Writer) write(data []byte, safe bool) error {
|
||||
if w.isNotConcurrent() {
|
||||
block := w.frame.Blocks.Block
|
||||
err := block.Compress(w.frame, data, w.level).Write(w.frame, w.src)
|
||||
w.handler(len(block.Data))
|
||||
return err
|
||||
}
|
||||
c := make(chan *lz4stream.FrameDataBlock)
|
||||
w.frame.Blocks.Blocks <- c
|
||||
go func(c chan *lz4stream.FrameDataBlock, data []byte, safe bool) {
|
||||
b := lz4stream.NewFrameDataBlock(w.frame)
|
||||
c <- b.Compress(w.frame, data, w.level)
|
||||
<-c
|
||||
w.handler(len(b.Data))
|
||||
b.Close(w.frame)
|
||||
if safe {
|
||||
// safe to put it back as the last usage of it was FrameDataBlock.Write() called before c is closed
|
||||
lz4block.Put(data)
|
||||
}
|
||||
}(c, data, safe)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Flush any buffered data to the underlying writer immediately.
|
||||
func (w *Writer) Flush() (err error) {
|
||||
switch w.state.state {
|
||||
case writeState:
|
||||
case errorState:
|
||||
return w.state.err
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
|
||||
if w.idx > 0 {
|
||||
// Flush pending data, disable w.data freeing as it is done later on.
|
||||
if err = w.write(w.data[:w.idx], false); err != nil {
|
||||
return err
|
||||
}
|
||||
w.idx = 0
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes the Writer, flushing any unwritten data to the underlying writer
|
||||
// without closing it.
|
||||
func (w *Writer) Close() error {
|
||||
if err := w.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
err := w.frame.CloseW(w.src, w.num)
|
||||
// It is now safe to free the buffer.
|
||||
if w.data != nil {
|
||||
lz4block.Put(w.data)
|
||||
w.data = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Reset clears the state of the Writer w such that it is equivalent to its
|
||||
// initial state from NewWriter, but instead writing to writer.
|
||||
// Reset keeps the previous options unless overwritten by the supplied ones.
|
||||
// No access to writer is performed.
|
||||
//
|
||||
// w.Close must be called before Reset or pending data may be dropped.
|
||||
func (w *Writer) Reset(writer io.Writer) {
|
||||
w.frame.Reset(w.num)
|
||||
w.state.reset()
|
||||
w.src = writer
|
||||
}
|
||||
|
||||
// ReadFrom efficiently reads from r and compresses the data into the Writer destination.
|
||||
func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
|
||||
switch w.state.state {
|
||||
case closedState, errorState:
|
||||
return 0, w.state.err
|
||||
case newState:
|
||||
if err = w.init(); w.state.next(err) {
|
||||
return
|
||||
}
|
||||
default:
|
||||
return 0, w.state.fail()
|
||||
}
|
||||
defer w.state.check(&err)
|
||||
|
||||
size := w.frame.Descriptor.Flags.BlockSizeIndex()
|
||||
var done bool
|
||||
var rn int
|
||||
data := size.Get()
|
||||
if w.isNotConcurrent() {
|
||||
// Keep the same buffer for the whole process.
|
||||
defer lz4block.Put(data)
|
||||
}
|
||||
for !done {
|
||||
rn, err = io.ReadFull(r, data)
|
||||
switch err {
|
||||
case nil:
|
||||
case io.EOF, io.ErrUnexpectedEOF: // read may be partial
|
||||
done = true
|
||||
default:
|
||||
return
|
||||
}
|
||||
n += int64(rn)
|
||||
err = w.write(data[:rn], true)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
w.handler(rn)
|
||||
if !done && !w.isNotConcurrent() {
|
||||
// The buffer will be returned automatically by go routines (safe=true)
// so get a new one for the next round.
|
||||
data = size.Get()
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
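Writer.Reset above keeps previously applied options but requires a prior Close so pending data is not dropped. A minimal sketch of reusing one Writer across several outputs (the helper name is illustrative):

package main

import (
	"bytes"
	"log"

	"github.com/pierrec/lz4/v4"
)

func compressInto(w *lz4.Writer, dst *bytes.Buffer, data []byte) error {
	w.Reset(dst) // reuses buffers and previously applied options
	if _, err := w.Write(data); err != nil {
		return err
	}
	return w.Close() // flushes and writes the end-of-frame marker
}

func main() {
	w := lz4.NewWriter(nil) // destination supplied per call via Reset
	var a, b bytes.Buffer
	if err := compressInto(w, &a, []byte("first payload")); err != nil {
		log.Fatal(err)
	}
	if err := compressInto(w, &b, []byte("second payload")); err != nil {
		log.Fatal(err)
	}
	log.Printf("a=%d bytes, b=%d bytes", a.Len(), b.Len())
}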
275 pkg/metadata/vendor/github.com/pierrec/lz4/writer.go generated vendored
@@ -1,275 +0,0 @@
|
||||
package lz4
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/pierrec/lz4/internal/xxh32"
|
||||
)
|
||||
|
||||
// Writer implements the LZ4 frame encoder.
|
||||
type Writer struct {
|
||||
Header
|
||||
// Handler called when a block has been successfully written out.
|
||||
// It provides the number of bytes written.
|
||||
OnBlockDone func(size int)
|
||||
|
||||
buf [19]byte // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes
|
||||
dst io.Writer // Destination.
|
||||
checksum xxh32.XXHZero // Frame checksum.
|
||||
zdata []byte // Compressed data.
|
||||
data []byte // Data to be compressed.
|
||||
idx int // Index into data.
|
||||
hashtable [winSize]int // Hash table used in CompressBlock().
|
||||
}
|
||||
|
||||
// NewWriter returns a new LZ4 frame encoder.
|
||||
// No access to the underlying io.Writer is performed.
|
||||
// The supplied Header is checked at the first Write.
|
||||
// It is ok to change it before the first Write but then not until a Reset() is performed.
|
||||
func NewWriter(dst io.Writer) *Writer {
|
||||
return &Writer{dst: dst}
|
||||
}
|
||||
|
||||
// writeHeader builds and writes the header (magic+header) to the underlying io.Writer.
|
||||
func (z *Writer) writeHeader() error {
|
||||
// Default to 4Mb if BlockMaxSize is not set.
|
||||
if z.Header.BlockMaxSize == 0 {
|
||||
z.Header.BlockMaxSize = bsMapID[7]
|
||||
}
|
||||
// The only option that needs to be validated.
|
||||
bSize := z.Header.BlockMaxSize
|
||||
bSizeID, ok := bsMapValue[bSize]
|
||||
if !ok {
|
||||
return fmt.Errorf("lz4: invalid block max size: %d", bSize)
|
||||
}
|
||||
// Allocate the compressed/uncompressed buffers.
|
||||
// The compressed buffer cannot exceed the uncompressed one.
|
||||
if cap(z.zdata) < bSize {
|
||||
// Only allocate if there is not enough capacity.
|
||||
// Allocate both buffers at once.
|
||||
z.zdata = make([]byte, 2*bSize)
|
||||
}
|
||||
z.data = z.zdata[:bSize] // Uncompressed buffer is the first half.
|
||||
z.zdata = z.zdata[:cap(z.zdata)][bSize:] // Compressed buffer is the second half.
|
||||
z.idx = 0
|
||||
|
||||
// Size is optional.
|
||||
buf := z.buf[:]
|
||||
|
||||
// Set the fixed size data: magic number, block max size and flags.
|
||||
binary.LittleEndian.PutUint32(buf[0:], frameMagic)
|
||||
flg := byte(Version << 6)
|
||||
flg |= 1 << 5 // No block dependency.
|
||||
if z.Header.BlockChecksum {
|
||||
flg |= 1 << 4
|
||||
}
|
||||
if z.Header.Size > 0 {
|
||||
flg |= 1 << 3
|
||||
}
|
||||
if !z.Header.NoChecksum {
|
||||
flg |= 1 << 2
|
||||
}
|
||||
buf[4] = flg
|
||||
buf[5] = bSizeID << 4
|
||||
|
||||
// Current buffer size: magic(4) + flags(1) + block max size (1).
|
||||
n := 6
|
||||
// Optional items.
|
||||
if z.Header.Size > 0 {
|
||||
binary.LittleEndian.PutUint64(buf[n:], z.Header.Size)
|
||||
n += 8
|
||||
}
|
||||
|
||||
// The header checksum includes the flags, block max size and optional Size.
|
||||
buf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF)
|
||||
z.checksum.Reset()
|
||||
|
||||
// Header ready, write it out.
|
||||
if _, err := z.dst.Write(buf[0 : n+1]); err != nil {
|
||||
return err
|
||||
}
|
||||
z.Header.done = true
|
||||
if debugFlag {
|
||||
debug("wrote header %v", z.Header)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write compresses data from the supplied buffer into the underlying io.Writer.
|
||||
// Write does not return until the data has been written.
|
||||
func (z *Writer) Write(buf []byte) (int, error) {
|
||||
if !z.Header.done {
|
||||
if err := z.writeHeader(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
if debugFlag {
|
||||
debug("input buffer len=%d index=%d", len(buf), z.idx)
|
||||
}
|
||||
|
||||
zn := len(z.data)
|
||||
var n int
|
||||
for len(buf) > 0 {
|
||||
if z.idx == 0 && len(buf) >= zn {
|
||||
// Avoid a copy as there is enough data for a block.
|
||||
if err := z.compressBlock(buf[:zn]); err != nil {
|
||||
return n, err
|
||||
}
|
||||
n += zn
|
||||
buf = buf[zn:]
|
||||
continue
|
||||
}
|
||||
// Accumulate the data to be compressed.
|
||||
m := copy(z.data[z.idx:], buf)
|
||||
n += m
|
||||
z.idx += m
|
||||
buf = buf[m:]
|
||||
if debugFlag {
|
||||
debug("%d bytes copied to buf, current index %d", n, z.idx)
|
||||
}
|
||||
|
||||
if z.idx < len(z.data) {
|
||||
// Buffer not filled.
|
||||
if debugFlag {
|
||||
debug("need more data for compression")
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Buffer full.
|
||||
if err := z.compressBlock(z.data); err != nil {
|
||||
return n, err
|
||||
}
|
||||
z.idx = 0
|
||||
}
|
||||
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// compressBlock compresses a block.
|
||||
func (z *Writer) compressBlock(data []byte) error {
|
||||
if !z.NoChecksum {
|
||||
z.checksum.Write(data)
|
||||
}
|
||||
|
||||
// The compressed block size cannot exceed the input's.
|
||||
var zn int
|
||||
var err error
|
||||
|
||||
if level := z.Header.CompressionLevel; level != 0 {
|
||||
zn, err = CompressBlockHC(data, z.zdata, level)
|
||||
} else {
|
||||
zn, err = CompressBlock(data, z.zdata, z.hashtable[:])
|
||||
}
|
||||
|
||||
var zdata []byte
|
||||
var bLen uint32
|
||||
if debugFlag {
|
||||
debug("block compression %d => %d", len(data), zn)
|
||||
}
|
||||
if err == nil && zn > 0 && zn < len(data) {
|
||||
// Compressible and compressed size smaller than uncompressed: ok!
|
||||
bLen = uint32(zn)
|
||||
zdata = z.zdata[:zn]
|
||||
} else {
|
||||
// Uncompressed block.
|
||||
bLen = uint32(len(data)) | compressedBlockFlag
|
||||
zdata = data
|
||||
}
|
||||
if debugFlag {
|
||||
debug("block compression to be written len=%d data len=%d", bLen, len(zdata))
|
||||
}
|
||||
|
||||
// Write the block.
|
||||
if err := z.writeUint32(bLen); err != nil {
|
||||
return err
|
||||
}
|
||||
written, err := z.dst.Write(zdata)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if h := z.OnBlockDone; h != nil {
|
||||
h(written)
|
||||
}
|
||||
|
||||
if !z.BlockChecksum {
|
||||
if debugFlag {
|
||||
debug("current frame checksum %x", z.checksum.Sum32())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
checksum := xxh32.ChecksumZero(zdata)
|
||||
if debugFlag {
|
||||
debug("block checksum %x", checksum)
|
||||
defer func() { debug("current frame checksum %x", z.checksum.Sum32()) }()
|
||||
}
|
||||
return z.writeUint32(checksum)
|
||||
}
|
||||
|
||||
// Flush flushes any pending compressed data to the underlying writer.
|
||||
// Flush does not return until the data has been written.
|
||||
// If the underlying writer returns an error, Flush returns that error.
|
||||
func (z *Writer) Flush() error {
|
||||
if debugFlag {
|
||||
debug("flush with index %d", z.idx)
|
||||
}
|
||||
if z.idx == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := z.compressBlock(z.data[:z.idx]); err != nil {
|
||||
return err
|
||||
}
|
||||
z.idx = 0
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer.
|
||||
func (z *Writer) Close() error {
|
||||
if !z.Header.done {
|
||||
if err := z.writeHeader(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := z.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if debugFlag {
|
||||
debug("writing last empty block")
|
||||
}
|
||||
if err := z.writeUint32(0); err != nil {
|
||||
return err
|
||||
}
|
||||
if z.NoChecksum {
|
||||
return nil
|
||||
}
|
||||
checksum := z.checksum.Sum32()
|
||||
if debugFlag {
|
||||
debug("stream checksum %x", checksum)
|
||||
}
|
||||
return z.writeUint32(checksum)
|
||||
}
|
||||
|
||||
// Reset clears the state of the Writer z such that it is equivalent to its
|
||||
// initial state from NewWriter, but instead writing to w.
|
||||
// No access to the underlying io.Writer is performed.
|
||||
func (z *Writer) Reset(w io.Writer) {
|
||||
z.Header = Header{}
|
||||
z.dst = w
|
||||
z.checksum.Reset()
|
||||
z.zdata = z.zdata[:0]
|
||||
z.data = z.data[:0]
|
||||
z.idx = 0
|
||||
}
|
||||
|
||||
// writeUint32 writes a uint32 to the underlying writer.
|
||||
func (z *Writer) writeUint32(x uint32) error {
|
||||
buf := z.buf[:4]
|
||||
binary.LittleEndian.PutUint32(buf, x)
|
||||
_, err := z.dst.Write(buf)
|
||||
return err
|
||||
}
|
3 pkg/metadata/vendor/github.com/pkg/xattr/.gitignore generated vendored
@@ -18,9 +18,6 @@ _cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

# Dependencies
go.sum

_testmain.go

*.exe
17 pkg/metadata/vendor/github.com/pkg/xattr/.travis.sh generated vendored
@@ -1,17 +0,0 @@
#!/bin/sh -e

echo "Building for Linux..."
GOOS=linux go build

echo "Building for Darwin..."
GOOS=darwin go build

echo "Building for FreeBSD..."
GOOS=freebsd go build

echo "Building for Windows...(dummy)"
GOOS=windows go build

echo "Running tests..."
go vet
go test -v -race -coverprofile=coverage.txt -covermode=atomic
26 pkg/metadata/vendor/github.com/pkg/xattr/.travis.yml generated vendored
@@ -1,26 +0,0 @@
language: go
sudo: false

go:
  - "1.11.x"

os:
  - linux
  - osx
  - windows

before_install:
  - go version
  - export GO111MODULE=on
  - go get golang.org/x/tools/cmd/goimports

install:
  - go build

script:
  - ./.travis.sh
  # goimports on windows gives false positives
  - if [[ "${TRAVIS_OS_NAME}" != "windows" ]]; then diff <(goimports -d .) <(printf ""); fi

after_success:
  - bash <(curl -s https://codecov.io/bash)
5 pkg/metadata/vendor/github.com/pkg/xattr/README.md generated vendored
@@ -1,12 +1,11 @@
[](http://godoc.org/github.com/pkg/xattr)
[](https://goreportcard.com/report/github.com/pkg/xattr)
[](https://travis-ci.org/pkg/xattr)
[](https://github.com/pkg/xattr/releases)
[](https://github.com/pkg/xattr/actions?query=workflow%3Abuild)
[](https://codecov.io/gh/pkg/xattr)

xattr
=====
Extended attribute support for Go (linux + darwin + freebsd + netbsd).
Extended attribute support for Go (linux + darwin + freebsd + netbsd + solaris).

"Extended attributes are name:value pairs associated permanently with files and directories, similar to the environment strings associated with a process. An attribute may be defined or undefined. If it is defined, its value may be empty or non-empty." [See more...](https://en.wikipedia.org/wiki/Extended_file_attributes)
4 pkg/metadata/vendor/github.com/pkg/xattr/go.mod generated vendored
@@ -1,3 +1,5 @@
module github.com/pkg/xattr

require golang.org/x/sys v0.0.0-20181021155630-eda9bb28ed51
go 1.14

require golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f
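For reference, a dependency bump of this shape is typically produced by commands like the following, run in the module being updated (illustrative; the pseudo-version is the one from the diff):

go get golang.org/x/sys@v0.0.0-20220408201424-a24fb2fb8a0f
go mod tidy
go mod vendor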
4 pkg/metadata/vendor/github.com/pkg/xattr/go.sum generated vendored Normal file
@@ -0,0 +1,4 @@
golang.org/x/sys v0.0.0-20201101102859-da207088b7d1 h1:a/mKvvZr9Jcc8oKfcmgzyp7OwF73JPWsQLvH1z2Kxck=
golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f h1:8w7RhxzTVgUzw/AH/9mUV5q0vMgy40SQRursCcfmkCw=
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
29 pkg/metadata/vendor/github.com/pkg/xattr/xattr.go generated vendored
@@ -29,8 +29,31 @@ type Error struct {
    Err error
}

func (e *Error) Error() string {
    return e.Op + " " + e.Path + " " + e.Name + ": " + e.Err.Error()
func (e *Error) Unwrap() error { return e.Err }

func (e *Error) Error() (errstr string) {
    if e.Op != "" {
        errstr += e.Op
    }
    if e.Path != "" {
        if errstr != "" {
            errstr += " "
        }
        errstr += e.Path
    }
    if e.Name != "" {
        if errstr != "" {
            errstr += " "
        }
        errstr += e.Name
    }
    if e.Err != nil {
        if errstr != "" {
            errstr += ": "
        }
        errstr += e.Err.Error()
    }
    return
}

// Get retrieves extended attribute data associated with path. It will follow
@@ -85,7 +108,7 @@ func get(path string, name string, getxattrFunc getxattrFunc) ([]byte, error) {
    // truncated, and we retry with a bigger buffer. Contrary to documentation,
    // MacOS never seems to return ERANGE!
    // To keep the code simple, we always check both conditions, and sometimes
    // double the buffer size without it being strictly neccessary.
    // double the buffer size without it being strictly necessary.
    if err == syscall.ERANGE || read == size {
        // The buffer was too small. Try again.
        size <<= 1
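The new Unwrap method is what lets callers match the underlying errno through the *Error wrapper with the standard errors package; a minimal sketch (the path and attribute name are placeholders):

package main

import (
    "errors"
    "fmt"

    "github.com/pkg/xattr"
)

func main() {
    _, err := xattr.Get("/tmp/somefile", "user.missing")
    // Unwrap lets errors.Is see through *xattr.Error to the
    // underlying errno, e.g. ENOATTR when the attribute is absent.
    if errors.Is(err, xattr.ENOATTR) {
        fmt.Println("attribute not set")
    }
}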
4 pkg/metadata/vendor/github.com/pkg/xattr/xattr_bsd.go generated vendored
@@ -1,3 +1,4 @@
//go:build freebsd || netbsd
// +build freebsd netbsd

package xattr
@@ -9,6 +10,9 @@ import (
)

const (
    // XATTR_SUPPORTED will be true if the current platform is supported
    XATTR_SUPPORTED = true

    EXTATTR_NAMESPACE_USER = 1

    // ENOATTR is not exported by the syscall package on Linux, because it is
4 pkg/metadata/vendor/github.com/pkg/xattr/xattr_darwin.go generated vendored
@@ -1,3 +1,4 @@
//go:build darwin
// +build darwin

package xattr
@@ -11,6 +12,9 @@ import (

// See https://opensource.apple.com/source/xnu/xnu-1504.15.3/bsd/sys/xattr.h.auto.html
const (
    // XATTR_SUPPORTED will be true if the current platform is supported
    XATTR_SUPPORTED = true

    XATTR_NOFOLLOW = 0x0001
    XATTR_CREATE = 0x0002
    XATTR_REPLACE = 0x0004
85 pkg/metadata/vendor/github.com/pkg/xattr/xattr_linux.go generated vendored
@@ -1,3 +1,4 @@
//go:build linux
// +build linux

package xattr
@@ -10,6 +11,9 @@ import (
)

const (
    // XATTR_SUPPORTED will be true if the current platform is supported
    XATTR_SUPPORTED = true

    XATTR_CREATE = unix.XATTR_CREATE
    XATTR_REPLACE = unix.XATTR_REPLACE

@@ -19,52 +23,109 @@ const (
    ENOATTR = syscall.ENODATA
)

// On Linux, FUSE and CIFS filesystems can return EINTR for interrupted system
// calls. This function works around this by retrying system calls until they
// stop returning EINTR.
//
// See https://github.com/golang/go/commit/6b420169d798c7ebe733487b56ea5c3fa4aab5ce.
func ignoringEINTR(fn func() error) (err error) {
    for {
        err = fn()
        if err != unix.EINTR {
            break
        }
    }
    return err
}

func getxattr(path string, name string, data []byte) (int, error) {
    return unix.Getxattr(path, name, data)
    var r int
    err := ignoringEINTR(func() (err error) {
        r, err = unix.Getxattr(path, name, data)
        return err
    })
    return r, err
}

func lgetxattr(path string, name string, data []byte) (int, error) {
    return unix.Lgetxattr(path, name, data)
    var r int
    err := ignoringEINTR(func() (err error) {
        r, err = unix.Lgetxattr(path, name, data)
        return err
    })
    return r, err
}

func fgetxattr(f *os.File, name string, data []byte) (int, error) {
    return unix.Fgetxattr(int(f.Fd()), name, data)
    var r int
    err := ignoringEINTR(func() (err error) {
        r, err = unix.Fgetxattr(int(f.Fd()), name, data)
        return err
    })
    return r, err
}

func setxattr(path string, name string, data []byte, flags int) error {
    return unix.Setxattr(path, name, data, flags)
    return ignoringEINTR(func() (err error) {
        return unix.Setxattr(path, name, data, flags)
    })
}

func lsetxattr(path string, name string, data []byte, flags int) error {
    return unix.Lsetxattr(path, name, data, flags)
    return ignoringEINTR(func() (err error) {
        return unix.Lsetxattr(path, name, data, flags)
    })
}

func fsetxattr(f *os.File, name string, data []byte, flags int) error {
    return unix.Fsetxattr(int(f.Fd()), name, data, flags)
    return ignoringEINTR(func() (err error) {
        return unix.Fsetxattr(int(f.Fd()), name, data, flags)
    })
}

func removexattr(path string, name string) error {
    return unix.Removexattr(path, name)
    return ignoringEINTR(func() (err error) {
        return unix.Removexattr(path, name)
    })
}

func lremovexattr(path string, name string) error {
    return unix.Lremovexattr(path, name)
    return ignoringEINTR(func() (err error) {
        return unix.Lremovexattr(path, name)
    })
}

func fremovexattr(f *os.File, name string) error {
    return unix.Fremovexattr(int(f.Fd()), name)
    return ignoringEINTR(func() (err error) {
        return unix.Fremovexattr(int(f.Fd()), name)
    })
}

func listxattr(path string, data []byte) (int, error) {
    return unix.Listxattr(path, data)
    var r int
    err := ignoringEINTR(func() (err error) {
        r, err = unix.Listxattr(path, data)
        return err
    })
    return r, err
}

func llistxattr(path string, data []byte) (int, error) {
    return unix.Llistxattr(path, data)
    var r int
    err := ignoringEINTR(func() (err error) {
        r, err = unix.Llistxattr(path, data)
        return err
    })
    return r, err
}

func flistxattr(f *os.File, data []byte) (int, error) {
    return unix.Flistxattr(int(f.Fd()), data)
    var r int
    err := ignoringEINTR(func() (err error) {
        r, err = unix.Flistxattr(int(f.Fd()), data)
        return err
    })
    return r, err
}

// stringsFromByteSlice converts a sequence of attributes to a []string.
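The ignoringEINTR loop above is the standard retry idiom for syscalls interrupted by signals (notably on FUSE and CIFS mounts); a self-contained sketch of the same pattern outside the package (names are illustrative):

package main

import (
    "fmt"

    "golang.org/x/sys/unix"
)

// retryEINTR re-invokes fn until it stops failing with EINTR,
// the errno returned when a signal interrupts a slow syscall.
func retryEINTR(fn func() error) (err error) {
    for {
        if err = fn(); err != unix.EINTR {
            return err
        }
    }
}

func main() {
    buf := make([]byte, 64)
    var n int
    err := retryEINTR(func() (err error) {
        n, err = unix.Getxattr("/tmp", "user.example", buf)
        return err
    })
    fmt.Println(n, err)
}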
165 pkg/metadata/vendor/github.com/pkg/xattr/xattr_solaris.go generated vendored Normal file
@@ -0,0 +1,165 @@
//go:build solaris
// +build solaris

package xattr

import (
    "os"
    "syscall"

    "golang.org/x/sys/unix"
)

const (
    // XATTR_SUPPORTED will be true if the current platform is supported
    XATTR_SUPPORTED = true

    XATTR_CREATE = 0x1
    XATTR_REPLACE = 0x2

    // ENOATTR is not exported by the syscall package on Linux, because it is
    // an alias for ENODATA. We export it here so it is available on all
    // our supported platforms.
    ENOATTR = syscall.ENODATA
)

func getxattr(path string, name string, data []byte) (int, error) {
    f, err := os.OpenFile(path, os.O_RDONLY, 0)
    if err != nil {
        return 0, err
    }
    defer func() {
        _ = f.Close()
    }()
    return fgetxattr(f, name, data)
}

func lgetxattr(path string, name string, data []byte) (int, error) {
    return 0, unix.ENOTSUP
}

func fgetxattr(f *os.File, name string, data []byte) (int, error) {
    fd, err := unix.Openat(int(f.Fd()), name, unix.O_RDONLY|unix.O_XATTR, 0)
    if err != nil {
        return 0, err
    }
    defer func() {
        _ = unix.Close(fd)
    }()
    return unix.Read(fd, data)
}

func setxattr(path string, name string, data []byte, flags int) error {
    f, err := os.OpenFile(path, os.O_RDONLY, 0)
    if err != nil {
        return err
    }
    err = fsetxattr(f, name, data, flags)
    if err != nil {
        _ = f.Close()
        return err
    }
    return f.Close()
}

func lsetxattr(path string, name string, data []byte, flags int) error {
    return unix.ENOTSUP
}

func fsetxattr(f *os.File, name string, data []byte, flags int) error {
    mode := unix.O_WRONLY | unix.O_XATTR
    if flags&XATTR_REPLACE != 0 {
        mode |= unix.O_TRUNC
    } else if flags&XATTR_CREATE != 0 {
        mode |= unix.O_CREAT | unix.O_EXCL
    } else {
        mode |= unix.O_CREAT | unix.O_TRUNC
    }
    fd, err := unix.Openat(int(f.Fd()), name, mode, 0666)
    if err != nil {
        return err
    }
    if _, err = unix.Write(fd, data); err != nil {
        _ = unix.Close(fd)
        return err
    }
    return unix.Close(fd)
}

func removexattr(path string, name string) error {
    fd, err := unix.Open(path, unix.O_RDONLY|unix.O_XATTR, 0)
    if err != nil {
        return err
    }
    f := os.NewFile(uintptr(fd), path)
    defer func() {
        _ = f.Close()
    }()
    return fremovexattr(f, name)
}

func lremovexattr(path string, name string) error {
    return unix.ENOTSUP
}

func fremovexattr(f *os.File, name string) error {
    fd, err := unix.Openat(int(f.Fd()), ".", unix.O_XATTR, 0)
    if err != nil {
        return err
    }
    defer func() {
        _ = unix.Close(fd)
    }()
    return unix.Unlinkat(fd, name, 0)
}

func listxattr(path string, data []byte) (int, error) {
    f, err := os.OpenFile(path, os.O_RDONLY, 0)
    if err != nil {
        return 0, err
    }
    defer func() {
        _ = f.Close()
    }()
    return flistxattr(f, data)
}

func llistxattr(path string, data []byte) (int, error) {
    return 0, unix.ENOTSUP
}

func flistxattr(f *os.File, data []byte) (int, error) {
    fd, err := unix.Openat(int(f.Fd()), ".", unix.O_RDONLY|unix.O_XATTR, 0)
    if err != nil {
        return 0, unix.ENOTSUP
    }
    xf := os.NewFile(uintptr(fd), f.Name())
    defer func() {
        _ = xf.Close()
    }()
    names, err := xf.Readdirnames(-1)
    if err != nil {
        return 0, err
    }
    var buf []byte
    for _, name := range names {
        buf = append(buf, append([]byte(name), '\000')...)
    }
    if data == nil {
        return len(buf), nil
    }
    return copy(data, buf), nil
}

// stringsFromByteSlice converts a sequence of attributes to a []string.
// On Darwin and Linux, each entry is a NULL-terminated string.
func stringsFromByteSlice(buf []byte) (result []string) {
    offset := 0
    for index, b := range buf {
        if b == 0 {
            result = append(result, string(buf[offset:index]))
            offset = index + 1
        }
    }
    return
}
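Solaris has no {get,set,list}xattr syscall family; an attribute is a file in a hidden per-file directory reached via O_XATTR, which is why this new file is built on Openat. The crux is the flag-to-open-mode mapping in fsetxattr; a condensed, self-contained restatement of that mapping (illustrative only; compiles only with GOOS=solaris, where unix.O_XATTR is defined):

package main

import (
    "fmt"

    "golang.org/x/sys/unix"
)

const (
    XATTR_CREATE = 0x1
    XATTR_REPLACE = 0x2
)

// attrOpenMode mirrors the fsetxattr logic above:
//   XATTR_REPLACE -> attribute must already exist (plain O_TRUNC fails
//                    with ENOENT otherwise, since O_CREAT is absent)
//   XATTR_CREATE  -> attribute must not exist (O_CREAT|O_EXCL)
//   neither       -> create or overwrite (O_CREAT|O_TRUNC)
func attrOpenMode(flags int) int {
    mode := unix.O_WRONLY | unix.O_XATTR
    switch {
    case flags&XATTR_REPLACE != 0:
        mode |= unix.O_TRUNC
    case flags&XATTR_CREATE != 0:
        mode |= unix.O_CREAT | unix.O_EXCL
    default:
        mode |= unix.O_CREAT | unix.O_TRUNC
    }
    return mode
}

func main() {
    fmt.Printf("replace: %#x\n", attrOpenMode(XATTR_REPLACE))
    fmt.Printf("create:  %#x\n", attrOpenMode(XATTR_CREATE))
    fmt.Printf("default: %#x\n", attrOpenMode(0))
}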
12 pkg/metadata/vendor/github.com/pkg/xattr/xattr_unsupported.go generated vendored
@@ -1,11 +1,21 @@
// +build !linux,!freebsd,!netbsd,!darwin
//go:build !linux && !freebsd && !netbsd && !darwin && !solaris
// +build !linux,!freebsd,!netbsd,!darwin,!solaris

package xattr

import (
    "os"
    "syscall"
)

const (
    // We need to use the default for non supported operating systems
    ENOATTR = syscall.Errno(0x59)
)

// XATTR_SUPPORTED will be true if the current platform is supported
const XATTR_SUPPORTED = false

func getxattr(path string, name string, data []byte) (int, error) {
    return 0, nil
}
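One detail worth noting across these hunks: each file now carries both constraint syntaxes, since //go:build is understood by Go 1.17+ while the // +build line keeps older toolchains selecting the same files, and gofmt keeps the pair in sync. The unsupported fallback also gains !solaris so it no longer shadows the new Solaris implementation. The pattern, in isolation:

//go:build !linux && !freebsd && !netbsd && !darwin && !solaris
// +build !linux,!freebsd,!netbsd,!darwin,!solaris

// Both lines express the same constraint; Go 1.17+ reads the first,
// older releases read the second.
package xattr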
14 pkg/metadata/vendor/github.com/sirupsen/logrus/.travis.yml generated vendored
@@ -4,14 +4,12 @@ git:
  depth: 1
env:
  - GO111MODULE=on
go: [1.13.x, 1.14.x]
os: [linux, osx]
go: 1.15.x
os: linux
install:
  - ./travis/install.sh
script:
  - ./travis/cross_build.sh
  - ./travis/lint.sh
  - export GOMAXPROCS=4
  - export GORACE=halt_on_error=1
  - go test -race -v ./...
  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then go test -race -v -tags appengine ./... ; fi
  - cd ci
  - go run mage.go -v -w ../ crossBuild
  - go run mage.go -v -w ../ lint
  - go run mage.go -v -w ../ test
36 pkg/metadata/vendor/github.com/sirupsen/logrus/CHANGELOG.md generated vendored
@@ -1,3 +1,39 @@
# 1.8.1
Code quality:
* move magefile in its own subdir/submodule to remove magefile dependency on logrus consumer
* improve timestamp format documentation

Fixes:
* fix race condition on logger hooks

# 1.8.0

Correct versioning number replacing v1.7.1.

# 1.7.1

Beware this release has introduced a new public API and its semver is therefore incorrect.

Code quality:
* use go 1.15 in travis
* use magefile as task runner

Fixes:
* small fixes about new go 1.13 error formatting system
* Fix for long time race condiction with mutating data hooks

Features:
* build support for zos

# 1.7.0
Fixes:
* the dependency toward a windows terminal library has been removed

Features:
* a new buffer pool management API has been added
* a set of `<LogLevel>Fn()` functions have been added

# 1.6.0
Fixes:
* end of line cleanup
6 pkg/metadata/vendor/github.com/sirupsen/logrus/README.md generated vendored
@@ -1,4 +1,4 @@
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [](https://travis-ci.org/sirupsen/logrus) [](https://godoc.org/github.com/sirupsen/logrus)
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [](https://travis-ci.org/sirupsen/logrus) [](https://pkg.go.dev/github.com/sirupsen/logrus)

Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger.
@@ -341,7 +341,7 @@ import (
  log "github.com/sirupsen/logrus"
)

init() {
func init() {
  // do something here to set environment depending on an environment variable
  // or command-line flag
  if Environment == "production" {
@@ -402,7 +402,7 @@ func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
  // source of the official loggers.
  serialized, err := json.Marshal(entry.Data)
  if err != nil {
    return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
    return nil, fmt.Errorf("Failed to marshal fields to JSON, %w", err)
  }
  return append(serialized, '\n'), nil
}
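The %v-to-%w change in the formatter example is more than cosmetic: %w records the wrapped error so errors.Is and errors.As can traverse the chain, whereas %v flattens it to text. A minimal illustration:

package main

import (
    "errors"
    "fmt"
    "io"
)

func main() {
    base := io.ErrUnexpectedEOF
    wrapped := fmt.Errorf("failed to marshal fields to JSON, %w", base)
    // %w preserves the chain; %v would have flattened it to a string.
    fmt.Println(errors.Is(wrapped, io.ErrUnexpectedEOF)) // true
}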
9 pkg/metadata/vendor/github.com/sirupsen/logrus/buffer_pool.go generated vendored
@@ -26,15 +26,6 @@ func (p *defaultPool) Get() *bytes.Buffer {
    return p.pool.Get().(*bytes.Buffer)
}

func getBuffer() *bytes.Buffer {
    return bufferPool.Get()
}

func putBuffer(buf *bytes.Buffer) {
    buf.Reset()
    bufferPool.Put(buf)
}

// SetBufferPool allows to replace the default logrus buffer pool
// to better meets the specific needs of an application.
func SetBufferPool(bp BufferPool) {
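getBuffer and putBuffer disappear because the pool is now resolved per entry (see getBufferPool in the entry.go hunk below), which lets a pool installed via SetBufferPool actually take effect. A sketch of plugging in a custom pool (the pool type name is illustrative):

package main

import (
    "bytes"
    "sync"

    "github.com/sirupsen/logrus"
)

type pool struct{ p sync.Pool }

func (c *pool) Get() *bytes.Buffer  { return c.p.Get().(*bytes.Buffer) }
func (c *pool) Put(b *bytes.Buffer) { c.p.Put(b) }

func main() {
    // Replace the package-level buffer pool; entries will fetch
    // and return their formatting buffers through it.
    logrus.SetBufferPool(&pool{p: sync.Pool{
        New: func() interface{} { return new(bytes.Buffer) },
    }})
    logrus.Info("using a custom buffer pool")
}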
86 pkg/metadata/vendor/github.com/sirupsen/logrus/entry.go generated vendored
@@ -78,6 +78,14 @@ func NewEntry(logger *Logger) *Entry {
    }
}

func (entry *Entry) Dup() *Entry {
    data := make(Fields, len(entry.Data))
    for k, v := range entry.Data {
        data[k] = v
    }
    return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err}
}

// Returns the bytes representation of this entry from the formatter.
func (entry *Entry) Bytes() ([]byte, error) {
    return entry.Logger.Formatter.Format(entry)
@@ -123,11 +131,9 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
    for k, v := range fields {
        isErrField := false
        if t := reflect.TypeOf(v); t != nil {
            switch t.Kind() {
            case reflect.Func:
            switch {
            case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func:
                isErrField = true
            case reflect.Ptr:
                isErrField = t.Elem().Kind() == reflect.Func
            }
        }
        if isErrField {
@@ -212,54 +218,66 @@ func (entry Entry) HasCaller() (has bool) {
        entry.Caller != nil
}

// This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines
func (entry Entry) log(level Level, msg string) {
func (entry *Entry) log(level Level, msg string) {
    var buffer *bytes.Buffer

    // Default to now, but allow users to override if they want.
    //
    // We don't have to worry about polluting future calls to Entry#log()
    // with this assignment because this function is declared with a
    // non-pointer receiver.
    if entry.Time.IsZero() {
        entry.Time = time.Now()
    newEntry := entry.Dup()

    if newEntry.Time.IsZero() {
        newEntry.Time = time.Now()
    }

    entry.Level = level
    entry.Message = msg
    entry.Logger.mu.Lock()
    if entry.Logger.ReportCaller {
        entry.Caller = getCaller()
    newEntry.Level = level
    newEntry.Message = msg

    newEntry.Logger.mu.Lock()
    reportCaller := newEntry.Logger.ReportCaller
    bufPool := newEntry.getBufferPool()
    newEntry.Logger.mu.Unlock()

    if reportCaller {
        newEntry.Caller = getCaller()
    }
    entry.Logger.mu.Unlock()

    entry.fireHooks()

    buffer = getBuffer()
    newEntry.fireHooks()
    buffer = bufPool.Get()
    defer func() {
        entry.Buffer = nil
        putBuffer(buffer)
        newEntry.Buffer = nil
        buffer.Reset()
        bufPool.Put(buffer)
    }()
    buffer.Reset()
    entry.Buffer = buffer
    newEntry.Buffer = buffer

    entry.write()
    newEntry.write()

    entry.Buffer = nil
    newEntry.Buffer = nil

    // To avoid Entry#log() returning a value that only would make sense for
    // panic() to use in Entry#Panic(), we avoid the allocation by checking
    // directly here.
    if level <= PanicLevel {
        panic(&entry)
        panic(newEntry)
    }
}

func (entry *Entry) getBufferPool() (pool BufferPool) {
    if entry.Logger.BufferPool != nil {
        return entry.Logger.BufferPool
    }
    return bufferPool
}

func (entry *Entry) fireHooks() {
    var tmpHooks LevelHooks
    entry.Logger.mu.Lock()
    defer entry.Logger.mu.Unlock()
    err := entry.Logger.Hooks.Fire(entry.Level, entry)
    tmpHooks = make(LevelHooks, len(entry.Logger.Hooks))
    for k, v := range entry.Logger.Hooks {
        tmpHooks[k] = v
    }
    entry.Logger.mu.Unlock()

    err := tmpHooks.Fire(entry.Level, entry)
    if err != nil {
        fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
    }
@@ -273,11 +291,14 @@ func (entry *Entry) write() {
        fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
        return
    }
    if _, err = entry.Logger.Out.Write(serialized); err != nil {
    if _, err := entry.Logger.Out.Write(serialized); err != nil {
        fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
    }
}

// Log will log a message at the level given as parameter.
// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit.
// For this behaviour Entry.Panic or Entry.Fatal should be used instead.
func (entry *Entry) Log(level Level, args ...interface{}) {
    if entry.Logger.IsLevelEnabled(level) {
        entry.log(level, fmt.Sprint(args...))
@@ -319,7 +340,6 @@ func (entry *Entry) Fatal(args ...interface{}) {

func (entry *Entry) Panic(args ...interface{}) {
    entry.Log(PanicLevel, args...)
    panic(fmt.Sprint(args...))
}

// Entry Printf family functions
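The fireHooks rewrite closes a long-standing race: the hook map is now snapshotted while holding the logger mutex and fired after releasing it, and log() works on a Dup of the entry instead of mutating shared state. A small sketch of the usage this makes safe (noopHook is a stand-in):

package main

import (
    "github.com/sirupsen/logrus"
)

type noopHook struct{}

func (noopHook) Levels() []logrus.Level   { return logrus.AllLevels }
func (noopHook) Fire(*logrus.Entry) error { return nil }

func main() {
    log := logrus.New()
    // Adding hooks concurrently with logging no longer races,
    // because fireHooks copies the hook map under the mutex.
    go log.AddHook(noopHook{})
    log.Info("concurrent logging")
}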
5 pkg/metadata/vendor/github.com/sirupsen/logrus/go.mod generated vendored
@@ -2,9 +2,8 @@ module github.com/sirupsen/logrus

require (
    github.com/davecgh/go-spew v1.1.1 // indirect
    github.com/pmezard/go-difflib v1.0.0 // indirect
    github.com/stretchr/testify v1.2.2
    golang.org/x/sys v0.0.0-20191026070338-33540a1f6037
    github.com/stretchr/testify v1.7.0
    golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8
)

go 1.13
16 pkg/metadata/vendor/github.com/sirupsen/logrus/go.sum generated vendored
@@ -1,10 +1,14 @@
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
Some files were not shown because too many files have changed in this diff.