Merge pull request #3933 from deitch/main-version-binaries

Main version binaries
Avi Deitcher 2023-06-15 00:31:24 -07:00 committed by GitHub
commit 9eef398d64
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
503 changed files with 50225 additions and 14906 deletions


@ -2,13 +2,13 @@ kernel:
image: linuxkit/kernel:5.4.30
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32
onboot:
- name: sysctl
image: linuxkit/sysctl:e5959517fab7b44692ad63941eecf37486e73799
image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
- name: dhcpcd
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
@ -19,7 +19,7 @@ services:
# this will keep all of the existing ones as well
- /var/tmp:/var/tmp
- name: rngd
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
files:
- path: etc/getty.shadow
# sample sets password for root to "abcdefgh" (without quotes)


@ -2,22 +2,22 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32
onboot:
- name: sysctl
image: linuxkit/sysctl:e5959517fab7b44692ad63941eecf37486e73799
image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
- name: dhcpcd
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
- name: sysfs
image: linuxkit/sysfs:4f58a2447d328d391143984fc4e8508b00dc6e5b
image: linuxkit/sysfs:5fd982d39ff7bec8e480c67a110acb2d3794c291
- name: format
image: linuxkit/format:9c40b556691c1bf47394603aeb2dbdba21e7e32e
image: linuxkit/format:5161fe240e5824da04d51bcf5e00afcb0c18dc25
- name: mount
image: linuxkit/mount:a8581e454f846690d09e2e7c6287d3c84ca53257
image: linuxkit/mount:f671cb94a8999a65e33b3fe79f3def58e3d58b07
command: ["/usr/bin/mountie", "/var/lib/docker"]
services:
@ -26,7 +26,7 @@ services:
env:
- INSECURE=true
- name: rngd
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
- name: ntpd
image: linuxkit/openntpd:dd353cac6cbd816008c565041cec6650090d0ad0


@ -2,21 +2,21 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=tty0 console=ttyS0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32
onboot:
- name: sysctl
image: linuxkit/sysctl:e5959517fab7b44692ad63941eecf37486e73799
image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
- name: dhcpcd
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
- name: format
image: linuxkit/format:9c40b556691c1bf47394603aeb2dbdba21e7e32e
image: linuxkit/format:5161fe240e5824da04d51bcf5e00afcb0c18dc25
command: ["/usr/bin/format", "/dev/sda"]
- name: mount
image: linuxkit/mount:a8581e454f846690d09e2e7c6287d3c84ca53257
image: linuxkit/mount:f671cb94a8999a65e33b3fe79f3def58e3d58b07
command: ["/usr/bin/mountie", "/dev/sda1", "/var/external"]
- name: loop
image: linuxkit/losetup:fcee8e453684d45c3c411fe8c28ecb2210158638
@ -25,7 +25,7 @@ onboot:
image: linuxkit/dm-crypt:526d32351c8246431be8e1a168cb514ff3c365af
command: ["/usr/bin/crypto", "crypt_loop_dev", "/dev/loop0"]
- name: mount
image: linuxkit/mount:a8581e454f846690d09e2e7c6287d3c84ca53257
image: linuxkit/mount:f671cb94a8999a65e33b3fe79f3def58e3d58b07
command: ["/usr/bin/mountie", "/dev/mapper/crypt_loop_dev", "/var/secure_storage"]
- name: bbox
image: busybox
@ -38,7 +38,7 @@ services:
env:
- INSECURE=true
- name: rngd
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
files:
- path: etc/dm-crypt/key
# the below key is just to keep the example self-contained


@ -2,24 +2,24 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=tty0 console=ttyS0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32
onboot:
- name: sysctl
image: linuxkit/sysctl:e5959517fab7b44692ad63941eecf37486e73799
image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
- name: dhcpcd
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
- name: format
image: linuxkit/format:9c40b556691c1bf47394603aeb2dbdba21e7e32e
image: linuxkit/format:5161fe240e5824da04d51bcf5e00afcb0c18dc25
command: ["/usr/bin/format", "/dev/sda"]
- name: dm-crypt
image: linuxkit/dm-crypt:526d32351c8246431be8e1a168cb514ff3c365af
command: ["/usr/bin/crypto", "crypt_dev", "/dev/sda1"]
- name: mount
image: linuxkit/mount:a8581e454f846690d09e2e7c6287d3c84ca53257
image: linuxkit/mount:f671cb94a8999a65e33b3fe79f3def58e3d58b07
command: ["/usr/bin/mountie", "/dev/mapper/crypt_dev", "/var/secure_storage"]
- name: bbox
image: busybox
@ -32,7 +32,7 @@ services:
env:
- INSECURE=true
- name: rngd
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
files:
- path: etc/dm-crypt/key
# the below key is just to keep the example self-contained


@ -4,25 +4,25 @@ kernel:
cmdline: "console=ttyS0 page_poison=1"
init:
- linuxkit/vpnkit-expose-port:c61565ee34e58823aaf7c05fd6359a8fd889137f # install vpnkit-expose-port and vpnkit-iptables-wrapper on host
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32
onboot:
# support metadata for optional config in /run/config
- name: metadata
image: linuxkit/metadata:cd284d211eb6456961bf9988f802492b4b63d5b5
image: linuxkit/metadata:501144d47215671e77b9cac44748a04f21236195
- name: sysctl
image: linuxkit/sysctl:e5959517fab7b44692ad63941eecf37486e73799
image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
- name: sysfs
image: linuxkit/sysfs:4f58a2447d328d391143984fc4e8508b00dc6e5b
image: linuxkit/sysfs:5fd982d39ff7bec8e480c67a110acb2d3794c291
- name: binfmt
image: linuxkit/binfmt:ecd24b710cad869af6a4716809f62d7164286367
image: linuxkit/binfmt:af88a591f9cc896a52ce596b9cf7ca26a061ef97
# Format and mount the disk image in /var/lib/docker
- name: format
image: linuxkit/format:9c40b556691c1bf47394603aeb2dbdba21e7e32e
image: linuxkit/format:5161fe240e5824da04d51bcf5e00afcb0c18dc25
- name: mount
image: linuxkit/mount:a8581e454f846690d09e2e7c6287d3c84ca53257
image: linuxkit/mount:f671cb94a8999a65e33b3fe79f3def58e3d58b07
command: ["/usr/bin/mountie", "/var/lib"]
# make a swap file on the mounted disk
- name: swap
@ -75,10 +75,10 @@ services:
command: ["/vpnkit-forwarder", "-vsockPort", "62373"]
# Monitor for image deletes and invoke a TRIM on the container filesystem
- name: trim-after-delete
image: linuxkit/trim-after-delete:736054426ca2db02d62fe78b6910616b4e677107
image: linuxkit/trim-after-delete:dc2b34b38193e6c4f6596f31075a9f6288ac7b09
# When the host resumes from sleep, force a clock resync
- name: host-timesync-daemon
image: linuxkit/host-timesync-daemon:ce5c8fc7989b1c3f9674d1f64245bf8146b1a9fb
image: linuxkit/host-timesync-daemon:562161a3a49a774ccfd5c9f3ba00d933d6f61876
# Run dockerd with the vpnkit userland proxy from the vpnkit-forwarder container.
# Bind mounts /var/run to allow vsudd to connect to docker.sock, /var/vpnkit
# for vpnkit coordination and /run/config/docker for the configuration file.


@ -2,19 +2,19 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32
onboot:
- name: sysctl
image: linuxkit/sysctl:e5959517fab7b44692ad63941eecf37486e73799
image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
- name: sysfs
image: linuxkit/sysfs:4f58a2447d328d391143984fc4e8508b00dc6e5b
image: linuxkit/sysfs:5fd982d39ff7bec8e480c67a110acb2d3794c291
- name: format
image: linuxkit/format:9c40b556691c1bf47394603aeb2dbdba21e7e32e
image: linuxkit/format:5161fe240e5824da04d51bcf5e00afcb0c18dc25
- name: mount
image: linuxkit/mount:a8581e454f846690d09e2e7c6287d3c84ca53257
image: linuxkit/mount:f671cb94a8999a65e33b3fe79f3def58e3d58b07
command: ["/usr/bin/mountie", "/var/lib/docker"]
services:
- name: getty
@ -22,7 +22,7 @@ services:
env:
- INSECURE=true
- name: rngd
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
- name: dhcpcd
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
- name: ntpd


@ -2,13 +2,13 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32
onboot:
- name: sysctl
image: linuxkit/sysctl:e5959517fab7b44692ad63941eecf37486e73799
image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
- name: dhcpcd
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
@ -19,7 +19,7 @@ services:
#env:
# - INSECURE=true
- name: rngd
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
files:
- path: etc/getty.shadow
# sample sets password for root to "abcdefgh" (without quotes)


@ -2,13 +2,13 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32
onboot:
- name: sysctl
image: linuxkit/sysctl:e5959517fab7b44692ad63941eecf37486e73799
image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
- name: dhcpcd
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
@ -30,7 +30,7 @@ services:
destination: writeable-host-etc
options: ["rw", "lowerdir=/etc", "upperdir=/run/hostetc/upper", "workdir=/run/hostetc/work"]
- name: rngd
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
- name: nginx
image: nginx:1.13.8-alpine
capabilities:


@ -2,7 +2,7 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32


@ -3,14 +3,14 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32
- linuxkit/memlogd:67e3bc9d0f271336567f4bffbb80a8d57d5eddba
- linuxkit/memlogd:cf7ea20e6b68aacaa888aa178f267dcad602ed05
onboot:
- name: sysctl
image: linuxkit/sysctl:e5959517fab7b44692ad63941eecf37486e73799
image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
- name: dhcpcd
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
@ -25,6 +25,6 @@ services:
image: alpine:3.13
command: ["/bin/sh", "-c", "while /bin/true; do echo hello $(date); sleep 1; done" ]
- name: write-and-rotate-logs
image: linuxkit/logwrite:f3ba46a1785036931bd156996659e967b594bb83
image: linuxkit/logwrite:107c407b3443fb04eb0a6f69182653836c4e62f9
- name: kmsg
image: linuxkit/kmsg:d86e8b0b44a01195f5ca644adb25994a24703f0f
image: linuxkit/kmsg:ba81a0a3029b4bb7ee455f73892da9667397ca5b


@ -2,7 +2,7 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
onboot:


@ -2,7 +2,7 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=tty0 console=ttyS0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
services:
@ -11,7 +11,7 @@ services:
env:
- INSECURE=true
- name: rngd
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
- name: dhcpcd
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
- name: node_exporter


@ -2,22 +2,22 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=ttyS0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32
onboot:
- name: sysctl
image: linuxkit/sysctl:e5959517fab7b44692ad63941eecf37486e73799
image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
- name: dhcpcd
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
- name: metadata
image: linuxkit/metadata:cd284d211eb6456961bf9988f802492b4b63d5b5
image: linuxkit/metadata:501144d47215671e77b9cac44748a04f21236195
command: ["/usr/bin/metadata", "openstack"]
services:
- name: rngd
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
- name: sshd
image: linuxkit/sshd:62036c2a279715d05e8298b9269a0659964f2619
binds.add:


@ -2,21 +2,21 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=ttyS0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32
onboot:
- name: sysctl
image: linuxkit/sysctl:e5959517fab7b44692ad63941eecf37486e73799
image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
- name: dhcpcd
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
- name: metadata
image: linuxkit/metadata:cd284d211eb6456961bf9988f802492b4b63d5b5
image: linuxkit/metadata:501144d47215671e77b9cac44748a04f21236195
services:
- name: rngd
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
- name: dhcpcd2
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf"]


@ -2,16 +2,16 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=ttyS0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32
onboot:
- name: sysctl
image: linuxkit/sysctl:e5959517fab7b44692ad63941eecf37486e73799
image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
services:
- name: rngd
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
- name: dhcpcd
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
- name: sshd


@ -2,25 +2,25 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=ttyS0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32
onboot:
- name: sysctl
image: linuxkit/sysctl:e5959517fab7b44692ad63941eecf37486e73799
image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
- name: dhcpcd
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
- name: metadata
image: linuxkit/metadata:cd284d211eb6456961bf9988f802492b4b63d5b5
image: linuxkit/metadata:501144d47215671e77b9cac44748a04f21236195
services:
- name: getty
image: linuxkit/getty:06f34bce0facea79161566d67345c3ea49965437
env:
- INSECURE=true
- name: rngd
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
- name: sshd
image: linuxkit/sshd:62036c2a279715d05e8298b9269a0659964f2619
binds.add:


@ -3,26 +3,26 @@ kernel:
cmdline: console=ttyS1
ucode: intel-ucode.cpio
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32
- linuxkit/firmware:a17106a98940006529c714a3783eb03238c335a7
onboot:
- name: rngd1
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
command: ["/sbin/rngd", "-1"]
- name: sysctl
image: linuxkit/sysctl:e5959517fab7b44692ad63941eecf37486e73799
image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
- name: dhcpcd
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
- name: metadata
image: linuxkit/metadata:cd284d211eb6456961bf9988f802492b4b63d5b5
image: linuxkit/metadata:501144d47215671e77b9cac44748a04f21236195
command: ["/usr/bin/metadata", "hetzner"]
services:
- name: rngd
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
- name: getty
image: linuxkit/getty:06f34bce0facea79161566d67345c3ea49965437
env:


@ -3,26 +3,26 @@ kernel:
cmdline: console=ttyS1
ucode: intel-ucode.cpio
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32
- linuxkit/firmware:a17106a98940006529c714a3783eb03238c335a7
onboot:
- name: rngd1
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
command: ["/sbin/rngd", "-1"]
- name: sysctl
image: linuxkit/sysctl:e5959517fab7b44692ad63941eecf37486e73799
image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
- name: dhcpcd
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
- name: metadata
image: linuxkit/metadata:cd284d211eb6456961bf9988f802492b4b63d5b5
image: linuxkit/metadata:501144d47215671e77b9cac44748a04f21236195
command: ["/usr/bin/metadata", "packet"]
services:
- name: rngd
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
- name: getty
image: linuxkit/getty:06f34bce0facea79161566d67345c3ea49965437
env:


@ -2,20 +2,20 @@ kernel:
image: linuxkit/kernel:5.11.4-rt
cmdline: "console=tty0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32
onboot:
- name: sysctl
image: linuxkit/sysctl:e5959517fab7b44692ad63941eecf37486e73799
image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
services:
- name: getty
image: linuxkit/getty:06f34bce0facea79161566d67345c3ea49965437
env:
- INSECURE=true
- name: rngd
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
- name: dhcpcd
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
- name: open-vm-tools


@ -2,25 +2,25 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0 root=/dev/vda"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32
onboot:
- name: sysctl
image: linuxkit/sysctl:e5959517fab7b44692ad63941eecf37486e73799
image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
- name: rngd1
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
command: ["/sbin/rngd", "-1"]
- name: dhcpcd
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
- name: metadata
image: linuxkit/metadata:cd284d211eb6456961bf9988f802492b4b63d5b5
image: linuxkit/metadata:501144d47215671e77b9cac44748a04f21236195
services:
- name: getty
image: linuxkit/getty:06f34bce0facea79161566d67345c3ea49965437
env:
- INSECURE=true
- name: rngd
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368


@ -2,20 +2,20 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=tty0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32
onboot:
- name: sysctl
image: linuxkit/sysctl:e5959517fab7b44692ad63941eecf37486e73799
image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
services:
- name: getty
image: linuxkit/getty:06f34bce0facea79161566d67345c3ea49965437
env:
- INSECURE=true
- name: rngd
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
- name: dhcpcd
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
- name: nginx


@ -2,18 +2,18 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=ttyS0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32
onboot:
- name: sysctl
image: linuxkit/sysctl:e5959517fab7b44692ad63941eecf37486e73799
image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
- name: dhcpcd
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
- name: metadata
image: linuxkit/metadata:cd284d211eb6456961bf9988f802492b4b63d5b5
image: linuxkit/metadata:501144d47215671e77b9cac44748a04f21236195
command: ["/usr/bin/metadata", "vultr"]
services:
- name: getty
@ -21,7 +21,7 @@ services:
env:
- INSECURE=true
- name: rngd
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
- name: sshd
image: linuxkit/sshd:62036c2a279715d05e8298b9269a0659964f2619
binds.add:


@ -4,7 +4,7 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
onboot:


@ -2,15 +2,15 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32
onboot:
- name: sysctl
image: linuxkit/sysctl:e5959517fab7b44692ad63941eecf37486e73799
image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
- name: rngd1
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
command: ["/sbin/rngd", "-1"]
services:
- name: getty
@ -18,7 +18,7 @@ services:
env:
- INSECURE=true
- name: rngd
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
- name: dhcpcd
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
- name: sshd


@ -2,7 +2,7 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
onboot:


@ -2,20 +2,20 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32
onboot:
- name: sysctl
image: linuxkit/sysctl:e5959517fab7b44692ad63941eecf37486e73799
image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
- name: dhcpcd
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
- name: format
image: linuxkit/format:9c40b556691c1bf47394603aeb2dbdba21e7e32e
image: linuxkit/format:5161fe240e5824da04d51bcf5e00afcb0c18dc25
- name: mount
image: linuxkit/mount:a8581e454f846690d09e2e7c6287d3c84ca53257
image: linuxkit/mount:f671cb94a8999a65e33b3fe79f3def58e3d58b07
command: ["/usr/bin/mountie", "/var/external"]
- name: swap
image: linuxkit/swap:d17a7f1c26ff768c26b3c206ccf3aa72349568df
@ -28,4 +28,4 @@ services:
env:
- INSECURE=true
- name: rngd
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368


@ -2,13 +2,13 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=tty0 console=ttyS0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32
onboot:
- name: sysctl
image: linuxkit/sysctl:e5959517fab7b44692ad63941eecf37486e73799
image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
- name: dhcpcd
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
@ -20,7 +20,7 @@ services:
- name: tss
image: linuxkit/tss:1246031b0c9d408ceb81790a05dd37bcb9e1c4e1
- name: rngd
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
files:
- path: etc/getty.shadow
# sample sets password for root to "abcdefgh" (without quotes)


@ -2,7 +2,7 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=ttyS0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
onboot:


@ -2,7 +2,7 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=ttyS0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
onboot:


@ -2,13 +2,13 @@ kernel:
image: linuxkit/kernel:5.10.104
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32
onboot:
- name: sysctl
image: linuxkit/sysctl:e5959517fab7b44692ad63941eecf37486e73799
image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
- name: dhcpcd
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
@ -45,7 +45,7 @@ services:
- INSECURE=true
net: /run/netns/wg1
- name: rngd
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
- name: nginx
image: nginx:1.13.8-alpine
net: /run/netns/wg0


@ -2,13 +2,13 @@ kernel:
image: linuxkit/kernel:5.15.27
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0"
init:
- linuxkit/init:14df799bb3b9e0eb0491da9fda7f32a108a2e2a5
- linuxkit/init:6542ad0457ac153861870bfe2d036b6647cdc69f
- linuxkit/runc:436357ce16dd663e24f595bcec26d5ae476c998e
- linuxkit/containerd:eeb3aaf497c0b3f6c67f3a245d61ea5a568ca718
- linuxkit/ca-certificates:4de36e93dc87f7ccebd20db616ed10d381911d32
onboot:
- name: sysctl
image: linuxkit/sysctl:e5959517fab7b44692ad63941eecf37486e73799
image: linuxkit/sysctl:a88a50c104d538b58da5e1441f6f0b4b738f76a6
- name: dhcpcd
image: linuxkit/dhcpcd:2a8ed08fea442909ba10f950d458191ed3647115
command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
@ -22,7 +22,7 @@ services:
env:
- INSECURE=true
- name: rngd
image: linuxkit/rngd:331294919ba6d953d261a2694019b659a98535a4
image: linuxkit/rngd:310c16ec5315bd07d4b8f5332cfa7dc5cbc7d368
- name: nginx
image: nginx:1.19.5-alpine
capabilities:


@ -1,5 +1,5 @@
# Use Debian testing Qemu 4.2.0 until https://bugs.alpinelinux.org/issues/8131 is resolved.
FROM debian@sha256:79f148e13b4c596d4ad7fd617aba3630e37cf04f04538699341ed1267bd61a23 AS qemu
FROM debian@sha256:d828cca5497a2519da9c6d42372066895fa28a69f1e8a46a38ce8f750bd2adf0 AS qemu
RUN apt-get update && apt-get install -y qemu-user-static && \
mv /usr/bin/qemu-aarch64-static /usr/bin/qemu-aarch64 && \
mv /usr/bin/qemu-arm-static /usr/bin/qemu-arm && \
@ -12,6 +12,8 @@ FROM linuxkit/alpine:316c3f9d85c21fdd8bc7479e81d290f85bf60eb0 AS mirror
RUN apk add --no-cache go musl-dev
ENV GOPATH=/go PATH=$PATH:/go/bin
ARG GOPKGVERSION
ENV ldflags="-X main.Version=$GOPKGVERSION"
COPY . /go/src/binfmt/
RUN go-compile.sh /go/src/binfmt
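
This and the following package Dockerfiles gain the same pair of lines: an ARG GOPKGVERSION build argument and an ldflags environment variable, presumably picked up by go-compile.sh when it invokes go build. The mechanism is Go's standard -X linker flag, which overwrites a package-level string variable at link time. A minimal, hypothetical sketch of the Go side (the variable name Version is taken from the -X main.Version=... flag above; the printing is purely illustrative):

package main

import "fmt"

// Version is left empty in the source; the linker fills it in when the
// binary is built with: go build -ldflags "-X main.Version=$GOPKGVERSION"
var Version string

func main() {
	v := Version
	if v == "" {
		v = "unknown"
	}
	fmt.Println("version:", v)
}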


@ -19,6 +19,9 @@ FROM linuxkit/alpine:316c3f9d85c21fdd8bc7479e81d290f85bf60eb0 AS build
RUN apk add --no-cache go musl-dev
ENV GOPATH=/go PATH=$PATH:/go/bin
ARG GOPKGVERSION
ENV ldflags="-X main.Version=$GOPKGVERSION"
# Hack to work around an issue with go on arm64 requiring gcc
RUN [ $(uname -m) = aarch64 ] && apk add --no-cache gcc || true


@ -21,6 +21,8 @@ RUN apk add --no-cache go musl-dev
ENV GOPATH=/go PATH=$PATH:/go/bin
# Hack to work around an issue with go on arm64 requiring gcc
RUN [ $(uname -m) = aarch64 ] && apk add --no-cache gcc || true
ARG GOPKGVERSION
ENV ldflags="-X main.Version=$GOPKGVERSION"
COPY . /go/src/format/
RUN go-compile.sh /go/src/format


@ -2,6 +2,8 @@ FROM linuxkit/alpine:316c3f9d85c21fdd8bc7479e81d290f85bf60eb0 AS mirror
RUN apk add --no-cache go musl-dev git
ENV GOPATH=/go PATH=$PATH:/go/bin
ARG GOPKGVERSION
ENV ldflags="-X main.Version=$GOPKGVERSION"
COPY . /go/src/host-timesync-daemon
RUN go-compile.sh /go/src/host-timesync-daemon


@ -7,6 +7,9 @@ RUN LDFLAGS=-static CFLAGS=-Werror make usermode-helper
RUN apk add --no-cache go musl-dev
ENV GOPATH=/go PATH=$PATH:/go/bin
ARG GOPKGVERSION
ENV ldflags="-X main.Version=$GOPKGVERSION"
# Hack to work around an issue with go on arm64 requiring gcc
RUN [ $(uname -m) = aarch64 ] && apk add --no-cache gcc || true


@ -1,7 +1,10 @@
FROM linuxkit/alpine:316c3f9d85c21fdd8bc7479e81d290f85bf60eb0 AS mirror
RUN apk add --no-cache go musl-dev linux-headers
ARG GOPKGVERSION
ENV ldflags="-X main.Version=$GOPKGVERSION"
ENV GOPATH=/go PATH=$PATH:/go/bin
# Hack to work around an issue with go on arm64 requiring gcc
RUN [ $(uname -m) = aarch64 ] && apk add --no-cache gcc || true


@ -1,7 +1,10 @@
FROM linuxkit/alpine:316c3f9d85c21fdd8bc7479e81d290f85bf60eb0 AS build
RUN apk add --no-cache go musl-dev
ARG GOPKGVERSION
ENV ldflags="-X main.Version=$GOPKGVERSION"
ENV GOPATH=/go PATH=$PATH:/go/bin
# Hack to work around an issue with go on arm64 requiring gcc
RUN [ $(uname -m) = aarch64 ] && apk add --no-cache gcc || true


@ -1,6 +1,8 @@
FROM linuxkit/alpine:316c3f9d85c21fdd8bc7479e81d290f85bf60eb0 AS build
RUN apk add --no-cache go musl-dev
ARG GOPKGVERSION
ENV ldflags="-X main.Version=$GOPKGVERSION"
ENV GOPATH=/go PATH=$PATH:/go/bin
# Hack to work around an issue with go on arm64 requiring gcc
RUN [ $(uname -m) = aarch64 ] && apk add --no-cache gcc || true


@ -1,6 +1,8 @@
FROM linuxkit/alpine:316c3f9d85c21fdd8bc7479e81d290f85bf60eb0 AS mirror
RUN apk add --no-cache go musl-dev linux-headers
ARG GOPKGVERSION
ENV ldflags="-X main.Version=$GOPKGVERSION"
ENV GOPATH=/go PATH=$PATH:/go/bin GO111MODULE=off
# Hack to work around an issue with go on arm64 requiring gcc
RUN [ $(uname -m) = aarch64 ] && apk add --no-cache gcc || true


@ -3,13 +3,10 @@ module github.com/linuxkit/linuxkit/pkg/metadata
go 1.16
require (
github.com/diskfs/go-diskfs v1.2.1-0.20230123115902-fce1828bbbfa
github.com/diskfs/go-diskfs v1.3.1-0.20230612151643-22d22fd7e558
github.com/packethost/packngo v0.1.0
github.com/sirupsen/logrus v1.7.0
github.com/stretchr/testify v1.7.0 // indirect
github.com/sirupsen/logrus v1.9.0
github.com/vishvananda/netlink v0.0.0-20170808154308-f5a6f697a596
github.com/vishvananda/netns v0.0.0-20170707011535-86bef332bfc3 // indirect
golang.org/x/crypto v0.0.0-20180515001509-1a580b3eff78 // indirect
gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect
github.com/vmware/vmw-guestinfo v0.0.0-20220317130741-510905f0efa3
)


@ -1,118 +1,41 @@
4d63.com/gochecknoinits v0.0.0-20200108094044-eb73b47b9fc4/go.mod h1:4o1i5aXtIF5tJFt3UD1knCVmWOXg7fLYdHVu6jeNcnM=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/diskfs/go-diskfs v1.1.1 h1:rMjLpaydtXGVZb7mdkRGK1+//30i76nKAit89zUzeaI=
github.com/diskfs/go-diskfs v1.1.1/go.mod h1:afUPxxu+x1snp4aCY2bKR0CoZ/YFJewV3X2UEr2nPZE=
github.com/diskfs/go-diskfs v1.2.0 h1:Ow4xorEDw1VNYKbC+SA/qQNwi5gWIwdKUxmUcLFST24=
github.com/diskfs/go-diskfs v1.2.0/go.mod h1:ZTeTbzixuyfnZW5y5qKMtjV2o+GLLHo1KfMhotJI4Rk=
github.com/diskfs/go-diskfs v1.2.1-0.20230123115902-fce1828bbbfa h1:IjMOtaIqh7PYR3Pw06fMJp0UaWp4g1meiNTwcmH0Aho=
github.com/diskfs/go-diskfs v1.2.1-0.20230123115902-fce1828bbbfa/go.mod h1:3pUpCAz75Q11om5RsGpVKUgXp2Z+ATw1xV500glmCP0=
github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk=
github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU=
github.com/diskfs/go-diskfs v1.3.1-0.20230612151643-22d22fd7e558 h1:2H5E+tttRQpUKWjIHP66cCnSrn1Z7MWn4O3piQq3TgU=
github.com/diskfs/go-diskfs v1.3.1-0.20230612151643-22d22fd7e558/go.mod h1:G8cyy+ngM+3yKlqjweMmtqvE+TxsnIo1xumbJX1AeLg=
github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab h1:h1UgjJdAAhj+uPL68n7XASS6bU+07ZX1WJvVS2eyoeY=
github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab/go.mod h1:GLo/8fDswSAniFG+BFIaiSPcK610jyzgEhWYPQwuQdw=
github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gordonklaus/ineffassign v0.0.0-20190601041439-ed7b1b5ee0f8/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
github.com/jgautheron/goconst v0.0.0-20170703170152-9740945f5dcb/go.mod h1:82TxjOpWQiPmywlbIaB2ZkqJoSYJdLGPgAJDvM3PbKc=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mibk/dupl v1.0.0/go.mod h1:pCr4pNxxIbFGvtyCOi0c7LVjmV6duhKWV+ex5vh38ME=
github.com/packethost/packngo v0.1.0 h1:G/5zumXb2fbPm5MAM3y8MmugE66Ehpio5qx0IhdhTPc=
github.com/packethost/packngo v0.1.0/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M=
github.com/pierrec/lz4 v2.3.0+incompatible h1:CZzRn4Ut9GbUkHlQ7jqBXeZQV41ZSKWFc302ZU6lUTk=
github.com/pierrec/lz4 v2.3.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/xattr v0.4.1 h1:dhclzL6EqOXNaPDWqoeb9tIxATfBSmjqL0b4DpSjwRw=
github.com/pkg/xattr v0.4.1/go.mod h1:W2cGD0TBEus7MkUgv0tNZ9JutLtVO3cXu+IBRuHqnFs=
github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE=
github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/sirupsen/logrus v1.0.3 h1:B5C/igNWoiULof20pKfY4VntcIPqKuwEmoLZrabbUrc=
github.com/sirupsen/logrus v1.0.3/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stripe/safesql v0.2.0/go.mod h1:q7b2n0JmzM1mVGfcYpanfVb2j23cXZeWFxcILPn3JV4=
github.com/tsenart/deadcode v0.0.0-20160724212837-210d2dc333e9/go.mod h1:q+QjxYvZ+fpjMXqs+XEriussHjSYqeXVnAdSV1tkMYk=
github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/vishvananda/netlink v0.0.0-20170808154308-f5a6f697a596 h1:K6pwCps8j1ylaB37G0r6hGajvbNsdm+0ITJ6L88r65w=
github.com/vishvananda/netlink v0.0.0-20170808154308-f5a6f697a596/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netns v0.0.0-20170707011535-86bef332bfc3 h1:NcYCJC+LbOrfvuf/uHeM/kxh6vOmiuInC4GAWRdc+P0=
github.com/vishvananda/netns v0.0.0-20170707011535-86bef332bfc3/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
golang.org/x/crypto v0.0.0-20180515001509-1a580b3eff78 h1:uJIReYEB1ZZLarzi83Pmig1HhZ/cwFCysx05l0PFBIk=
golang.org/x/crypto v0.0.0-20180515001509-1a580b3eff78/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20181021155630-eda9bb28ed51/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f h1:gWF768j/LaZugp8dyS4UwsslYCYz9XgFxvlgsn0n9H8=
golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
github.com/vmware/vmw-guestinfo v0.0.0-20220317130741-510905f0efa3 h1:v6jG/tdl4O07LNVp74Nt7/OyL+1JsIW1M2f/nSvQheY=
github.com/vmware/vmw-guestinfo v0.0.0-20220317130741-510905f0efa3/go.mod h1:CSBTxrhePCm0cmXNKDGeu+6bOQzpaEklfCqEpn89JWk=
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20200102200121-6de373a2766c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/djherbis/times.v1 v1.2.0 h1:UCvDKl1L/fmBygl2Y7hubXCnY7t4Yj46ZrBFNUipFbM=
gopkg.in/djherbis/times.v1 v1.2.0/go.mod h1:AQlg6unIsrsCEdQYhTzERy542dz6SFdQFZFv6mUY0P8=
gopkg.in/djherbis/times.v1 v1.3.0 h1:uxMS4iMtH6Pwsxog094W0FYldiNnfY/xba00vq6C2+o=
gopkg.in/djherbis/times.v1 v1.3.0/go.mod h1:AQlg6unIsrsCEdQYhTzERy542dz6SFdQFZFv6mUY0P8=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=


@ -82,6 +82,7 @@ func FindCIs() []string {
log.Debugf("failed to open device read-only: %s: %v", dev, err)
continue
}
disk.DefaultBlocks = true // because this is passed through as a block device, we can get strange blocksize numbers from the OS
fs, err := disk.GetFilesystem(0)
if err != nil {
log.Debugf("failed to get filesystem on partition 0 for device: %s: %v", dev, err)


@ -21,7 +21,6 @@ linters:
disable-all: true
enable:
- bodyclose
- deadcode
- depguard
- dogsled
- dupl
@ -43,18 +42,16 @@ linters:
- predeclared
- revive
- staticcheck
- structcheck
- stylecheck
- thelper
- tparallel
- typecheck
- unconvert
- unparam
- varcheck
- whitespace
# - wsl # extra blank lines and the like, purely stylistic
# - goconst # flags repeated values that should be pulled out into consts
# - gomnd # hunts for assorted "magic" numbers and variables
run:
issues-exit-code: 1
issues-exit-code: 1


@ -6,7 +6,7 @@ GOENV ?= GO111MODULE=on CGO_ENABLED=0
GO_FILES ?= $(shell $(GOENV) go list ./...)
GOBIN ?= $(shell go env GOPATH)/bin
LINTER ?= $(GOBIN)/golangci-lint
LINTER_VERSION ?= v1.49.0
LINTER_VERSION ?= v1.51.2
# BUILDARCH is the host architecture
# ARCH is the target architecture


@ -105,7 +105,6 @@ package diskfs
import (
"errors"
"fmt"
"io"
"os"
log "github.com/sirupsen/logrus"
@ -139,6 +138,8 @@ const (
ReadOnly OpenModeOption = iota
// ReadWriteExclusive open file in read-write exclusive mode
ReadWriteExclusive
// ReadWrite open file in read-write mode
ReadWrite
)
// OpenModeOption.String()
@ -148,6 +149,8 @@ func (m OpenModeOption) String() string {
return "read-only"
case ReadWriteExclusive:
return "read-write exclusive"
case ReadWrite:
return "read-write"
default:
return "unknown"
}
@ -156,6 +159,7 @@ func (m OpenModeOption) String() string {
var openModeOptions = map[OpenModeOption]int{
ReadOnly: os.O_RDONLY,
ReadWriteExclusive: os.O_RDWR | os.O_EXCL,
ReadWrite: os.O_RDWR,
}
// SectorSize represents the sector size to use
@ -213,14 +217,9 @@ func initDisk(f *os.File, openMode OpenModeOption, sectorSize SectorSize) (*disk
case mode&os.ModeDevice != 0:
log.Debug("initDisk(): block device")
diskType = disk.Device
file, err := os.Open(f.Name())
size, err = getBlockDeviceSize(f)
if err != nil {
return nil, fmt.Errorf("error opening block device %s: %s", f.Name(), err)
}
defer file.Close()
size, err = file.Seek(0, io.SeekEnd)
if err != nil {
return nil, fmt.Errorf("error seeking to end of block device %s: %s", f.Name(), err)
return nil, fmt.Errorf("error getting block device %s size: %s", f.Name(), err)
}
lblksize, pblksize, err = getSectorSizes(f)
log.Debugf("initDisk(): logical block size %d, physical block size %d", lblksize, pblksize)
@ -327,7 +326,7 @@ func Open(device string, opts ...OpenOpt) (*disk.Disk, error) {
f, err := os.OpenFile(device, m, 0o600)
if err != nil {
return nil, fmt.Errorf("could not open device %s exclusively for writing", device)
return nil, fmt.Errorf("could not open device %s with mode %v: %w", device, m, err)
}
// return our disk
return initDisk(f, ReadWriteExclusive, opt.sectorSize)


@ -14,13 +14,24 @@ const (
DKIOCGETBLOCKCOUNT = 0x40086419
)
// getBlockDeviceSize get the size of an opened block device in Bytes.
func getBlockDeviceSize(f *os.File) (int64, error) {
fd := f.Fd()
blockSize, err := unix.IoctlGetInt(int(fd), DKIOCGETBLOCKSIZE)
if err != nil {
return 0, fmt.Errorf("unable to get device logical sector size: %v", err)
}
blockCount, err := unix.IoctlGetInt(int(fd), DKIOCGETBLOCKCOUNT)
if err != nil {
return 0, fmt.Errorf("unable to get device block count: %v", err)
}
return int64(blockSize) * int64(blockCount), nil
}
// getSectorSizes get the logical and physical sector sizes for a block device
func getSectorSizes(f *os.File) (logicalSectorSize, physicalSectorSize int64, err error) {
//nolint:gocritic // we keep this for reference to the underlying syscall
/*
ioctl(fd, BLKPBSZGET, &physicalsectsize);
*/
fd := f.Fd()
logicalSectorSizeInt, err := unix.IoctlGetInt(int(fd), DKIOCGETBLOCKSIZE)


@ -1,6 +1,3 @@
//go:build linux || solaris || aix || freebsd || illumos || netbsd || openbsd || plan9
// +build linux solaris aix freebsd illumos netbsd openbsd plan9
package diskfs
import (
@ -10,6 +7,15 @@ import (
"golang.org/x/sys/unix"
)
// getBlockDeviceSize get the size of an opened block device in Bytes.
func getBlockDeviceSize(f *os.File) (int64, error) {
blockDeviceSize, err := unix.IoctlGetInt(int(f.Fd()), unix.BLKGETSIZE64)
if err != nil {
return 0, fmt.Errorf("unable to get block device size: %v", err)
}
return int64(blockDeviceSize), nil
}
// getSectorSizes get the logical and physical sector sizes for a block device
func getSectorSizes(f *os.File) (logicalSectorSize, physicalSectorSize int64, err error) {
//


@ -0,0 +1,18 @@
//go:build !windows && !linux && !darwin
package diskfs
import (
"errors"
"os"
)
// getBlockDeviceSize get the size of an opened block device in Bytes.
func getBlockDeviceSize(f *os.File) (int64, error) {
return 0, errors.New("block devices not supported on this platform")
}
// getSectorSizes get the logical and physical sector sizes for a block device
func getSectorSizes(f *os.File) (logicalSectorSize, physicalSectorSize int64, err error) {
return 0, 0, errors.New("block devices not supported on this platform")
}


@ -5,6 +5,11 @@ import (
"os"
)
// getBlockDeviceSize get the size of an opened block device in Bytes.
func getBlockDeviceSize(f *os.File) (int64, error) {
return 0, errors.New("block devices not supported on windows")
}
// getSectorSizes get the logical and physical sector sizes for a block device
func getSectorSizes(f *os.File) (int64, int64, error) {
return 0, 0, errors.New("block devices not supported on windows")


@ -0,0 +1,116 @@
package filesystem
import (
"io/fs"
"os"
"path"
"time"
)
type fsCompatible struct {
fs FileSystem
}
type fsFileWrapper struct {
File
stat os.FileInfo
}
type fakeRootDir struct{}
func (d *fakeRootDir) Name() string { return "/" }
func (d *fakeRootDir) Size() int64 { return 0 }
func (d *fakeRootDir) Mode() fs.FileMode { return 0 }
func (d *fakeRootDir) ModTime() time.Time { return time.Now() }
func (d *fakeRootDir) IsDir() bool { return true }
func (d *fakeRootDir) Sys() any { return nil }
type fsDirWrapper struct {
name string
compat *fsCompatible
stat os.FileInfo
}
func (f *fsDirWrapper) Close() error {
return nil
}
func (f *fsDirWrapper) Read([]byte) (int, error) {
return 0, fs.ErrInvalid
}
func (f *fsDirWrapper) ReadDir(n int) ([]fs.DirEntry, error) {
entries, err := f.compat.ReadDir(f.name)
if err != nil {
return nil, err
}
if n < 0 || n >= len(entries) {
n = len(entries)
}
return entries[:n], nil
}
func (f *fsDirWrapper) Stat() (fs.FileInfo, error) {
return f.stat, nil
}
func (f *fsFileWrapper) Stat() (fs.FileInfo, error) {
return f.stat, nil
}
// Converts the relative path name to an absolute one
func absoluteName(name string) string {
if name == "." {
name = "/"
}
if name[0] != '/' {
name = "/" + name
}
return name
}
func (f *fsCompatible) Open(name string) (fs.File, error) {
var stat os.FileInfo
name = absoluteName(name)
if name == "/" {
return &fsDirWrapper{name: name, compat: f, stat: &fakeRootDir{}}, nil
}
dirname := path.Dir(name)
if info, err := f.fs.ReadDir(dirname); err == nil {
for i := range info {
if info[i].Name() == path.Base(name) {
stat = info[i]
break
}
}
}
if stat == nil {
return nil, fs.ErrNotExist
}
if stat.IsDir() {
return &fsDirWrapper{name: name, compat: f, stat: stat}, nil
}
file, err := f.fs.OpenFile(name, os.O_RDONLY)
if err != nil {
return nil, err
}
return &fsFileWrapper{File: file, stat: stat}, nil
}
func (f *fsCompatible) ReadDir(name string) ([]fs.DirEntry, error) {
entries, err := f.fs.ReadDir(name)
if err != nil {
return nil, err
}
direntries := make([]fs.DirEntry, len(entries))
for i := range entries {
direntries[i] = fs.FileInfoToDirEntry(entries[i])
}
return direntries, nil
}
// FS converts a diskfs FileSystem to a fs.FS for compatibility with
// other utilities
func FS(f FileSystem) fs.ReadDirFS {
return &fsCompatible{f}
}
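A short, hedged sketch of how the new wrapper might be consumed: it assumes a filesystem.FileSystem obtained elsewhere (for example from an opened disk image) and walks it with the standard library's fs.WalkDir. The helper name listFiles is made up for the example.

```go
package example

import (
	"fmt"
	"io/fs"

	"github.com/diskfs/go-diskfs/filesystem"
)

// listFiles walks a diskfs filesystem through the fs.FS compatibility wrapper.
func listFiles(fsys filesystem.FileSystem) error {
	compat := filesystem.FS(fsys) // wrap as an fs.ReadDirFS
	return fs.WalkDir(compat, ".", func(p string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(p, d.IsDir())
		return nil
	})
}
```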


@ -6,6 +6,8 @@ import (
"regexp"
"strings"
"time"
"github.com/elliotwutingfeng/asciiset"
)
// AccessRights is the byte mask representing access rights to a FAT file
@ -18,60 +20,7 @@ const (
)
// valid shortname characters - [A-Z][0-9][$%'-_@~`!(){}^#&]
var validShortNameCharacters = map[byte]bool{
0x21: true, // !
0x23: true, // #
0x24: true, // $
0x25: true, // %
0x26: true, // &
0x27: true, // '
0x28: true, // (
0x29: true, // )
0x2d: true, // -
0x30: true, // 0
0x31: true, // 1
0x32: true, // 2
0x33: true, // 3
0x34: true, // 4
0x35: true, // 5
0x36: true, // 6
0x37: true, // 7
0x38: true, // 8
0x39: true, // 9
0x40: true, // @
0x41: true, // A
0x42: true, // B
0x43: true, // C
0x44: true, // D
0x45: true, // E
0x46: true, // F
0x47: true, // G
0x48: true, // H
0x49: true, // I
0x4a: true, // J
0x4b: true, // K
0x4c: true, // L
0x4d: true, // M
0x4e: true, // N
0x4f: true, // O
0x50: true, // P
0x51: true, // Q
0x52: true, // R
0x53: true, // S
0x54: true, // T
0x55: true, // U
0x56: true, // V
0x57: true, // W
0x58: true, // X
0x59: true, // Y
0x5a: true, // Z
0x5e: true, // ^
0x5f: true, // _
0x60: true, // `
0x7b: true, // {
0x7d: true, // }
0x7e: true, // ~
}
var validShortNameCharacters, _ = asciiset.MakeASCIISet("!#$%&'()-0123456789@ABCDEFGHIJKLMNOPQRSTUVWXYZ^_`{}~")
// directoryEntry is a single directory entry
//
@ -401,7 +350,7 @@ func stringToValidASCIIBytes(s string) ([]byte, error) {
// now make sure every byte is valid
for _, b2 := range b {
// only valid chars - 0-9, A-Z, _, ~
if validShortNameCharacters[b2] {
if validShortNameCharacters.Contains(b2) {
continue
}
return nil, fmt.Errorf("invalid 8.3 character")
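An illustrative sketch of the replacement lookup in isolation: the set is built once from the same character string and Contains takes the place of the former map[byte]bool index. The helper name isValidShortNameByte is invented for the example.

```go
package example

import "github.com/elliotwutingfeng/asciiset"

var shortNameChars, _ = asciiset.MakeASCIISet("!#$%&'()-0123456789@ABCDEFGHIJKLMNOPQRSTUVWXYZ^_`{}~")

// isValidShortNameByte reports whether b may appear in an 8.3 file name.
func isValidShortNameByte(b byte) bool {
	return shortNameChars.Contains(b)
}
```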
@ -489,7 +438,7 @@ func uCaseValid(name string) string {
r2 := make([]rune, 0, len(r))
for _, val := range r {
switch {
case validShortNameCharacters[byte(val)]:
case validShortNameCharacters.Contains(byte(val)):
r2 = append(r2, val)
case (0x61 <= val && val <= 0x7a):
// lower-case characters should be upper-cased


@ -7,7 +7,7 @@ import (
"fmt"
"io"
"github.com/pierrec/lz4"
"github.com/pierrec/lz4/v4"
"github.com/ulikunitz/xz"
"github.com/ulikunitz/xz/lzma"
)


@ -1,16 +1,15 @@
module github.com/diskfs/go-diskfs
go 1.16
go 1.19
require (
github.com/frankban/quicktest v1.13.0 // indirect
github.com/go-test/deep v1.0.8 // indirect
github.com/google/go-cmp v0.5.8 // indirect
github.com/google/uuid v1.1.1
github.com/pierrec/lz4 v2.3.0+incompatible
github.com/pkg/xattr v0.4.1
github.com/sirupsen/logrus v1.7.0
github.com/ulikunitz/xz v0.5.10
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22
gopkg.in/djherbis/times.v1 v1.2.0
github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab
github.com/go-test/deep v1.0.8
github.com/google/uuid v1.3.0
github.com/pierrec/lz4/v4 v4.1.17
github.com/pkg/xattr v0.4.9
github.com/sirupsen/logrus v1.9.0
github.com/ulikunitz/xz v0.5.11
golang.org/x/sys v0.5.0
gopkg.in/djherbis/times.v1 v1.3.0
)


@ -1,37 +1,31 @@
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk=
github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU=
github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab h1:h1UgjJdAAhj+uPL68n7XASS6bU+07ZX1WJvVS2eyoeY=
github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab/go.mod h1:GLo/8fDswSAniFG+BFIaiSPcK610jyzgEhWYPQwuQdw=
github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/pierrec/lz4 v2.3.0+incompatible h1:CZzRn4Ut9GbUkHlQ7jqBXeZQV41ZSKWFc302ZU6lUTk=
github.com/pierrec/lz4 v2.3.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/xattr v0.4.1 h1:dhclzL6EqOXNaPDWqoeb9tIxATfBSmjqL0b4DpSjwRw=
github.com/pkg/xattr v0.4.1/go.mod h1:W2cGD0TBEus7MkUgv0tNZ9JutLtVO3cXu+IBRuHqnFs=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE=
github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
golang.org/x/sys v0.0.0-20181021155630-eda9bb28ed51/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/djherbis/times.v1 v1.2.0 h1:UCvDKl1L/fmBygl2Y7hubXCnY7t4Yj46ZrBFNUipFbM=
gopkg.in/djherbis/times.v1 v1.2.0/go.mod h1:AQlg6unIsrsCEdQYhTzERy542dz6SFdQFZFv6mUY0P8=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/djherbis/times.v1 v1.3.0 h1:uxMS4iMtH6Pwsxog094W0FYldiNnfY/xba00vq6C2+o=
gopkg.in/djherbis/times.v1 v1.3.0/go.mod h1:AQlg6unIsrsCEdQYhTzERy542dz6SFdQFZFv6mUY0P8=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=


@ -1,6 +1,7 @@
package gpt
import (
"bytes"
"encoding/binary"
"fmt"
"io"
@ -15,6 +16,8 @@ import (
// PartitionEntrySize fixed size of a GPT partition entry
const PartitionEntrySize = 128
var zeroUUIDBytes = make([]byte, 16)
// Partition represents the structure of a single partition on the disk
type Partition struct {
Start uint64 // start sector for the partition
@ -90,6 +93,9 @@ func partitionFromBytes(b []byte, logicalSectorSize, physicalSectorSize int) (*P
return nil, fmt.Errorf("data for partition was %d bytes instead of expected %d", len(b), PartitionEntrySize)
}
// is it all zeroes?
if bytes.Equal(b[0:16], zeroUUIDBytes) {
return nil, nil
}
typeGUID, err := uuid.FromBytes(bytesToUUIDBytes(b[0:16]))
if err != nil {
return nil, fmt.Errorf("unable to read partition type GUID: %v", err)


@ -294,7 +294,7 @@ func (t *Table) toGPTBytes(primary bool) ([]byte, error) {
copy(b[56:72], bytesToUUIDBytes(guid[0:16]))
// starting LBA of array of partition entries
binary.LittleEndian.PutUint64(b[72:80], t.partitionArraySector(primary))
binary.LittleEndian.PutUint64(b[72:80], t.partitionArraySector(true))
// how many entries?
binary.LittleEndian.PutUint32(b[80:84], uint32(t.partitionArraySize))
@ -337,6 +337,9 @@ func readPartitionArrayBytes(b []byte, entrySize, logicalSectorSize, physicalSec
if err != nil {
return nil, fmt.Errorf("error reading partition entry %d: %v", i, err)
}
if p == nil {
continue
}
// augment partition information
p.Size = (p.End - p.Start + 1) * uint64(logicalSectorSize)
parts = append(parts, p)
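A minimal sketch of the new all-zeroes guard on its own: a GPT partition entry whose type GUID field is entirely zero is an unused slot, so partitionFromBytes now returns (nil, nil) and readPartitionArrayBytes skips it. The helper name isUnusedEntry is illustrative.

```go
package example

import "bytes"

var zeroUUIDBytes = make([]byte, 16)

// isUnusedEntry reports whether a GPT partition entry's first 16 bytes
// (the partition type GUID) are all zero, marking the slot as unused.
func isUnusedEntry(entry []byte) bool {
	return len(entry) >= 16 && bytes.Equal(entry[0:16], zeroUUIDBytes)
}
```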


@ -0,0 +1,23 @@
# If you prefer the allow list template instead of the deny list, see community template:
# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
#
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
*.html
*.prof
# Dependency directories (remove the comment below to include it)
# vendor/
# Go workspace file
go.work

File diff suppressed because one or more lines are too long

[Image preview: 655 KiB]


@ -0,0 +1,128 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
wutingfeng@outlook.com.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.


@ -0,0 +1,69 @@
# Credits
This application uses code from other open-source projects. The copyright statements of these open-source projects are listed below.
## Bit
Source: <https://github.com/yourbasic/bit>
```markdown
BSD 2-Clause License
Copyright (c) 2017, Stefan Nilsson
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
## Go
Source: <https://github.com/golang/go>
```markdown
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```


@ -0,0 +1,28 @@
BSD 3-Clause License
Copyright (c) 2022, Wu Tingfeng
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -0,0 +1,20 @@
tests:
go test -v -race -covermode atomic -coverprofile coverage.out && go tool cover -html coverage.out -o coverage.html
tests_without_race:
go test -v -covermode atomic -coverprofile coverage.out && go tool cover -html coverage.out -o coverage.html
format:
go fmt ./...
bench:
go test -bench . -benchmem -cpu 1
report_bench:
go test -cpuprofile cpu.prof -memprofile mem.prof -bench . -cpu 1
cpu_report:
go tool pprof cpu.prof
mem_report:
go tool pprof mem.prof


@ -0,0 +1,95 @@
# asciiset
[![Go Reference](https://img.shields.io/badge/go-reference-blue?logo=go&logoColor=white&style=for-the-badge)](https://pkg.go.dev/github.com/elliotwutingfeng/asciiset)
[![Go Report Card](https://goreportcard.com/badge/github.com/elliotwutingfeng/asciiset?style=for-the-badge)](https://goreportcard.com/report/github.com/elliotwutingfeng/asciiset)
[![Codecov Coverage](https://img.shields.io/codecov/c/github/elliotwutingfeng/asciiset?color=bright-green&logo=codecov&style=for-the-badge&token=5ukdyK4pOG)](https://codecov.io/gh/elliotwutingfeng/asciiset)
[![GitHub license](https://img.shields.io/badge/LICENSE-BSD--3--CLAUSE-GREEN?style=for-the-badge)](LICENSE)
## Summary
**asciiset** is an [ASCII](https://simple.wikipedia.org/wiki/ASCII) character bitset.
Bitsets are fast and memory-efficient data structures for storing and retrieving information using bitwise operations.
**asciiset** is an extension of the **asciiSet** data structure from the Go Standard library [source code](https://cs.opensource.google/go/go/+/master:src/bytes/bytes.go).
Possible applications include checking strings for prohibited ASCII characters, and counting unique ASCII characters in a string.
Spot any bugs? Report them [here](https://github.com/elliotwutingfeng/asciiset/issues).
![ASCII Table](ASCII-Table.svg)
## Installation
```bash
go get github.com/elliotwutingfeng/asciiset
```
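A brief usage sketch (not part of the upstream README): build a set from a string, then test membership.

```go
package main

import (
	"fmt"

	"github.com/elliotwutingfeng/asciiset"
)

func main() {
	// MakeASCIISet reports ok=false if any character is non-ASCII.
	vowels, ok := asciiset.MakeASCIISet("aeiou")
	fmt.Println(ok) // true

	vowels.Add('y')
	fmt.Println(vowels.Contains('e')) // true
	fmt.Println(vowels.Contains('z')) // false
	fmt.Println(vowels.Size())        // 6
}
```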
## Testing
```bash
make tests
# Alternatively, run tests without race detection
# Useful for systems that do not support the -race flag like windows/386
# See https://tip.golang.org/src/cmd/dist/test.go
make tests_without_race
```
## Benchmarks
```bash
make bench
```
### Results
```text
CPU: AMD Ryzen 7 5800X
Time in nanoseconds (ns) | Lower is better
ASCIISet
Add() ▏ 891 🟦🟦🟦 11x faster
Contains() ▏ 580 🟦🟦 28x faster
Remove() ▏ 1570 🟦🟦🟦🟦 1.5x faster
Size() ▏ 313 🟦 equivalent
Visit() ▏ 1421 🟦🟦🟦🟦 3.5x faster
map[byte]struct{}
Add() ▏ 9850 🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥
Contains() ▏16605 🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥
Remove() ▏ 2510 🟥🟥🟥🟥🟥🟥
Size() ▏ 318 🟥
Visit() ▏ 5085 🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥
```
```bash
go test -bench . -benchmem -cpu 1
goos: linux
goarch: amd64
pkg: github.com/elliotwutingfeng/asciiset
cpu: AMD Ryzen 7 5800X 8-Core Processor
BenchmarkASCIISet/ASCIISet_Add() 1340958 891.8 ns/op 0 B/op 0 allocs/op
BenchmarkASCIISet/ASCIISet_Contains() 2058140 580.9 ns/op 0 B/op 0 allocs/op
BenchmarkASCIISet/ASCIISet_Remove() 762636 1570 ns/op 0 B/op 0 allocs/op
BenchmarkASCIISet/ASCIISet_Size() 3808866 313.2 ns/op 0 B/op 0 allocs/op
BenchmarkASCIISet/ASCIISet_Visit() 840808 1421 ns/op 0 B/op 0 allocs/op
BenchmarkMapSet/map_Add 122043 9850 ns/op 0 B/op 0 allocs/op
BenchmarkMapSet/map_Contains 72583 16605 ns/op 0 B/op 0 allocs/op
BenchmarkMapSet/map_Remove 451785 2510 ns/op 0 B/op 0 allocs/op
BenchmarkMapSet/map_Size 3789381 318.3 ns/op 0 B/op 0 allocs/op
BenchmarkMapSet/map_Visit 235515 5085 ns/op 0 B/op 0 allocs/op
PASS
ok github.com/elliotwutingfeng/asciiset 14.438s
```


@ -0,0 +1,116 @@
// Package asciiset is an ASCII character bitset
package asciiset
import (
"unicode/utf8"
)
// ASCIISet is a 36-byte value, where each bit in the first 32-bytes
// represents the presence of a given ASCII character in the set.
// The remaining 4-bytes is a counter for the number of ASCII characters in the set.
// The 128-bits of the first 16 bytes, starting with the least-significant bit
// of the lowest word to the most-significant bit of the highest word,
// map to the full range of all 128 ASCII characters.
// The 128-bits of the next 16 bytes will be zeroed,
// ensuring that any non-ASCII character will be reported as not in the set.
// Rejecting non-ASCII characters in this way avoids bounds checks in ASCIISet.Contains.
type ASCIISet [9]uint32
// MakeASCIISet creates a set of ASCII characters and reports whether all
// characters in chars are ASCII.
func MakeASCIISet(chars string) (as ASCIISet, ok bool) {
for i := 0; i < len(chars); i++ {
c := chars[i]
if c >= utf8.RuneSelf {
return as, false
}
as.Add(c)
}
return as, true
}
// Add inserts character c into the set.
func (as *ASCIISet) Add(c byte) {
if c < utf8.RuneSelf { // ensure that c is an ASCII byte
before := as[c/32]
as[c/32] |= 1 << (c % 32)
if before != as[c/32] {
as[8]++
}
}
}
// Contains reports whether c is inside the set.
func (as *ASCIISet) Contains(c byte) bool {
return (as[c/32] & (1 << (c % 32))) != 0
}
// Remove removes c from the set
//
// if c is not in the set, the set contents will remain unchanged.
func (as *ASCIISet) Remove(c byte) {
if c < utf8.RuneSelf { // ensure that c is an ASCII byte
before := as[c/32]
as[c/32] &^= 1 << (c % 32)
if before != as[c/32] {
as[8]--
}
}
}
// Size returns the number of characters in the set.
func (as *ASCIISet) Size() int {
return int(as[8])
}
// Union returns a new set containing all characters that belong to either as and as2.
func (as *ASCIISet) Union(as2 ASCIISet) (as3 ASCIISet) {
as3[0] = as[0] | as2[0]
as3[1] = as[1] | as2[1]
as3[2] = as[2] | as2[2]
as3[3] = as[3] | as2[3]
return
}
// Intersection returns a new set containing all characters that belong to both as and as2.
func (as *ASCIISet) Intersection(as2 ASCIISet) (as3 ASCIISet) {
as3[0] = as[0] & as2[0]
as3[1] = as[1] & as2[1]
as3[2] = as[2] & as2[2]
as3[3] = as[3] & as2[3]
return
}
// Subtract returns a new set containing all characters that belong to as but not as2.
func (as *ASCIISet) Subtract(as2 ASCIISet) (as3 ASCIISet) {
as3[0] = as[0] &^ as2[0]
as3[1] = as[1] &^ as2[1]
as3[2] = as[2] &^ as2[2]
as3[3] = as[3] &^ as2[3]
return
}
// Equals reports whether as contains the same characters as as2.
func (as *ASCIISet) Equals(as2 ASCIISet) bool {
return as[0] == as2[0] && as[1] == as2[1] && as[2] == as2[2] && as[3] == as2[3]
}
// Visit calls the do function for each character of as in ascending numerical order.
// If do returns true, Visit returns immediately, skipping any remaining
// characters, and returns true. It is safe for do to Add or Remove
// characters. The behavior of Visit is undefined if do changes
// the set in any other way.
func (as *ASCIISet) Visit(do func(n byte) (skip bool)) (aborted bool) {
var currentChar byte
for i := uint(0); i < 4; i++ {
for j := uint(0); j < 32; j++ {
if (as[i] & (1 << j)) != 0 {
if do(currentChar) {
return true
}
}
currentChar++
}
}
return false
}
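A sketch tying the API together for one of the use cases the README mentions above: counting and enumerating the distinct ASCII characters in a string.

```go
package main

import (
	"fmt"

	"github.com/elliotwutingfeng/asciiset"
)

func main() {
	var set asciiset.ASCIISet
	s := "abracadabra"
	for i := 0; i < len(s); i++ {
		set.Add(s[i])
	}
	fmt.Println(set.Size()) // 5 distinct characters: a b c d r

	// Visit enumerates members in ascending byte order.
	set.Visit(func(c byte) bool {
		fmt.Printf("%c ", c)
		return false // keep going
	})
}
```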


@ -0,0 +1,25 @@
codecov:
require_ci_to_pass: yes
coverage:
precision: 2
round: down
range: "90...100"
status:
project:
default:
target: 90%
threshold: 5%
patch: off
parsers:
gcov:
branch_detection:
conditional: yes
loop: yes
method: no
macro: no
comment:
layout: "reach,diff,flags,files,footer"
behavior: default
require_changes: no


@ -0,0 +1,3 @@
module github.com/elliotwutingfeng/asciiset
go 1.11


@ -16,4 +16,4 @@ change is the ability to represent an invalid UUID (vs a NIL UUID).
Full `go doc` style documentation for the package can be viewed online without
installing this package by using the GoDoc site here:
http://godoc.org/github.com/google/uuid
http://pkg.go.dev/github.com/google/uuid


@ -26,8 +26,8 @@ var (
// NewMD5 and NewSHA1.
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
h.Reset()
h.Write(space[:])
h.Write(data)
h.Write(space[:]) //nolint:errcheck
h.Write(data) //nolint:errcheck
s := h.Sum(nil)
var uuid UUID
copy(uuid[:], s)
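For context, a hedged sketch of how NewHash is normally reached via the name-based constructors; the namespace and URL below are placeholders.

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// NewSHA1 (version 5) hashes a namespace UUID plus a name; the result is
	// deterministic, so the same inputs always yield the same UUID.
	u := uuid.NewSHA1(uuid.NameSpaceURL, []byte("https://example.com/"))
	fmt.Println(u)
}
```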


@ -16,10 +16,11 @@ func (uuid UUID) MarshalText() ([]byte, error) {
// UnmarshalText implements encoding.TextUnmarshaler.
func (uuid *UUID) UnmarshalText(data []byte) error {
id, err := ParseBytes(data)
if err == nil {
*uuid = id
if err != nil {
return err
}
return err
*uuid = id
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler.

pkg/metadata/vendor/github.com/google/uuid/null.go (generated, vendored, new file, 118 lines)

@ -0,0 +1,118 @@
// Copyright 2021 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"bytes"
"database/sql/driver"
"encoding/json"
"fmt"
)
var jsonNull = []byte("null")
// NullUUID represents a UUID that may be null.
// NullUUID implements the SQL driver.Scanner interface so
// it can be used as a scan destination:
//
// var u uuid.NullUUID
// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
// ...
// if u.Valid {
// // use u.UUID
// } else {
// // NULL value
// }
//
type NullUUID struct {
UUID UUID
Valid bool // Valid is true if UUID is not NULL
}
// Scan implements the SQL driver.Scanner interface.
func (nu *NullUUID) Scan(value interface{}) error {
if value == nil {
nu.UUID, nu.Valid = Nil, false
return nil
}
err := nu.UUID.Scan(value)
if err != nil {
nu.Valid = false
return err
}
nu.Valid = true
return nil
}
// Value implements the driver Valuer interface.
func (nu NullUUID) Value() (driver.Value, error) {
if !nu.Valid {
return nil, nil
}
// Delegate to UUID Value function
return nu.UUID.Value()
}
// MarshalBinary implements encoding.BinaryMarshaler.
func (nu NullUUID) MarshalBinary() ([]byte, error) {
if nu.Valid {
return nu.UUID[:], nil
}
return []byte(nil), nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (nu *NullUUID) UnmarshalBinary(data []byte) error {
if len(data) != 16 {
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
}
copy(nu.UUID[:], data)
nu.Valid = true
return nil
}
// MarshalText implements encoding.TextMarshaler.
func (nu NullUUID) MarshalText() ([]byte, error) {
if nu.Valid {
return nu.UUID.MarshalText()
}
return jsonNull, nil
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (nu *NullUUID) UnmarshalText(data []byte) error {
id, err := ParseBytes(data)
if err != nil {
nu.Valid = false
return err
}
nu.UUID = id
nu.Valid = true
return nil
}
// MarshalJSON implements json.Marshaler.
func (nu NullUUID) MarshalJSON() ([]byte, error) {
if nu.Valid {
return json.Marshal(nu.UUID)
}
return jsonNull, nil
}
// UnmarshalJSON implements json.Unmarshaler.
func (nu *NullUUID) UnmarshalJSON(data []byte) error {
if bytes.Equal(data, jsonNull) {
*nu = NullUUID{}
return nil // valid null UUID
}
err := json.Unmarshal(data, &nu.UUID)
nu.Valid = err == nil
return err
}
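A small sketch (no assumptions beyond the package itself) showing the new NullUUID type round-tripping through JSON: a JSON null maps to Valid=false, a UUID string to Valid=true.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

func main() {
	var nu uuid.NullUUID

	_ = json.Unmarshal([]byte(`null`), &nu)
	fmt.Println(nu.Valid) // false

	_ = json.Unmarshal([]byte(`"10ba038e-48da-487b-96e8-8d3b99b6d18a"`), &nu)
	fmt.Println(nu.Valid, nu.UUID) // true 10ba038e-48da-487b-96e8-8d3b99b6d18a
}
```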


@ -9,7 +9,7 @@ import (
"fmt"
)
// Scan implements sql.Scanner so UUIDs can be read from databases transparently
// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
// Currently, database types that map to string and []byte are supported. Please
// consult database-specific driver documentation for matching types.
func (uuid *UUID) Scan(src interface{}) error {


@ -12,6 +12,7 @@ import (
"fmt"
"io"
"strings"
"sync"
)
// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
@ -33,7 +34,27 @@ const (
Future // Reserved for future definition.
)
var rander = rand.Reader // random function
const randPoolSize = 16 * 16
var (
rander = rand.Reader // random function
poolEnabled = false
poolMu sync.Mutex
poolPos = randPoolSize // protected with poolMu
pool [randPoolSize]byte // protected with poolMu
)
type invalidLengthError struct{ len int }
func (err invalidLengthError) Error() string {
return fmt.Sprintf("invalid UUID length: %d", err.len)
}
// IsInvalidLengthError is matcher function for custom error invalidLengthError
func IsInvalidLengthError(err error) bool {
_, ok := err.(invalidLengthError)
return ok
}
// Parse decodes s into a UUID or returns an error. Both the standard UUID
// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
@ -68,7 +89,7 @@ func Parse(s string) (UUID, error) {
}
return uuid, nil
default:
return uuid, fmt.Errorf("invalid UUID length: %d", len(s))
return uuid, invalidLengthError{len(s)}
}
// s is now at least 36 bytes long
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
@ -112,7 +133,7 @@ func ParseBytes(b []byte) (UUID, error) {
}
return uuid, nil
default:
return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
return uuid, invalidLengthError{len(b)}
}
// s is now at least 36 bytes long
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
@ -243,3 +264,31 @@ func SetRand(r io.Reader) {
}
rander = r
}
// EnableRandPool enables internal randomness pool used for Random
// (Version 4) UUID generation. The pool contains random bytes read from
// the random number generator on demand in batches. Enabling the pool
// may improve the UUID generation throughput significantly.
//
// Since the pool is stored on the Go heap, this feature may be a bad fit
// for security sensitive applications.
//
// Both EnableRandPool and DisableRandPool are not thread-safe and should
// only be called when there is no possibility that New or any other
// UUID Version 4 generation function will be called concurrently.
func EnableRandPool() {
poolEnabled = true
}
// DisableRandPool disables the randomness pool if it was previously
// enabled with EnableRandPool.
//
// Both EnableRandPool and DisableRandPool are not thread-safe and should
// only be called when there is no possibility that New or any other
// UUID Version 4 generation function will be called concurrently.
func DisableRandPool() {
poolEnabled = false
defer poolMu.Unlock()
poolMu.Lock()
poolPos = randPoolSize
}
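A hedged sketch exercising the two additions above, the opt-in randomness pool and the typed length-error check; the invalid input string is arbitrary.

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Opt in before any concurrent generation starts; see the caveats above.
	uuid.EnableRandPool()
	fmt.Println(uuid.New())

	// Parse failures caused by a bad length can now be detected by type.
	_, err := uuid.Parse("not-a-uuid")
	fmt.Println(uuid.IsInvalidLengthError(err)) // true
}
```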


@ -17,12 +17,6 @@ import (
//
// In most cases, New should be used.
func NewUUID() (UUID, error) {
nodeMu.Lock()
if nodeID == zeroID {
setNodeInterface("")
}
nodeMu.Unlock()
var uuid UUID
now, seq, err := GetTime()
if err != nil {
@ -38,7 +32,13 @@ func NewUUID() (UUID, error) {
binary.BigEndian.PutUint16(uuid[4:], timeMid)
binary.BigEndian.PutUint16(uuid[6:], timeHi)
binary.BigEndian.PutUint16(uuid[8:], seq)
nodeMu.Lock()
if nodeID == zeroID {
setNodeInterface("")
}
copy(uuid[10:], nodeID[:])
nodeMu.Unlock()
return uuid, nil
}


@ -14,11 +14,21 @@ func New() UUID {
return Must(NewRandom())
}
// NewString creates a new random UUID and returns it as a string or panics.
// NewString is equivalent to the expression
//
// uuid.New().String()
func NewString() string {
return Must(NewRandom()).String()
}
// NewRandom returns a Random (Version 4) UUID.
//
// The strength of the UUIDs is based on the strength of the crypto/rand
// package.
//
// Uses the randomness pool if it was enabled with EnableRandPool.
//
// A note about uniqueness derived from the UUID Wikipedia entry:
//
// Randomly generated UUIDs have 122 random bits. One's annual risk of being
@ -27,8 +37,16 @@ func New() UUID {
// equivalent to the odds of creating a few tens of trillions of UUIDs in a
// year and having one duplicate.
func NewRandom() (UUID, error) {
if !poolEnabled {
return NewRandomFromReader(rander)
}
return newRandomFromPool()
}
// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
func NewRandomFromReader(r io.Reader) (UUID, error) {
var uuid UUID
_, err := io.ReadFull(rander, uuid[:])
_, err := io.ReadFull(r, uuid[:])
if err != nil {
return Nil, err
}
@ -36,3 +54,23 @@ func NewRandom() (UUID, error) {
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
return uuid, nil
}
func newRandomFromPool() (UUID, error) {
var uuid UUID
poolMu.Lock()
if poolPos == randPoolSize {
_, err := io.ReadFull(rander, pool[:])
if err != nil {
poolMu.Unlock()
return Nil, err
}
poolPos = 0
}
copy(uuid[:], pool[poolPos:(poolPos+16)])
poolPos += 16
poolMu.Unlock()
uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
return uuid, nil
}
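A sketch of the NewRandomFromReader fix above: because the supplied reader is now actually used, deterministic UUIDs for tests become possible. The math/rand source here is illustrative and not cryptographically secure.

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/google/uuid"
)

func main() {
	r := rand.New(rand.NewSource(1)) // deterministic seed, for tests only
	u, err := uuid.NewRandomFromReader(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(u) // same output on every run with the same seed
}
```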


@ -1,24 +0,0 @@
language: go
env:
- GO111MODULE=off
go:
- 1.9.x
- 1.10.x
- 1.11.x
- 1.12.x
- master
matrix:
fast_finish: true
allow_failures:
- go: master
sudo: false
script:
- go test -v -cpu=2
- go test -v -cpu=2 -race
- go test -v -cpu=2 -tags noasm
- go test -v -cpu=2 -race -tags noasm


@ -1,23 +0,0 @@
// +build lz4debug
package lz4
import (
"fmt"
"os"
"path/filepath"
"runtime"
)
const debugFlag = true
func debug(args ...interface{}) {
_, file, line, _ := runtime.Caller(1)
file = filepath.Base(file)
f := fmt.Sprintf("LZ4: %s:%d %s", file, line, args[0])
if f[len(f)-1] != '\n' {
f += "\n"
}
fmt.Fprintf(os.Stderr, f, args[1:]...)
}


@ -1,7 +0,0 @@
// +build !lz4debug
package lz4
const debugFlag = false
func debug(args ...interface{}) {}


@ -1,8 +0,0 @@
// +build !appengine
// +build gc
// +build !noasm
package lz4
//go:noescape
func decodeBlock(dst, src []byte) int


@ -1,98 +0,0 @@
// +build !amd64 appengine !gc noasm
package lz4
func decodeBlock(dst, src []byte) (ret int) {
const hasError = -2
defer func() {
if recover() != nil {
ret = hasError
}
}()
var si, di int
for {
// Literals and match lengths (token).
b := int(src[si])
si++
// Literals.
if lLen := b >> 4; lLen > 0 {
switch {
case lLen < 0xF && si+16 < len(src):
// Shortcut 1
// if we have enough room in src and dst, and the literals length
// is small enough (0..14) then copy all 16 bytes, even if not all
// are part of the literals.
copy(dst[di:], src[si:si+16])
si += lLen
di += lLen
if mLen := b & 0xF; mLen < 0xF {
// Shortcut 2
// if the match length (4..18) fits within the literals, then copy
// all 18 bytes, even if not all are part of the literals.
mLen += 4
if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset {
i := di - offset
end := i + 18
if end > len(dst) {
// The remaining buffer may not hold 18 bytes.
// See https://github.com/pierrec/lz4/issues/51.
end = len(dst)
}
copy(dst[di:], dst[i:end])
si += 2
di += mLen
continue
}
}
case lLen == 0xF:
for src[si] == 0xFF {
lLen += 0xFF
si++
}
lLen += int(src[si])
si++
fallthrough
default:
copy(dst[di:di+lLen], src[si:si+lLen])
si += lLen
di += lLen
}
}
if si >= len(src) {
return di
}
offset := int(src[si]) | int(src[si+1])<<8
if offset == 0 {
return hasError
}
si += 2
// Match.
mLen := b & 0xF
if mLen == 0xF {
for src[si] == 0xFF {
mLen += 0xFF
si++
}
mLen += int(src[si])
si++
}
mLen += minMatch
// Copy the match.
expanded := dst[di-offset:]
if mLen > offset {
// Efficiently copy the match dst[di-offset:di] into the dst slice.
bytesToCopy := offset * (mLen / offset)
for n := offset; n <= bytesToCopy+offset; n *= 2 {
copy(expanded[n:], expanded[:n])
}
di += bytesToCopy
mLen -= bytesToCopy
}
di += copy(dst[di:di+mLen], expanded[:mLen])
}
}


@ -1,30 +0,0 @@
package lz4
import (
"errors"
"fmt"
"os"
rdebug "runtime/debug"
)
var (
// ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBLock when a compressed
// block is corrupted or the destination buffer is not large enough for the uncompressed data.
ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short")
// ErrInvalid is returned when reading an invalid LZ4 archive.
ErrInvalid = errors.New("lz4: bad magic number")
// ErrBlockDependency is returned when attempting to decompress an archive created with block dependency.
ErrBlockDependency = errors.New("lz4: block dependency not supported")
// ErrUnsupportedSeek is returned when attempting to Seek any way but forward from the current position.
ErrUnsupportedSeek = errors.New("lz4: can only seek forward from io.SeekCurrent")
)
func recoverBlock(e *error) {
if r := recover(); r != nil && *e == nil {
if debugFlag {
fmt.Fprintln(os.Stderr, r)
rdebug.PrintStack()
}
*e = ErrInvalidSourceShortBuffer
}
}


@ -1,66 +0,0 @@
// Package lz4 implements reading and writing lz4 compressed data (a frame),
// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html.
//
// Although the block level compression and decompression functions are exposed and are fully compatible
// with the lz4 block format definition, they are low level and should not be used directly.
// For a complete description of an lz4 compressed block, see:
// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html
//
// See https://github.com/Cyan4973/lz4 for the reference C implementation.
//
package lz4
const (
// Extension is the LZ4 frame file name extension
Extension = ".lz4"
// Version is the LZ4 frame format version
Version = 1
frameMagic uint32 = 0x184D2204
frameSkipMagic uint32 = 0x184D2A50
// The following constants are used to setup the compression algorithm.
minMatch = 4 // the minimum size of the match sequence size (4 bytes)
winSizeLog = 16 // LZ4 64Kb window size limit
winSize = 1 << winSizeLog
winMask = winSize - 1 // 64Kb window of previous data for dependent blocks
compressedBlockFlag = 1 << 31
compressedBlockMask = compressedBlockFlag - 1
// hashLog determines the size of the hash table used to quickly find a previous match position.
// Its value influences the compression speed and memory usage, the lower the faster,
// but at the expense of the compression ratio.
// 16 seems to be the best compromise for fast compression.
hashLog = 16
htSize = 1 << hashLog
mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes.
)
// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb.
const (
blockSize64K = 64 << 10
blockSize256K = 256 << 10
blockSize1M = 1 << 20
blockSize4M = 4 << 20
)
var (
bsMapID = map[byte]int{4: blockSize64K, 5: blockSize256K, 6: blockSize1M, 7: blockSize4M}
bsMapValue = map[int]byte{blockSize64K: 4, blockSize256K: 5, blockSize1M: 6, blockSize4M: 7}
)
// Header describes the various flags that can be set on a Writer or obtained from a Reader.
// The default values match those of the LZ4 frame format definition
// (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html).
//
// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls.
// It is the caller responsibility to check them if necessary.
type Header struct {
BlockChecksum bool // Compressed blocks checksum flag.
NoChecksum bool // Frame checksum flag.
BlockMaxSize int // Size of the uncompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB.
Size uint64 // Frame total size. It is _not_ computed by the Writer.
CompressionLevel int // Compression level (higher is better, use 0 for fastest compression).
done bool // Header processed flag (Read or Write and checked).
}


@ -1,29 +0,0 @@
//+build go1.10
package lz4
import (
"fmt"
"strings"
)
func (h Header) String() string {
var s strings.Builder
s.WriteString(fmt.Sprintf("%T{", h))
if h.BlockChecksum {
s.WriteString("BlockChecksum: true ")
}
if h.NoChecksum {
s.WriteString("NoChecksum: true ")
}
if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 {
s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs))
}
if l := h.CompressionLevel; l != 0 {
s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l))
}
s.WriteByte('}')
return s.String()
}


@ -1,29 +0,0 @@
//+build !go1.10
package lz4
import (
"bytes"
"fmt"
)
func (h Header) String() string {
var s bytes.Buffer
s.WriteString(fmt.Sprintf("%T{", h))
if h.BlockChecksum {
s.WriteString("BlockChecksum: true ")
}
if h.NoChecksum {
s.WriteString("NoChecksum: true ")
}
if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 {
s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs))
}
if l := h.CompressionLevel; l != 0 {
s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l))
}
s.WriteByte('}')
return s.String()
}


@ -1,335 +0,0 @@
package lz4
import (
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"github.com/pierrec/lz4/internal/xxh32"
)
// Reader implements the LZ4 frame decoder.
// The Header is set after the first call to Read().
// The Header may change between Read() calls in case of concatenated frames.
type Reader struct {
Header
// Handler called when a block has been successfully read.
// It provides the number of bytes read.
OnBlockDone func(size int)
buf [8]byte // Scrap buffer.
pos int64 // Current position in src.
src io.Reader // Source.
zdata []byte // Compressed data.
data []byte // Uncompressed data.
idx int // Index of unread bytes into data.
checksum xxh32.XXHZero // Frame hash.
skip int64 // Bytes to skip before next read.
dpos int64 // Position in dest
}
// NewReader returns a new LZ4 frame decoder.
// No access to the underlying io.Reader is performed.
func NewReader(src io.Reader) *Reader {
r := &Reader{src: src}
return r
}
// readHeader checks the frame magic number and parses the frame descriptor.
// Skippable frames are supported even as a first frame although the LZ4
// specifications recommends skippable frames not to be used as first frames.
func (z *Reader) readHeader(first bool) error {
defer z.checksum.Reset()
buf := z.buf[:]
for {
magic, err := z.readUint32()
if err != nil {
z.pos += 4
if !first && err == io.ErrUnexpectedEOF {
return io.EOF
}
return err
}
if magic == frameMagic {
break
}
if magic>>8 != frameSkipMagic>>8 {
return ErrInvalid
}
skipSize, err := z.readUint32()
if err != nil {
return err
}
z.pos += 4
m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize))
if err != nil {
return err
}
z.pos += m
}
// Header.
if _, err := io.ReadFull(z.src, buf[:2]); err != nil {
return err
}
z.pos += 8
b := buf[0]
if v := b >> 6; v != Version {
return fmt.Errorf("lz4: invalid version: got %d; expected %d", v, Version)
}
if b>>5&1 == 0 {
return ErrBlockDependency
}
z.BlockChecksum = b>>4&1 > 0
frameSize := b>>3&1 > 0
z.NoChecksum = b>>2&1 == 0
bmsID := buf[1] >> 4 & 0x7
bSize, ok := bsMapID[bmsID]
if !ok {
return fmt.Errorf("lz4: invalid block max size ID: %d", bmsID)
}
z.BlockMaxSize = bSize
// Allocate the compressed/uncompressed buffers.
// The compressed buffer cannot exceed the uncompressed one.
if n := 2 * bSize; cap(z.zdata) < n {
z.zdata = make([]byte, n, n)
}
if debugFlag {
debug("header block max size id=%d size=%d", bmsID, bSize)
}
z.zdata = z.zdata[:bSize]
z.data = z.zdata[:cap(z.zdata)][bSize:]
z.idx = len(z.data)
_, _ = z.checksum.Write(buf[0:2])
if frameSize {
buf := buf[:8]
if _, err := io.ReadFull(z.src, buf); err != nil {
return err
}
z.Size = binary.LittleEndian.Uint64(buf)
z.pos += 8
_, _ = z.checksum.Write(buf)
}
// Header checksum.
if _, err := io.ReadFull(z.src, buf[:1]); err != nil {
return err
}
z.pos++
if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] {
return fmt.Errorf("lz4: invalid header checksum: got %x; expected %x", buf[0], h)
}
z.Header.done = true
if debugFlag {
debug("header read: %v", z.Header)
}
return nil
}
// Read decompresses data from the underlying source into the supplied buffer.
//
// Since there can be multiple streams concatenated, Header values may
// change between calls to Read(). If that is the case, no data is actually read from
// the underlying io.Reader, to allow for potential input buffer resizing.
func (z *Reader) Read(buf []byte) (int, error) {
if debugFlag {
debug("Read buf len=%d", len(buf))
}
if !z.Header.done {
if err := z.readHeader(true); err != nil {
return 0, err
}
if debugFlag {
debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d",
len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx)
}
}
if len(buf) == 0 {
return 0, nil
}
if z.idx == len(z.data) {
// No data ready for reading, process the next block.
if debugFlag {
debug("reading block from writer")
}
// Reset uncompressed buffer
z.data = z.zdata[:cap(z.zdata)][len(z.zdata):]
// Block length: 0 = end of frame, highest bit set: uncompressed.
bLen, err := z.readUint32()
if err != nil {
return 0, err
}
z.pos += 4
if bLen == 0 {
// End of frame reached.
if !z.NoChecksum {
// Validate the frame checksum.
checksum, err := z.readUint32()
if err != nil {
return 0, err
}
if debugFlag {
debug("frame checksum got=%x / want=%x", z.checksum.Sum32(), checksum)
}
z.pos += 4
if h := z.checksum.Sum32(); checksum != h {
return 0, fmt.Errorf("lz4: invalid frame checksum: got %x; expected %x", h, checksum)
}
}
// Get ready for the next concatenated frame and keep the position.
pos := z.pos
z.Reset(z.src)
z.pos = pos
// Since multiple frames can be concatenated, check for more.
return 0, z.readHeader(false)
}
if debugFlag {
debug("raw block size %d", bLen)
}
if bLen&compressedBlockFlag > 0 {
// Uncompressed block.
bLen &= compressedBlockMask
if debugFlag {
debug("uncompressed block size %d", bLen)
}
if int(bLen) > cap(z.data) {
return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
}
z.data = z.data[:bLen]
if _, err := io.ReadFull(z.src, z.data); err != nil {
return 0, err
}
z.pos += int64(bLen)
if z.OnBlockDone != nil {
z.OnBlockDone(int(bLen))
}
if z.BlockChecksum {
checksum, err := z.readUint32()
if err != nil {
return 0, err
}
z.pos += 4
if h := xxh32.ChecksumZero(z.data); h != checksum {
return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum)
}
}
} else {
// Compressed block.
if debugFlag {
debug("compressed block size %d", bLen)
}
if int(bLen) > cap(z.data) {
return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
}
zdata := z.zdata[:bLen]
if _, err := io.ReadFull(z.src, zdata); err != nil {
return 0, err
}
z.pos += int64(bLen)
if z.BlockChecksum {
checksum, err := z.readUint32()
if err != nil {
return 0, err
}
z.pos += 4
if h := xxh32.ChecksumZero(zdata); h != checksum {
return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum)
}
}
n, err := UncompressBlock(zdata, z.data)
if err != nil {
return 0, err
}
z.data = z.data[:n]
if z.OnBlockDone != nil {
z.OnBlockDone(n)
}
}
if !z.NoChecksum {
_, _ = z.checksum.Write(z.data)
if debugFlag {
debug("current frame checksum %x", z.checksum.Sum32())
}
}
z.idx = 0
}
if z.skip > int64(len(z.data[z.idx:])) {
z.skip -= int64(len(z.data[z.idx:]))
z.dpos += int64(len(z.data[z.idx:]))
z.idx = len(z.data)
return 0, nil
}
z.idx += int(z.skip)
z.dpos += z.skip
z.skip = 0
n := copy(buf, z.data[z.idx:])
z.idx += n
z.dpos += int64(n)
if debugFlag {
debug("copied %d bytes to input", n)
}
return n, nil
}
// Seek implements io.Seeker, but supports seeking forward from the current
// position only. Any other seek will return an error. Allows skipping output
// bytes which aren't needed, which in some scenarios is faster than reading
// and discarding them.
// Note this may cause future calls to Read() to read 0 bytes if all of the
// data they would have returned is skipped.
func (z *Reader) Seek(offset int64, whence int) (int64, error) {
if offset < 0 || whence != io.SeekCurrent {
return z.dpos + z.skip, ErrUnsupportedSeek
}
z.skip += offset
return z.dpos + z.skip, nil
}
// Reset discards the Reader's state and makes it equivalent to the
// result of its original state from NewReader, but reading from r instead.
// This permits reusing a Reader rather than allocating a new one.
func (z *Reader) Reset(r io.Reader) {
z.Header = Header{}
z.pos = 0
z.src = r
z.zdata = z.zdata[:0]
z.data = z.data[:0]
z.idx = 0
z.checksum.Reset()
}
// readUint32 reads an uint32 into the supplied buffer.
// The idea is to make use of the already allocated buffers avoiding additional allocations.
func (z *Reader) readUint32() (uint32, error) {
buf := z.buf[:4]
_, err := io.ReadFull(z.src, buf)
x := binary.LittleEndian.Uint32(buf)
return x, err
}


@ -31,4 +31,6 @@ Temporary Items
# End of https://www.gitignore.io/api/macos
cmd/*/*exe
.idea
.idea
fuzz/*.zip


@ -1,7 +1,7 @@
# lz4 : LZ4 compression in pure Go
[![GoDoc](https://godoc.org/github.com/pierrec/lz4?status.svg)](https://godoc.org/github.com/pierrec/lz4)
[![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4)
[![Go Reference](https://pkg.go.dev/badge/github.com/pierrec/lz4/v4.svg)](https://pkg.go.dev/github.com/pierrec/lz4/v4)
[![CI](https://github.com/pierrec/lz4/workflows/ci/badge.svg)](https://github.com/pierrec/lz4/actions)
[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4)
[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags)
@ -15,13 +15,13 @@ The implementation is based on the reference C [one](https://github.com/lz4/lz4)
Assuming you have the go toolchain installed:
```
go get github.com/pierrec/lz4
go get github.com/pierrec/lz4/v4
```
There is a command line interface tool to compress and decompress LZ4 files.
```
go install github.com/pierrec/lz4/cmd/lz4c
go install github.com/pierrec/lz4/v4/cmd/lz4c
```
Usage
@ -83,24 +83,10 @@ Contributions are very welcome for bug fixing, performance improvements...!
## Contributors
Thanks to all contributors so far:
Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far!
- [@klauspost](https://github.com/klauspost)
- [@heidawei](https://github.com/heidawei)
- [@x4m](https://github.com/x4m)
- [@Zariel](https://github.com/Zariel)
- [@edwingeng](https://github.com/edwingeng)
- [@danielmoy-google](https://github.com/danielmoy-google)
- [@honda-tatsuya](https://github.com/honda-tatsuya)
- [@h8liu](https://github.com/h8liu)
- [@sbinet](https://github.com/sbinet)
- [@fingon](https://github.com/fingon)
- [@emfree](https://github.com/emfree)
- [@lhemala](https://github.com/lhemala)
- [@connor4312](https://github.com/connor4312)
- [@oov](https://github.com/oov)
- [@arya](https://github.com/arya)
- [@ikkeps](https://github.com/ikkeps)
Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder.
Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder
Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code
Special thanks to [@greatroar](https://github.com/greatroar) for his work on the asm implementations of the decoder for amd64 and arm64.
Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code.
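For orientation, a minimal sketch of the v4 streaming API that this vendored upgrade moves to; the data is a placeholder and error handling is abbreviated.

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/pierrec/lz4/v4"
)

func main() {
	var compressed bytes.Buffer

	// Compress into an LZ4 frame.
	zw := lz4.NewWriter(&compressed)
	_, _ = zw.Write([]byte("hello, lz4 frame"))
	_ = zw.Close() // flush and finish the frame

	// Decompress it again.
	zr := lz4.NewReader(&compressed)
	out, _ := io.ReadAll(zr)
	fmt.Println(string(out))
}
```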

pkg/metadata/vendor/github.com/pierrec/lz4/v4/go.mod (generated, vendored, new file, 3 lines)

@ -0,0 +1,3 @@
module github.com/pierrec/lz4/v4
go 1.14

pkg/metadata/vendor/github.com/pierrec/lz4/v4/go.sum (generated, vendored, new empty file)


@ -1,66 +1,126 @@
package lz4
package lz4block
import (
"encoding/binary"
"fmt"
"math/bits"
"sync"
"github.com/pierrec/lz4/v4/internal/lz4errors"
)
const (
// The following constants are used to setup the compression algorithm.
minMatch = 4 // the minimum size of the match sequence size (4 bytes)
winSizeLog = 16 // LZ4 64Kb window size limit
winSize = 1 << winSizeLog
winMask = winSize - 1 // 64Kb window of previous data for dependent blocks
// hashLog determines the size of the hash table used to quickly find a previous match position.
// Its value influences the compression speed and memory usage, the lower the faster,
// but at the expense of the compression ratio.
// 16 seems to be the best compromise for fast compression.
hashLog = 16
htSize = 1 << hashLog
mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes.
)
func recoverBlock(e *error) {
if r := recover(); r != nil && *e == nil {
*e = lz4errors.ErrInvalidSourceShortBuffer
}
}
// blockHash hashes the lower 6 bytes into a value < htSize.
func blockHash(x uint64) uint32 {
const prime6bytes = 227718039650203
return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog))
}
// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible.
func CompressBlockBound(n int) int {
return n + n/255 + 16
}
// UncompressBlock uncompresses the source buffer into the destination one,
// and returns the uncompressed size.
//
// The destination buffer must be sized appropriately.
//
// An error is returned if the source data is invalid or the destination buffer is too small.
func UncompressBlock(src, dst []byte) (int, error) {
func UncompressBlock(src, dst, dict []byte) (int, error) {
if len(src) == 0 {
return 0, nil
}
if di := decodeBlock(dst, src); di >= 0 {
if di := decodeBlock(dst, src, dict); di >= 0 {
return di, nil
}
return 0, ErrInvalidSourceShortBuffer
return 0, lz4errors.ErrInvalidSourceShortBuffer
}
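To show how these block primitives fit together, here is a small sketch (not part of the vendored code) written as if it lived inside the `lz4block` package, so it can call `CompressBlockBound`, `CompressBlock` and the new three-argument `UncompressBlock` directly:
```
package lz4block

// roundTrip is a hypothetical helper illustrating the intended call pattern.
func roundTrip(src []byte) ([]byte, error) {
	// Size dst with CompressBlockBound so the compressor never runs out of room.
	dst := make([]byte, CompressBlockBound(len(src)))
	n, err := CompressBlock(src, dst)
	if err != nil {
		return nil, err
	}
	compressed := dst[:n]

	// The caller must know (or bound) the uncompressed size up front.
	out := make([]byte, len(src))
	m, err := UncompressBlock(compressed, out, nil) // nil: no preset dictionary
	if err != nil {
		return nil, err
	}
	return out[:m], nil
}
```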
// CompressBlock compresses the source buffer into the destination one.
// This is the fast version of LZ4 compression and also the default one.
// The size of hashTable must be at least 64Kb.
//
// The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible.
//
// An error is returned if the destination buffer is too small.
func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) {
defer recoverBlock(&err)
type Compressor struct {
// Offsets are at most 64kiB, so we can store only the lower 16 bits of
// match positions: effectively, an offset from some 64kiB block boundary.
//
// When we retrieve such an offset, we interpret it as relative to the last
// block boundary si &^ 0xffff, or the one before, (si &^ 0xffff) - 0x10000,
// depending on which of these is inside the current window. If a table
// entry was generated more than 64kiB back in the input, we find out by
// inspecting the input stream.
table [htSize]uint16
// Bitmap indicating which positions in the table are in use.
// This allows us to quickly reset the table for reuse,
// without having to zero everything.
inUse [htSize / 32]uint32
}
// Get returns the position of a presumptive match for the hash h.
// The match may be a false positive due to a hash collision or an old entry.
// If si < winSize, the return value may be negative.
func (c *Compressor) get(h uint32, si int) int {
h &= htSize - 1
i := 0
if c.inUse[h/32]&(1<<(h%32)) != 0 {
i = int(c.table[h])
}
i += si &^ winMask
if i >= si {
// Try previous 64kiB block (negative when in first block).
i -= winSize
}
return i
}
func (c *Compressor) put(h uint32, si int) {
h &= htSize - 1
c.table[h] = uint16(si)
c.inUse[h/32] |= 1 << (h % 32)
}
func (c *Compressor) reset() { c.inUse = [htSize / 32]uint32{} }
var compressorPool = sync.Pool{New: func() interface{} { return new(Compressor) }}
func CompressBlock(src, dst []byte) (int, error) {
c := compressorPool.Get().(*Compressor)
n, err := c.CompressBlock(src, dst)
compressorPool.Put(c)
return n, err
}
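The package-level `CompressBlock` above borrows a pooled `Compressor` per call. When compressing many blocks on one goroutine, it is equally valid to hold a single `Compressor` and call its method directly, since the table is reset on every call. A hypothetical sketch, not vendored code:
```
package lz4block

// compressAll is a hypothetical helper reusing one Compressor across blocks.
func compressAll(blocks [][]byte) ([][]byte, error) {
	var c Compressor // the zero value is ready to use; CompressBlock resets it
	out := make([][]byte, 0, len(blocks))
	for _, b := range blocks {
		dst := make([]byte, CompressBlockBound(len(b)))
		n, err := c.CompressBlock(b, dst)
		if err != nil {
			return nil, err
		}
		out = append(out, dst[:n])
	}
	return out, nil
}
```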
func (c *Compressor) CompressBlock(src, dst []byte) (int, error) {
// Zero out reused table to avoid non-deterministic output (issue #65).
c.reset()
// Return 0, nil only if the destination buffer size is < CompressBlockBound.
isNotCompressible := len(dst) < CompressBlockBound(len(src))
// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
// This significantly speeds up incompressible data and usually has very small impact on compresssion.
// This significantly speeds up incompressible data and usually has very small impact on compression.
// bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
const adaptSkipLog = 7
sn, dn := len(src)-mfLimit, len(dst)
if sn <= 0 || dn == 0 {
return 0, nil
}
if len(hashTable) < htSize {
return 0, fmt.Errorf("hash table too small, should be at least %d in size", htSize)
}
// Prove to the compiler the table has at least htSize elements.
// The compiler can see that "uint32() >> hashShift" cannot be out of bounds.
hashTable = hashTable[:htSize]
// si: Current position of the search.
// anchor: Position of the current literals.
var si, anchor int
var si, di, anchor int
sn := len(src) - mfLimit
if sn <= 0 {
goto lastLiterals
}
// Fast scan strategy: the hash table only stores the last 4 bytes sequences.
for si < sn {
@ -71,33 +131,30 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) {
// We check a match at s, s+1 and s+2 and pick the first one we get.
// Checking 3 only requires us to load the source one.
ref := hashTable[h]
ref2 := hashTable[h2]
hashTable[h] = si
hashTable[h2] = si + 1
ref := c.get(h, si)
ref2 := c.get(h2, si+1)
c.put(h, si)
c.put(h2, si+1)
offset := si - ref
// If offset <= 0 we got an old entry in the hash table.
if offset <= 0 || offset >= winSize || // Out of window.
uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches.
if offset <= 0 || offset >= winSize || uint32(match) != binary.LittleEndian.Uint32(src[ref:]) {
// No match. Start calculating another hash.
// The processor can usually do this out-of-order.
h = blockHash(match >> 16)
ref = hashTable[h]
ref3 := c.get(h, si+2)
// Check the second match at si+1
si += 1
offset = si - ref2
if offset <= 0 || offset >= winSize ||
uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) {
if offset <= 0 || offset >= winSize || uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) {
// No match. Check the third match at si+2
si += 1
offset = si - ref
hashTable[h] = si
offset = si - ref3
c.put(h, si)
if offset <= 0 || offset >= winSize ||
uint32(match>>16) != binary.LittleEndian.Uint32(src[ref:]) {
if offset <= 0 || offset >= winSize || uint32(match>>16) != binary.LittleEndian.Uint32(src[ref3:]) {
// Skip one extra byte (at si+3) before we check 3 matches again.
si += 2 + (si-anchor)>>adaptSkipLog
continue
@ -124,7 +181,7 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) {
si, mLen = si+mLen, si+minMatch
// Find the longest match by searching in batches of 8 bytes.
for si < sn {
for si+8 <= sn {
x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:])
if x == 0 {
si += 8
@ -136,6 +193,9 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) {
}
mLen = si - mLen
if di >= len(dst) {
return 0, lz4errors.ErrInvalidSourceShortBuffer
}
if mLen < 0xF {
dst[di] = byte(mLen)
} else {
@ -149,29 +209,40 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) {
dst[di] |= 0xF0
di++
l := lLen - 0xF
for ; l >= 0xFF; l -= 0xFF {
for ; l >= 0xFF && di < len(dst); l -= 0xFF {
dst[di] = 0xFF
di++
}
if di >= len(dst) {
return 0, lz4errors.ErrInvalidSourceShortBuffer
}
dst[di] = byte(l)
}
di++
// Literals.
if di+lLen > len(dst) {
return 0, lz4errors.ErrInvalidSourceShortBuffer
}
copy(dst[di:di+lLen], src[anchor:anchor+lLen])
di += lLen + 2
anchor = si
// Encode offset.
_ = dst[di] // Bound check elimination.
if di > len(dst) {
return 0, lz4errors.ErrInvalidSourceShortBuffer
}
dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
// Encode match length part 2.
if mLen >= 0xF {
for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
for mLen -= 0xF; mLen >= 0xFF && di < len(dst); mLen -= 0xFF {
dst[di] = 0xFF
di++
}
if di >= len(dst) {
return 0, lz4errors.ErrInvalidSourceShortBuffer
}
dst[di] = byte(mLen)
di++
}
@ -181,34 +252,44 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) {
}
// Hash match end-2
h = blockHash(binary.LittleEndian.Uint64(src[si-2:]))
hashTable[h] = si - 2
c.put(h, si-2)
}
if anchor == 0 {
lastLiterals:
if isNotCompressible && anchor == 0 {
// Incompressible.
return 0, nil
}
// Last literals.
if di >= len(dst) {
return 0, lz4errors.ErrInvalidSourceShortBuffer
}
lLen := len(src) - anchor
if lLen < 0xF {
dst[di] = byte(lLen << 4)
} else {
dst[di] = 0xF0
di++
for lLen -= 0xF; lLen >= 0xFF; lLen -= 0xFF {
for lLen -= 0xF; lLen >= 0xFF && di < len(dst); lLen -= 0xFF {
dst[di] = 0xFF
di++
}
if di >= len(dst) {
return 0, lz4errors.ErrInvalidSourceShortBuffer
}
dst[di] = byte(lLen)
}
di++
// Write the last literals.
if di >= anchor {
if isNotCompressible && di >= anchor {
// Incompressible.
return 0, nil
}
if di+len(src)-anchor > len(dst) {
return 0, lz4errors.ErrInvalidSourceShortBuffer
}
di += copy(dst[di:di+len(src)-anchor], src[anchor:])
return di, nil
}
@ -219,37 +300,50 @@ func blockHashHC(x uint32) uint32 {
return x * hasher >> (32 - winSizeLog)
}
// CompressBlockHC compresses the source buffer src into the destination dst
// with max search depth (use 0 or negative value for no max).
//
// CompressBlockHC compression ratio is better than CompressBlock but it is also slower.
//
// The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible.
//
// An error is returned if the destination buffer is too small.
func CompressBlockHC(src, dst []byte, depth int) (di int, err error) {
type CompressorHC struct {
// hashTable: stores the last position found for a given hash
// chainTable: stores previous positions for a given hash
hashTable, chainTable [htSize]int
needsReset bool
}
var compressorHCPool = sync.Pool{New: func() interface{} { return new(CompressorHC) }}
func CompressBlockHC(src, dst []byte, depth CompressionLevel) (int, error) {
c := compressorHCPool.Get().(*CompressorHC)
n, err := c.CompressBlock(src, dst, depth)
compressorHCPool.Put(c)
return n, err
}
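As with the fast path, `CompressBlockHC` can be exercised directly from inside the package. The sketch below (hypothetical, not vendored code) passes a depth of 0, which the method body below treats as searching the full 64 KiB window:
```
package lz4block

// compressHC is a hypothetical helper using the high-compression path.
func compressHC(src []byte) ([]byte, error) {
	dst := make([]byte, CompressBlockBound(len(src)))
	n, err := CompressBlockHC(src, dst, 0) // 0 => maximum search depth (winSize)
	if err != nil {
		return nil, err
	}
	return dst[:n], nil
}
```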
func (c *CompressorHC) CompressBlock(src, dst []byte, depth CompressionLevel) (_ int, err error) {
if c.needsReset {
// Zero out reused table to avoid non-deterministic output (issue #65).
c.hashTable = [htSize]int{}
c.chainTable = [htSize]int{}
}
c.needsReset = true // Only false on first call.
defer recoverBlock(&err)
// Return 0, nil only if the destination buffer size is < CompressBlockBound.
isNotCompressible := len(dst) < CompressBlockBound(len(src))
// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
// This significantly speeds up incompressible data and usually has very small impact on compresssion.
// This significantly speeds up incompressible data and usually has very small impact on compression.
// bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
const adaptSkipLog = 7
sn, dn := len(src)-mfLimit, len(dst)
if sn <= 0 || dn == 0 {
return 0, nil
var si, di, anchor int
sn := len(src) - mfLimit
if sn <= 0 {
goto lastLiterals
}
var si int
// hashTable: stores the last position found for a given hash
// chainTable: stores previous positions for a given hash
var hashTable, chainTable [winSize]int
if depth <= 0 {
if depth == 0 {
depth = winSize
}
anchor := si
for si < sn {
// Hash the next 4 bytes (sequence).
match := binary.LittleEndian.Uint32(src[si:])
@ -258,7 +352,7 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) {
// Follow the chain until out of window and give the longest match.
mLen := 0
offset := 0
for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] {
for next, try := c.hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next, try = c.chainTable[next&winMask], try-1 {
// The first (mLen==0) or next byte (mLen>=minMatch) at current match length
// must match to improve on the match length.
if src[next+mLen] != src[si+mLen] {
@ -284,10 +378,9 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) {
mLen = ml
offset = si - next
// Try another previous position with the same hash.
try--
}
chainTable[si&winMask] = hashTable[h]
hashTable[h] = si
c.chainTable[si&winMask] = c.hashTable[h]
c.hashTable[h] = si
// No match found.
if mLen == 0 {
@ -306,8 +399,8 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) {
match >>= 8
match |= uint32(src[si+3]) << 24
h := blockHashHC(match)
chainTable[si&winMask] = hashTable[h]
hashTable[h] = si
c.chainTable[si&winMask] = c.hashTable[h]
c.hashTable[h] = si
si++
}
@ -356,12 +449,13 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) {
}
}
if anchor == 0 {
if isNotCompressible && anchor == 0 {
// Incompressible.
return 0, nil
}
// Last literals.
lastLiterals:
lLen := len(src) - anchor
if lLen < 0xF {
dst[di] = byte(lLen << 4)
@ -378,7 +472,7 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) {
di++
// Write the last literals.
if di >= anchor {
if isNotCompressible && di >= anchor {
// Incompressible.
return 0, nil
}

View File

@ -0,0 +1,90 @@
// Package lz4block provides LZ4 BlockSize types and pools of buffers.
package lz4block
import "sync"
const (
Block64Kb uint32 = 1 << (16 + iota*2)
Block256Kb
Block1Mb
Block4Mb
)
// In legacy mode all blocks are compressed regardless
// of the compressed size: use the bound size.
var Block8Mb = uint32(CompressBlockBound(8 << 20))
var (
BlockPool64K = sync.Pool{New: func() interface{} { return make([]byte, Block64Kb) }}
BlockPool256K = sync.Pool{New: func() interface{} { return make([]byte, Block256Kb) }}
BlockPool1M = sync.Pool{New: func() interface{} { return make([]byte, Block1Mb) }}
BlockPool4M = sync.Pool{New: func() interface{} { return make([]byte, Block4Mb) }}
BlockPool8M = sync.Pool{New: func() interface{} { return make([]byte, Block8Mb) }}
)
func Index(b uint32) BlockSizeIndex {
switch b {
case Block64Kb:
return 4
case Block256Kb:
return 5
case Block1Mb:
return 6
case Block4Mb:
return 7
case Block8Mb: // only valid in legacy mode
return 3
}
return 0
}
func IsValid(b uint32) bool {
return Index(b) > 0
}
type BlockSizeIndex uint8
func (b BlockSizeIndex) IsValid() bool {
switch b {
case 4, 5, 6, 7:
return true
}
return false
}
func (b BlockSizeIndex) Get() []byte {
var buf interface{}
switch b {
case 4:
buf = BlockPool64K.Get()
case 5:
buf = BlockPool256K.Get()
case 6:
buf = BlockPool1M.Get()
case 7:
buf = BlockPool4M.Get()
case 3:
buf = BlockPool8M.Get()
}
return buf.([]byte)
}
func Put(buf []byte) {
// Safeguard: do not allow invalid buffers.
switch c := cap(buf); uint32(c) {
case Block64Kb:
BlockPool64K.Put(buf[:c])
case Block256Kb:
BlockPool256K.Put(buf[:c])
case Block1Mb:
BlockPool1M.Put(buf[:c])
case Block4Mb:
BlockPool4M.Put(buf[:c])
case Block8Mb:
BlockPool8M.Put(buf[:c])
}
}
type CompressionLevel uint32
const Fast CompressionLevel = 0
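To make the pooling contract concrete, here is a small sketch (hypothetical, not vendored code) of borrowing and returning a block buffer through `Index`, `BlockSizeIndex.Get` and `Put`:
```
package lz4block

// withBlockBuffer is a hypothetical helper that loans out a pooled 4 MiB buffer.
func withBlockBuffer(work func([]byte)) {
	idx := Index(Block4Mb) // BlockSizeIndex 7 per the switch above
	buf := idx.Get()       // taken from BlockPool4M
	defer Put(buf)         // returned to the matching pool, keyed on cap(buf)
	work(buf)
}
```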

View File

@ -2,12 +2,13 @@
// +build gc
// +build !noasm
#include "go_asm.h"
#include "textflag.h"
// AX scratch
// BX scratch
// CX scratch
// DX token
// CX literal and match lengths
// DX token, match offset
//
// DI &dst
// SI &src
@ -16,9 +17,11 @@
// R11 &dst
// R12 short output end
// R13 short input end
// func decodeBlock(dst, src []byte) int
// using 50 bytes of stack currently
TEXT ·decodeBlock(SB), NOSPLIT, $64-56
// R14 &dict
// R15 len(dict)
// func decodeBlock(dst, src, dict []byte) int
TEXT ·decodeBlock(SB), NOSPLIT, $48-80
MOVQ dst_base+0(FP), DI
MOVQ DI, R11
MOVQ dst_len+8(FP), R8
@ -26,8 +29,13 @@ TEXT ·decodeBlock(SB), NOSPLIT, $64-56
MOVQ src_base+24(FP), SI
MOVQ src_len+32(FP), R9
CMPQ R9, $0
JE err_corrupt
ADDQ SI, R9
MOVQ dict_base+48(FP), R14
MOVQ dict_len+56(FP), R15
// shortcut ends
// short output end
MOVQ R8, R12
@ -36,28 +44,26 @@ TEXT ·decodeBlock(SB), NOSPLIT, $64-56
MOVQ R9, R13
SUBQ $16, R13
loop:
// for si < len(src)
CMPQ SI, R9
JGE end
XORL CX, CX
loop:
// token := uint32(src[si])
MOVBQZX (SI), DX
MOVBLZX (SI), DX
INCQ SI
// lit_len = token >> 4
// if lit_len > 0
// CX = lit_len
MOVQ DX, CX
SHRQ $4, CX
MOVL DX, CX
SHRL $4, CX
// if lit_len != 0xF
CMPQ CX, $0xF
JEQ lit_len_loop_pre
CMPL CX, $0xF
JEQ lit_len_loop
CMPQ DI, R12
JGE lit_len_loop_pre
JAE copy_literal
CMPQ SI, R13
JGE lit_len_loop_pre
JAE copy_literal
// copy shortcut
@ -76,28 +82,32 @@ loop:
ADDQ CX, DI
ADDQ CX, SI
MOVQ DX, CX
ANDQ $0xF, CX
MOVL DX, CX
ANDL $0xF, CX
// The second stage: prepare for match copying, decode full info.
// If it doesn't work out, the info won't be wasted.
// offset := uint16(data[:2])
MOVWQZX (SI), DX
MOVWLZX (SI), DX
TESTL DX, DX
JE err_corrupt
ADDQ $2, SI
JC err_short_buf
MOVQ DI, AX
SUBQ DX, AX
JC err_corrupt
CMPQ AX, DI
JGT err_short_buf
JA err_short_buf
// if we can't do the second stage then jump straight to read the
// match length, we already have the offset.
CMPQ CX, $0xF
CMPL CX, $0xF
JEQ match_len_loop_pre
CMPQ DX, $8
CMPL DX, $8
JLT match_len_loop_pre
CMPQ AX, R11
JLT err_short_buf
JB match_len_loop_pre
// memcpy(op + 0, match + 0, 8);
MOVQ (AX), BX
@ -109,72 +119,63 @@ loop:
MOVW 16(AX), BX
MOVW BX, 16(DI)
ADDQ $4, DI // minmatch
ADDQ CX, DI
LEAQ const_minMatch(DI)(CX*1), DI
// shortcut complete, load next token
JMP loop
lit_len_loop_pre:
// if lit_len > 0
CMPQ CX, $0
JEQ offset
CMPQ CX, $0xF
JNE copy_literal
JMP loopcheck
// Read the rest of the literal length:
// do { BX = src[si++]; lit_len += BX } while (BX == 0xFF).
lit_len_loop:
// for src[si] == 0xFF
CMPB (SI), $0xFF
JNE lit_len_finalise
CMPQ SI, R9
JAE err_short_buf
// bounds check src[si+1]
MOVQ SI, AX
ADDQ $1, AX
CMPQ AX, R9
JGT err_short_buf
// lit_len += 0xFF
ADDQ $0xFF, CX
MOVBLZX (SI), BX
INCQ SI
JMP lit_len_loop
ADDQ BX, CX
lit_len_finalise:
// lit_len += int(src[si])
// si++
MOVBQZX (SI), AX
ADDQ AX, CX
INCQ SI
CMPB BX, $0xFF
JE lit_len_loop
copy_literal:
// bounds check src and dst
MOVQ SI, AX
ADDQ CX, AX
JC err_short_buf
CMPQ AX, R9
JGT err_short_buf
JA err_short_buf
MOVQ DI, AX
ADDQ CX, AX
CMPQ AX, R8
JGT err_short_buf
MOVQ DI, BX
ADDQ CX, BX
JC err_short_buf
CMPQ BX, R8
JA err_short_buf
// whats a good cut off to call memmove?
CMPQ CX, $16
// Copy literals of <=48 bytes through the XMM registers.
CMPQ CX, $48
JGT memmove_lit
// if len(dst[di:]) < 16
// if len(dst[di:]) < 48
MOVQ R8, AX
SUBQ DI, AX
CMPQ AX, $16
CMPQ AX, $48
JLT memmove_lit
// if len(src[si:]) < 16
MOVQ R9, AX
SUBQ SI, AX
CMPQ AX, $16
// if len(src[si:]) < 48
MOVQ R9, BX
SUBQ SI, BX
CMPQ BX, $48
JLT memmove_lit
MOVOU (SI), X0
MOVOU 16(SI), X1
MOVOU 32(SI), X2
MOVOU X0, (DI)
MOVOU X1, 16(DI)
MOVOU X2, 32(DI)
ADDQ CX, SI
ADDQ CX, DI
JMP finish_lit_copy
@ -183,18 +184,20 @@ memmove_lit:
MOVQ DI, 0(SP)
MOVQ SI, 8(SP)
MOVQ CX, 16(SP)
// spill
// Spill registers. Increment SI, DI now so we don't need to save CX.
ADDQ CX, DI
ADDQ CX, SI
MOVQ DI, 24(SP)
MOVQ SI, 32(SP)
MOVQ CX, 40(SP) // need len to inc SI, DI after
MOVB DX, 48(SP)
MOVL DX, 40(SP)
CALL runtime·memmove(SB)
// restore registers
MOVQ 24(SP), DI
MOVQ 32(SP), SI
MOVQ 40(SP), CX
MOVB 48(SP), DX
MOVL 40(SP), DX
// recalc initial values
MOVQ dst_base+0(FP), R8
@ -202,77 +205,62 @@ memmove_lit:
ADDQ dst_len+8(FP), R8
MOVQ src_base+24(FP), R9
ADDQ src_len+32(FP), R9
MOVQ dict_base+48(FP), R14
MOVQ dict_len+56(FP), R15
MOVQ R8, R12
SUBQ $32, R12
MOVQ R9, R13
SUBQ $16, R13
finish_lit_copy:
ADDQ CX, SI
ADDQ CX, DI
CMPQ SI, R9
JGE end
offset:
// CX := mLen
// free up DX to use for offset
MOVQ DX, CX
MOVL DX, CX
ANDL $0xF, CX
MOVQ SI, AX
ADDQ $2, AX
CMPQ AX, R9
JGT err_short_buf
CMPQ SI, R9
JAE end
// offset
// DX := int(src[si]) | int(src[si+1])<<8
MOVWQZX (SI), DX
// si += 2
// DX := int(src[si-2]) | int(src[si-1])<<8
ADDQ $2, SI
JC err_short_buf
CMPQ SI, R9
JA err_short_buf
MOVWQZX -2(SI), DX
// 0 offset is invalid
CMPQ DX, $0
JEQ err_corrupt
ANDB $0xF, CX
TESTL DX, DX
JEQ err_corrupt
match_len_loop_pre:
// if mlen != 0xF
CMPB CX, $0xF
JNE copy_match
// do { BX = src[si++]; mlen += BX } while (BX == 0xFF).
match_len_loop:
// for src[si] == 0xFF
// lit_len += 0xFF
CMPB (SI), $0xFF
JNE match_len_finalise
CMPQ SI, R9
JAE err_short_buf
// bounds check src[si+1]
MOVQ SI, AX
ADDQ $1, AX
CMPQ AX, R9
JGT err_short_buf
ADDQ $0xFF, CX
MOVBLZX (SI), BX
INCQ SI
JMP match_len_loop
ADDQ BX, CX
match_len_finalise:
// lit_len += int(src[si])
// si++
MOVBQZX (SI), AX
ADDQ AX, CX
INCQ SI
CMPB BX, $0xFF
JE match_len_loop
copy_match:
// mLen += minMatch
ADDQ $4, CX
ADDQ $const_minMatch, CX
// check we have match_len bytes left in dst
// di+match_len < len(dst)
MOVQ DI, AX
ADDQ CX, AX
JC err_short_buf
CMPQ AX, R8
JGT err_short_buf
JA err_short_buf
// DX = offset
// CX = match_len
@ -282,14 +270,14 @@ copy_match:
// check BX is within dst
// if BX < &dst
JC copy_match_from_dict
CMPQ BX, R11
JLT err_short_buf
JBE copy_match_from_dict
// if offset + match_len < di
MOVQ BX, AX
ADDQ CX, AX
LEAQ (BX)(CX*1), AX
CMPQ DI, AX
JGT copy_interior_match
JA copy_interior_match
// AX := len(dst[:di])
// MOVQ DI, AX
@ -309,11 +297,9 @@ copy_match_loop:
INCQ DI
INCQ BX
DECQ CX
JNZ copy_match_loop
CMPQ CX, $0
JGT copy_match_loop
JMP loop
JMP loopcheck
copy_interior_match:
CMPQ CX, $16
@ -329,23 +315,97 @@ copy_interior_match:
MOVOU X0, (DI)
ADDQ CX, DI
JMP loop
XORL CX, CX
JMP loopcheck
copy_match_from_dict:
// CX = match_len
// BX = &dst + (di - offset)
// AX = offset - di = dict_bytes_available => count of bytes potentially covered by the dictionary
MOVQ R11, AX
SUBQ BX, AX
// BX = len(dict) - dict_bytes_available
MOVQ R15, BX
SUBQ AX, BX
JS err_short_dict
ADDQ R14, BX
// if match_len < dict_bytes_available, the match fits entirely within the external dictionary: just copy
CMPQ CX, AX
JLT memmove_match
// The match stretches over the dictionary and our block
// 1) copy what comes from the dictionary
// AX = dict_bytes_available = copy_size
// BX = &dict_end - copy_size
// CX = match_len
// memmove(to, from, len)
MOVQ DI, 0(SP)
MOVQ BX, 8(SP)
MOVQ AX, 16(SP)
// store extra stuff we want to recover
// spill
MOVQ DI, 24(SP)
MOVQ SI, 32(SP)
MOVQ CX, 40(SP)
CALL runtime·memmove(SB)
// restore registers
MOVQ 16(SP), AX // copy_size
MOVQ 24(SP), DI
MOVQ 32(SP), SI
MOVQ 40(SP), CX // match_len
// recalc initial values
MOVQ dst_base+0(FP), R8
MOVQ R8, R11 // TODO: make these sensible numbers
ADDQ dst_len+8(FP), R8
MOVQ src_base+24(FP), R9
ADDQ src_len+32(FP), R9
MOVQ dict_base+48(FP), R14
MOVQ dict_len+56(FP), R15
MOVQ R8, R12
SUBQ $32, R12
MOVQ R9, R13
SUBQ $16, R13
// di+=copy_size
ADDQ AX, DI
// 2) copy the rest from the current block
// CX = match_len - copy_size = rest_size
SUBQ AX, CX
MOVQ R11, BX
// check if we have a copy overlap
// AX = &dst + rest_size
MOVQ CX, AX
ADDQ BX, AX
// if &dst + rest_size > di, copy byte by byte
CMPQ AX, DI
JA copy_match_loop
memmove_match:
// memmove(to, from, len)
MOVQ DI, 0(SP)
MOVQ BX, 8(SP)
MOVQ CX, 16(SP)
// spill
// Spill registers. Increment DI now so we don't need to save CX.
ADDQ CX, DI
MOVQ DI, 24(SP)
MOVQ SI, 32(SP)
MOVQ CX, 40(SP) // need len to inc SI, DI after
CALL runtime·memmove(SB)
// restore registers
MOVQ 24(SP), DI
MOVQ 32(SP), SI
MOVQ 40(SP), CX
// recalc initial values
MOVQ dst_base+0(FP), R8
@ -357,19 +417,32 @@ memmove_match:
SUBQ $32, R12
MOVQ R9, R13
SUBQ $16, R13
MOVQ dict_base+48(FP), R14
MOVQ dict_len+56(FP), R15
XORL CX, CX
ADDQ CX, DI
JMP loop
loopcheck:
// for si < len(src)
CMPQ SI, R9
JB loop
end:
// Remaining length must be zero.
TESTQ CX, CX
JNE err_corrupt
SUBQ R11, DI
MOVQ DI, ret+72(FP)
RET
err_corrupt:
MOVQ $-1, ret+48(FP)
MOVQ $-1, ret+72(FP)
RET
err_short_buf:
MOVQ $-2, ret+48(FP)
MOVQ $-2, ret+72(FP)
RET
end:
SUBQ R11, DI
MOVQ DI, ret+48(FP)
err_short_dict:
MOVQ $-3, ret+72(FP)
RET

View File

@ -0,0 +1,231 @@
// +build gc
// +build !noasm
#include "go_asm.h"
#include "textflag.h"
// Register allocation.
#define dst R0
#define dstorig R1
#define src R2
#define dstend R3
#define srcend R4
#define match R5 // Match address.
#define dictend R6
#define token R7
#define len R8 // Literal and match lengths.
#define offset R7 // Match offset; overlaps with token.
#define tmp1 R9
#define tmp2 R11
#define tmp3 R12
// func decodeBlock(dst, src, dict []byte) int
TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $-4-40
MOVW dst_base +0(FP), dst
MOVW dst_len +4(FP), dstend
MOVW src_base +12(FP), src
MOVW src_len +16(FP), srcend
CMP $0, srcend
BEQ shortSrc
ADD dst, dstend
ADD src, srcend
MOVW dst, dstorig
loop:
// Read token. Extract literal length.
MOVBU.P 1(src), token
MOVW token >> 4, len
CMP $15, len
BNE readLitlenDone
readLitlenLoop:
CMP src, srcend
BEQ shortSrc
MOVBU.P 1(src), tmp1
ADD.S tmp1, len
BVS shortDst
CMP $255, tmp1
BEQ readLitlenLoop
readLitlenDone:
CMP $0, len
BEQ copyLiteralDone
// Bounds check dst+len and src+len.
ADD.S dst, len, tmp1
ADD.CC.S src, len, tmp2
BCS shortSrc
CMP dstend, tmp1
//BHI shortDst // Uncomment for distinct error codes.
CMP.LS srcend, tmp2
BHI shortSrc
// Copy literal.
CMP $4, len
BLO copyLiteralFinish
// Copy 0-3 bytes until src is aligned.
TST $1, src
MOVBU.NE.P 1(src), tmp1
MOVB.NE.P tmp1, 1(dst)
SUB.NE $1, len
TST $2, src
MOVHU.NE.P 2(src), tmp2
MOVB.NE.P tmp2, 1(dst)
MOVW.NE tmp2 >> 8, tmp1
MOVB.NE.P tmp1, 1(dst)
SUB.NE $2, len
B copyLiteralLoopCond
copyLiteralLoop:
// Aligned load, unaligned write.
MOVW.P 4(src), tmp1
MOVW tmp1 >> 8, tmp2
MOVB tmp2, 1(dst)
MOVW tmp1 >> 16, tmp3
MOVB tmp3, 2(dst)
MOVW tmp1 >> 24, tmp2
MOVB tmp2, 3(dst)
MOVB.P tmp1, 4(dst)
copyLiteralLoopCond:
// Loop until len-4 < 0.
SUB.S $4, len
BPL copyLiteralLoop
copyLiteralFinish:
// Copy remaining 0-3 bytes.
// At this point, len may be < 0, but len&3 is still accurate.
TST $1, len
MOVB.NE.P 1(src), tmp3
MOVB.NE.P tmp3, 1(dst)
TST $2, len
MOVB.NE.P 2(src), tmp1
MOVB.NE.P tmp1, 2(dst)
MOVB.NE -1(src), tmp2
MOVB.NE tmp2, -1(dst)
copyLiteralDone:
// Initial part of match length.
// This frees up the token register for reuse as offset.
AND $15, token, len
CMP src, srcend
BEQ end
// Read offset.
ADD.S $2, src
BCS shortSrc
CMP srcend, src
BHI shortSrc
MOVBU -2(src), offset
MOVBU -1(src), tmp1
ORR.S tmp1 << 8, offset
BEQ corrupt
// Read rest of match length.
CMP $15, len
BNE readMatchlenDone
readMatchlenLoop:
CMP src, srcend
BEQ shortSrc
MOVBU.P 1(src), tmp1
ADD.S tmp1, len
BVS shortDst
CMP $255, tmp1
BEQ readMatchlenLoop
readMatchlenDone:
// Bounds check dst+len+minMatch.
ADD.S dst, len, tmp1
ADD.CC.S $const_minMatch, tmp1
BCS shortDst
CMP dstend, tmp1
BHI shortDst
RSB dst, offset, match
CMP dstorig, match
BGE copyMatch4
// match < dstorig means the match starts in the dictionary,
// at len(dict) - offset + (dst - dstorig).
MOVW dict_base+24(FP), match
MOVW dict_len +28(FP), dictend
ADD $const_minMatch, len
RSB dst, dstorig, tmp1
RSB dictend, offset, tmp2
ADD.S tmp2, tmp1
BMI shortDict
ADD match, dictend
ADD tmp1, match
copyDict:
MOVBU.P 1(match), tmp1
MOVB.P tmp1, 1(dst)
SUB.S $1, len
CMP.NE match, dictend
BNE copyDict
// If the match extends beyond the dictionary, the rest is at dstorig.
CMP $0, len
BEQ copyMatchDone
MOVW dstorig, match
B copyMatch
// Copy a regular match.
// Since len+minMatch is at least four, we can do a 4× unrolled
// byte copy loop. Using MOVW instead of four byte loads is faster,
// but to remain portable we'd have to align match first, which is
// too expensive. By alternating loads and stores, we also handle
// the case offset < 4.
copyMatch4:
SUB.S $4, len
MOVBU.P 4(match), tmp1
MOVB.P tmp1, 4(dst)
MOVBU -3(match), tmp2
MOVB tmp2, -3(dst)
MOVBU -2(match), tmp3
MOVB tmp3, -2(dst)
MOVBU -1(match), tmp1
MOVB tmp1, -1(dst)
BPL copyMatch4
// Restore len, which is now negative.
ADD.S $4, len
BEQ copyMatchDone
copyMatch:
// Finish with a byte-at-a-time copy.
SUB.S $1, len
MOVBU.P 1(match), tmp2
MOVB.P tmp2, 1(dst)
BNE copyMatch
copyMatchDone:
CMP src, srcend
BNE loop
end:
CMP $0, len
BNE corrupt
SUB dstorig, dst, tmp1
MOVW tmp1, ret+36(FP)
RET
// The error cases have distinct labels so we can put different
// return codes here when debugging, or if the error returns need to
// be changed.
shortDict:
shortDst:
shortSrc:
corrupt:
MOVW $-1, tmp1
MOVW tmp1, ret+36(FP)
RET

View File

@ -0,0 +1,230 @@
// +build gc
// +build !noasm
// This implementation assumes that strict alignment checking is turned off.
// The Go compiler makes the same assumption.
#include "go_asm.h"
#include "textflag.h"
// Register allocation.
#define dst R0
#define dstorig R1
#define src R2
#define dstend R3
#define dstend16 R4 // dstend - 16
#define srcend R5
#define srcend16 R6 // srcend - 16
#define match R7 // Match address.
#define dict R8
#define dictlen R9
#define dictend R10
#define token R11
#define len R12 // Literal and match lengths.
#define lenRem R13
#define offset R14 // Match offset.
#define tmp1 R15
#define tmp2 R16
#define tmp3 R17
#define tmp4 R19
// func decodeBlock(dst, src, dict []byte) int
TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $0-80
LDP dst_base+0(FP), (dst, dstend)
ADD dst, dstend
MOVD dst, dstorig
LDP src_base+24(FP), (src, srcend)
CBZ srcend, shortSrc
ADD src, srcend
// dstend16 = max(dstend-16, 0) and similarly for srcend16.
SUBS $16, dstend, dstend16
CSEL LO, ZR, dstend16, dstend16
SUBS $16, srcend, srcend16
CSEL LO, ZR, srcend16, srcend16
LDP dict_base+48(FP), (dict, dictlen)
ADD dict, dictlen, dictend
loop:
// Read token. Extract literal length.
MOVBU.P 1(src), token
LSR $4, token, len
CMP $15, len
BNE readLitlenDone
readLitlenLoop:
CMP src, srcend
BEQ shortSrc
MOVBU.P 1(src), tmp1
ADDS tmp1, len
BVS shortDst
CMP $255, tmp1
BEQ readLitlenLoop
readLitlenDone:
CBZ len, copyLiteralDone
// Bounds check dst+len and src+len.
ADDS dst, len, tmp1
BCS shortSrc
ADDS src, len, tmp2
BCS shortSrc
CMP dstend, tmp1
BHI shortDst
CMP srcend, tmp2
BHI shortSrc
// Copy literal.
SUBS $16, len
BLO copyLiteralShort
copyLiteralLoop:
LDP.P 16(src), (tmp1, tmp2)
STP.P (tmp1, tmp2), 16(dst)
SUBS $16, len
BPL copyLiteralLoop
// Copy (final part of) literal of length 0-15.
// If we have >=16 bytes left in src and dst, just copy 16 bytes.
copyLiteralShort:
CMP dstend16, dst
CCMP LO, src, srcend16, $0b0010 // 0010 = preserve carry (LO).
BHS copyLiteralShortEnd
AND $15, len
LDP (src), (tmp1, tmp2)
ADD len, src
STP (tmp1, tmp2), (dst)
ADD len, dst
B copyLiteralDone
// Safe but slow copy near the end of src, dst.
copyLiteralShortEnd:
TBZ $3, len, 3(PC)
MOVD.P 8(src), tmp1
MOVD.P tmp1, 8(dst)
TBZ $2, len, 3(PC)
MOVW.P 4(src), tmp2
MOVW.P tmp2, 4(dst)
TBZ $1, len, 3(PC)
MOVH.P 2(src), tmp3
MOVH.P tmp3, 2(dst)
TBZ $0, len, 3(PC)
MOVBU.P 1(src), tmp4
MOVB.P tmp4, 1(dst)
copyLiteralDone:
// Initial part of match length.
AND $15, token, len
CMP src, srcend
BEQ end
// Read offset.
ADDS $2, src
BCS shortSrc
CMP srcend, src
BHI shortSrc
MOVHU -2(src), offset
CBZ offset, corrupt
// Read rest of match length.
CMP $15, len
BNE readMatchlenDone
readMatchlenLoop:
CMP src, srcend
BEQ shortSrc
MOVBU.P 1(src), tmp1
ADDS tmp1, len
BVS shortDst
CMP $255, tmp1
BEQ readMatchlenLoop
readMatchlenDone:
ADD $const_minMatch, len
// Bounds check dst+len.
ADDS dst, len, tmp2
BCS shortDst
CMP dstend, tmp2
BHI shortDst
SUB offset, dst, match
CMP dstorig, match
BHS copyMatchTry8
// match < dstorig means the match starts in the dictionary,
// at len(dict) - offset + (dst - dstorig).
SUB dstorig, dst, tmp1
SUB offset, dictlen, tmp2
ADDS tmp2, tmp1
BMI shortDict
ADD dict, tmp1, match
copyDict:
MOVBU.P 1(match), tmp3
MOVB.P tmp3, 1(dst)
SUBS $1, len
CCMP NE, dictend, match, $0b0100 // 0100 sets the Z (EQ) flag.
BNE copyDict
CBZ len, copyMatchDone
// If the match extends beyond the dictionary, the rest is at dstorig.
// Recompute the offset for the next check.
MOVD dstorig, match
SUB dstorig, dst, offset
copyMatchTry8:
// Copy doublewords if both len and offset are at least eight.
// A 16-at-a-time loop doesn't provide a further speedup.
CMP $8, len
CCMP HS, offset, $8, $0
BLO copyMatchLoop1
AND $7, len, lenRem
SUB $8, len
copyMatchLoop8:
MOVD.P 8(match), tmp1
MOVD.P tmp1, 8(dst)
SUBS $8, len
BPL copyMatchLoop8
MOVD (match)(len), tmp2 // match+len == match+lenRem-8.
ADD lenRem, dst
MOVD $0, len
MOVD tmp2, -8(dst)
B copyMatchDone
copyMatchLoop1:
// Byte-at-a-time copy for small offsets.
MOVBU.P 1(match), tmp2
MOVB.P tmp2, 1(dst)
SUBS $1, len
BNE copyMatchLoop1
copyMatchDone:
CMP src, srcend
BNE loop
end:
CBNZ len, corrupt
SUB dstorig, dst, tmp1
MOVD tmp1, ret+72(FP)
RET
// The error cases have distinct labels so we can put different
// return codes here when debugging, or if the error returns need to
// be changed.
shortDict:
shortDst:
shortSrc:
corrupt:
MOVD $-1, tmp1
MOVD tmp1, ret+72(FP)
RET

View File

@ -0,0 +1,10 @@
//go:build (amd64 || arm || arm64) && !appengine && gc && !noasm
// +build amd64 arm arm64
// +build !appengine
// +build gc
// +build !noasm
package lz4block
//go:noescape
func decodeBlock(dst, src, dict []byte) int

View File

@ -0,0 +1,139 @@
//go:build (!amd64 && !arm && !arm64) || appengine || !gc || noasm
// +build !amd64,!arm,!arm64 appengine !gc noasm
package lz4block
import (
"encoding/binary"
)
func decodeBlock(dst, src, dict []byte) (ret int) {
// Restrict capacities so we don't read or write out of bounds.
dst = dst[:len(dst):len(dst)]
src = src[:len(src):len(src)]
const hasError = -2
if len(src) == 0 {
return hasError
}
defer func() {
if recover() != nil {
ret = hasError
}
}()
var si, di uint
for si < uint(len(src)) {
// Literals and match lengths (token).
b := uint(src[si])
si++
// Literals.
if lLen := b >> 4; lLen > 0 {
switch {
case lLen < 0xF && si+16 < uint(len(src)):
// Shortcut 1
// if we have enough room in src and dst, and the literals length
// is small enough (0..14) then copy all 16 bytes, even if not all
// are part of the literals.
copy(dst[di:], src[si:si+16])
si += lLen
di += lLen
if mLen := b & 0xF; mLen < 0xF {
// Shortcut 2
// if the match length (4..18) fits within the literals, then copy
// all 18 bytes, even if not all are part of the literals.
mLen += 4
if offset := u16(src[si:]); mLen <= offset && offset < di {
i := di - offset
// The remaining buffer may not hold 18 bytes.
// See https://github.com/pierrec/lz4/issues/51.
if end := i + 18; end <= uint(len(dst)) {
copy(dst[di:], dst[i:end])
si += 2
di += mLen
continue
}
}
}
case lLen == 0xF:
for {
x := uint(src[si])
if lLen += x; int(lLen) < 0 {
return hasError
}
si++
if x != 0xFF {
break
}
}
fallthrough
default:
copy(dst[di:di+lLen], src[si:si+lLen])
si += lLen
di += lLen
}
}
mLen := b & 0xF
if si == uint(len(src)) && mLen == 0 {
break
} else if si >= uint(len(src)) {
return hasError
}
offset := u16(src[si:])
if offset == 0 {
return hasError
}
si += 2
// Match.
mLen += minMatch
if mLen == minMatch+0xF {
for {
x := uint(src[si])
if mLen += x; int(mLen) < 0 {
return hasError
}
si++
if x != 0xFF {
break
}
}
}
// Copy the match.
if di < offset {
// The match is beyond our block, meaning the first part
// is in the dictionary.
fromDict := dict[uint(len(dict))+di-offset:]
n := uint(copy(dst[di:di+mLen], fromDict))
di += n
if mLen -= n; mLen == 0 {
continue
}
// We copied n = offset-di bytes from the dictionary,
// then set di = di+n = offset, so the following code
// copies from dst[di-offset:] = dst[0:].
}
expanded := dst[di-offset:]
if mLen > offset {
// Efficiently copy the match dst[di-offset:di] into the dst slice.
bytesToCopy := offset * (mLen / offset)
for n := offset; n <= bytesToCopy+offset; n *= 2 {
copy(expanded[n:], expanded[:n])
}
di += bytesToCopy
mLen -= bytesToCopy
}
di += uint(copy(dst[di:di+mLen], expanded[:mLen]))
}
return int(di)
}
func u16(p []byte) uint { return uint(binary.LittleEndian.Uint16(p)) }

View File

@ -0,0 +1,19 @@
package lz4errors
type Error string
func (e Error) Error() string { return string(e) }
const (
ErrInvalidSourceShortBuffer Error = "lz4: invalid source or destination buffer too short"
ErrInvalidFrame Error = "lz4: bad magic number"
ErrInternalUnhandledState Error = "lz4: unhandled state"
ErrInvalidHeaderChecksum Error = "lz4: invalid header checksum"
ErrInvalidBlockChecksum Error = "lz4: invalid block checksum"
ErrInvalidFrameChecksum Error = "lz4: invalid frame checksum"
ErrOptionInvalidCompressionLevel Error = "lz4: invalid compression level"
ErrOptionClosedOrError Error = "lz4: cannot apply options on closed or in error object"
ErrOptionInvalidBlockSize Error = "lz4: invalid block size"
ErrOptionNotApplicable Error = "lz4: option not applicable"
ErrWriterNotClosed Error = "lz4: writer not closed"
)
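These constants are plain comparable error values, and the stream code wraps them with `fmt.Errorf("%w: ...")`, so callers inside the module can classify failures with `errors.Is`. A hypothetical sketch (the `internal` path is not importable from outside the module):
```
package lz4stream

import (
	"errors"

	"github.com/pierrec/lz4/v4/internal/lz4errors"
)

// describeError is a hypothetical helper classifying lz4 errors.
func describeError(err error) string {
	switch {
	case err == nil:
		return "ok"
	case errors.Is(err, lz4errors.ErrInvalidFrame):
		return "not an LZ4 frame"
	case errors.Is(err, lz4errors.ErrInvalidBlockChecksum):
		return "corrupted block"
	default:
		return err.Error()
	}
}
```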

View File

@ -0,0 +1,350 @@
package lz4stream
import (
"encoding/binary"
"fmt"
"io"
"sync"
"github.com/pierrec/lz4/v4/internal/lz4block"
"github.com/pierrec/lz4/v4/internal/lz4errors"
"github.com/pierrec/lz4/v4/internal/xxh32"
)
type Blocks struct {
Block *FrameDataBlock
Blocks chan chan *FrameDataBlock
mu sync.Mutex
err error
}
func (b *Blocks) initW(f *Frame, dst io.Writer, num int) {
if num == 1 {
b.Blocks = nil
b.Block = NewFrameDataBlock(f)
return
}
b.Block = nil
if cap(b.Blocks) != num {
b.Blocks = make(chan chan *FrameDataBlock, num)
}
// goroutine managing concurrent block compression goroutines.
go func() {
// Process next block compression item.
for c := range b.Blocks {
// Read the next compressed block result.
// Waiting here ensures that the blocks are output in the order they were sent.
// The incoming channel is always closed as it indicates to the caller that
// the block has been processed.
block := <-c
if block == nil {
// Notify the block compression routine that we are done with its result.
// This is used when a sentinel block is sent to terminate the compression.
close(c)
return
}
// Do not attempt to write the block upon any previous failure.
if b.err == nil {
// Write the block.
if err := block.Write(f, dst); err != nil {
// Keep the first error.
b.err = err
// All pending compression goroutines need to shut down, so we need to keep going.
}
}
close(c)
}
}()
}
func (b *Blocks) close(f *Frame, num int) error {
if num == 1 {
if b.Block != nil {
b.Block.Close(f)
}
err := b.err
b.err = nil
return err
}
if b.Blocks == nil {
err := b.err
b.err = nil
return err
}
c := make(chan *FrameDataBlock)
b.Blocks <- c
c <- nil
<-c
err := b.err
b.err = nil
return err
}
// ErrorR returns any error set while uncompressing a stream.
func (b *Blocks) ErrorR() error {
b.mu.Lock()
defer b.mu.Unlock()
return b.err
}
// initR returns a channel that streams the uncompressed blocks if in concurrent
// mode and no error. When the channel is closed, check for any error with b.ErrorR.
//
// If not in concurrent mode, the uncompressed block is b.Block and the returned error
// needs to be checked.
func (b *Blocks) initR(f *Frame, num int, src io.Reader) (chan []byte, error) {
size := f.Descriptor.Flags.BlockSizeIndex()
if num == 1 {
b.Blocks = nil
b.Block = NewFrameDataBlock(f)
return nil, nil
}
b.Block = nil
blocks := make(chan chan []byte, num)
// data receives the uncompressed blocks.
data := make(chan []byte)
// Read blocks from the source sequentially
// and uncompress them concurrently.
// In legacy mode, accrue the uncompressed sizes in cum.
var cum uint32
go func() {
var cumx uint32
var err error
for b.ErrorR() == nil {
block := NewFrameDataBlock(f)
cumx, err = block.Read(f, src, 0)
if err != nil {
block.Close(f)
break
}
// Recheck for an error as reading may be slow and uncompressing is expensive.
if b.ErrorR() != nil {
block.Close(f)
break
}
c := make(chan []byte)
blocks <- c
go func() {
defer block.Close(f)
data, err := block.Uncompress(f, size.Get(), nil, false)
if err != nil {
b.closeR(err)
// Close the block channel to indicate an error.
close(c)
} else {
c <- data
}
}()
}
// End the collection loop and the data channel.
c := make(chan []byte)
blocks <- c
c <- nil // signal the collection loop that we are done
<-c // wait for the collect loop to complete
if f.isLegacy() && cum == cumx {
err = io.EOF
}
b.closeR(err)
close(data)
}()
// Collect the uncompressed blocks and make them available
// on the returned channel.
go func(leg bool) {
defer close(blocks)
skipBlocks := false
for c := range blocks {
buf, ok := <-c
if !ok {
// A closed channel indicates an error.
// All remaining channels should be discarded.
skipBlocks = true
continue
}
if buf == nil {
// Signal to end the loop.
close(c)
return
}
if skipBlocks {
// A previous error has occurred, skipping remaining channels.
continue
}
// Perform checksum now as the blocks are received in order.
if f.Descriptor.Flags.ContentChecksum() {
_, _ = f.checksum.Write(buf)
}
if leg {
cum += uint32(len(buf))
}
data <- buf
close(c)
}
}(f.isLegacy())
return data, nil
}
// closeR safely sets the error on b if not already set.
func (b *Blocks) closeR(err error) {
b.mu.Lock()
if b.err == nil {
b.err = err
}
b.mu.Unlock()
}
func NewFrameDataBlock(f *Frame) *FrameDataBlock {
buf := f.Descriptor.Flags.BlockSizeIndex().Get()
return &FrameDataBlock{Data: buf, data: buf}
}
type FrameDataBlock struct {
Size DataBlockSize
Data []byte // compressed or uncompressed data (.data or .src)
Checksum uint32
data []byte // buffer for compressed data
src []byte // uncompressed data
err error // used in concurrent mode
}
func (b *FrameDataBlock) Close(f *Frame) {
b.Size = 0
b.Checksum = 0
b.err = nil
if b.data != nil {
// Block was not already closed.
lz4block.Put(b.data)
b.Data = nil
b.data = nil
b.src = nil
}
}
// Block compression errors are ignored since the buffer is sized appropriately.
func (b *FrameDataBlock) Compress(f *Frame, src []byte, level lz4block.CompressionLevel) *FrameDataBlock {
data := b.data
if f.isLegacy() {
// In legacy mode, the buffer is sized according to CompressBlockBound,
// but only 8Mb is buffered for compression.
src = src[:8<<20]
} else {
data = data[:len(src)] // trigger the incompressible flag in CompressBlock
}
var n int
switch level {
case lz4block.Fast:
n, _ = lz4block.CompressBlock(src, data)
default:
n, _ = lz4block.CompressBlockHC(src, data, level)
}
if n == 0 {
b.Size.UncompressedSet(true)
b.Data = src
} else {
b.Size.UncompressedSet(false)
b.Data = data[:n]
}
b.Size.sizeSet(len(b.Data))
b.src = src // keep track of the source for content checksum
if f.Descriptor.Flags.BlockChecksum() {
b.Checksum = xxh32.ChecksumZero(src)
}
return b
}
func (b *FrameDataBlock) Write(f *Frame, dst io.Writer) error {
// Write is called in the same order as blocks are compressed,
// so content checksum must be done here.
if f.Descriptor.Flags.ContentChecksum() {
_, _ = f.checksum.Write(b.src)
}
buf := f.buf[:]
binary.LittleEndian.PutUint32(buf, uint32(b.Size))
if _, err := dst.Write(buf[:4]); err != nil {
return err
}
if _, err := dst.Write(b.Data); err != nil {
return err
}
if b.Checksum == 0 {
return nil
}
binary.LittleEndian.PutUint32(buf, b.Checksum)
_, err := dst.Write(buf[:4])
return err
}
// Read updates b with the next block data, size and checksum if available.
func (b *FrameDataBlock) Read(f *Frame, src io.Reader, cum uint32) (uint32, error) {
x, err := f.readUint32(src)
if err != nil {
return 0, err
}
if f.isLegacy() {
switch x {
case frameMagicLegacy:
// Concatenated legacy frame.
return b.Read(f, src, cum)
case cum:
// Only works in non-concurrent mode; concurrent mode
// is handled separately.
// Linux kernel format appends the total uncompressed size at the end.
return 0, io.EOF
}
} else if x == 0 {
// Marker for end of stream.
return 0, io.EOF
}
b.Size = DataBlockSize(x)
size := b.Size.size()
if size > cap(b.data) {
return x, lz4errors.ErrOptionInvalidBlockSize
}
b.data = b.data[:size]
if _, err := io.ReadFull(src, b.data); err != nil {
return x, err
}
if f.Descriptor.Flags.BlockChecksum() {
sum, err := f.readUint32(src)
if err != nil {
return 0, err
}
b.Checksum = sum
}
return x, nil
}
func (b *FrameDataBlock) Uncompress(f *Frame, dst, dict []byte, sum bool) ([]byte, error) {
if b.Size.Uncompressed() {
n := copy(dst, b.data)
dst = dst[:n]
} else {
n, err := lz4block.UncompressBlock(b.data, dst, dict)
if err != nil {
return nil, err
}
dst = dst[:n]
}
if f.Descriptor.Flags.BlockChecksum() {
if c := xxh32.ChecksumZero(dst); c != b.Checksum {
err := fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidBlockChecksum, c, b.Checksum)
return nil, err
}
}
if sum && f.Descriptor.Flags.ContentChecksum() {
_, _ = f.checksum.Write(dst)
}
return dst, nil
}
func (f *Frame) readUint32(r io.Reader) (x uint32, err error) {
if _, err = io.ReadFull(r, f.buf[:4]); err != nil {
return
}
x = binary.LittleEndian.Uint32(f.buf[:4])
return
}

View File

@ -0,0 +1,204 @@
// Package lz4stream provides the types that support reading and writing LZ4 data streams.
package lz4stream
import (
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"github.com/pierrec/lz4/v4/internal/lz4block"
"github.com/pierrec/lz4/v4/internal/lz4errors"
"github.com/pierrec/lz4/v4/internal/xxh32"
)
//go:generate go run gen.go
const (
frameMagic uint32 = 0x184D2204
frameSkipMagic uint32 = 0x184D2A50
frameMagicLegacy uint32 = 0x184C2102
)
func NewFrame() *Frame {
return &Frame{}
}
type Frame struct {
buf [15]byte // frame descriptor needs at most 4(magic)+4+8+1=11 bytes
Magic uint32
Descriptor FrameDescriptor
Blocks Blocks
Checksum uint32
checksum xxh32.XXHZero
}
// Reset allows reusing the Frame.
// The Descriptor configuration is not modified.
func (f *Frame) Reset(num int) {
f.Magic = 0
f.Descriptor.Checksum = 0
f.Descriptor.ContentSize = 0
_ = f.Blocks.close(f, num)
f.Checksum = 0
}
func (f *Frame) InitW(dst io.Writer, num int, legacy bool) {
if legacy {
f.Magic = frameMagicLegacy
idx := lz4block.Index(lz4block.Block8Mb)
f.Descriptor.Flags.BlockSizeIndexSet(idx)
} else {
f.Magic = frameMagic
f.Descriptor.initW()
}
f.Blocks.initW(f, dst, num)
f.checksum.Reset()
}
func (f *Frame) CloseW(dst io.Writer, num int) error {
if err := f.Blocks.close(f, num); err != nil {
return err
}
if f.isLegacy() {
return nil
}
buf := f.buf[:0]
// End mark (data block size of uint32(0)).
buf = append(buf, 0, 0, 0, 0)
if f.Descriptor.Flags.ContentChecksum() {
buf = f.checksum.Sum(buf)
}
_, err := dst.Write(buf)
return err
}
func (f *Frame) isLegacy() bool {
return f.Magic == frameMagicLegacy
}
func (f *Frame) ParseHeaders(src io.Reader) error {
if f.Magic > 0 {
// Header already read.
return nil
}
newFrame:
var err error
if f.Magic, err = f.readUint32(src); err != nil {
return err
}
switch m := f.Magic; {
case m == frameMagic || m == frameMagicLegacy:
// All 16 values of frameSkipMagic are valid.
case m>>8 == frameSkipMagic>>8:
skip, err := f.readUint32(src)
if err != nil {
return err
}
if _, err := io.CopyN(ioutil.Discard, src, int64(skip)); err != nil {
return err
}
goto newFrame
default:
return lz4errors.ErrInvalidFrame
}
if err := f.Descriptor.initR(f, src); err != nil {
return err
}
f.checksum.Reset()
return nil
}
func (f *Frame) InitR(src io.Reader, num int) (chan []byte, error) {
return f.Blocks.initR(f, num, src)
}
func (f *Frame) CloseR(src io.Reader) (err error) {
if f.isLegacy() {
return nil
}
if !f.Descriptor.Flags.ContentChecksum() {
return nil
}
if f.Checksum, err = f.readUint32(src); err != nil {
return err
}
if c := f.checksum.Sum32(); c != f.Checksum {
return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidFrameChecksum, c, f.Checksum)
}
return nil
}
type FrameDescriptor struct {
Flags DescriptorFlags
ContentSize uint64
Checksum uint8
}
func (fd *FrameDescriptor) initW() {
fd.Flags.VersionSet(1)
fd.Flags.BlockIndependenceSet(true)
}
func (fd *FrameDescriptor) Write(f *Frame, dst io.Writer) error {
if fd.Checksum > 0 {
// Header already written.
return nil
}
buf := f.buf[:4]
// Write the magic number here even though it belongs to the Frame.
binary.LittleEndian.PutUint32(buf, f.Magic)
if !f.isLegacy() {
buf = buf[:4+2]
binary.LittleEndian.PutUint16(buf[4:], uint16(fd.Flags))
if fd.Flags.Size() {
buf = buf[:4+2+8]
binary.LittleEndian.PutUint64(buf[4+2:], fd.ContentSize)
}
fd.Checksum = descriptorChecksum(buf[4:])
buf = append(buf, fd.Checksum)
}
_, err := dst.Write(buf)
return err
}
func (fd *FrameDescriptor) initR(f *Frame, src io.Reader) error {
if f.isLegacy() {
idx := lz4block.Index(lz4block.Block8Mb)
f.Descriptor.Flags.BlockSizeIndexSet(idx)
return nil
}
// Read the flags and the checksum, assuming there is no content size.
buf := f.buf[:3]
if _, err := io.ReadFull(src, buf); err != nil {
return err
}
descr := binary.LittleEndian.Uint16(buf)
fd.Flags = DescriptorFlags(descr)
if fd.Flags.Size() {
// Append the 8 missing bytes.
buf = buf[:3+8]
if _, err := io.ReadFull(src, buf[3:]); err != nil {
return err
}
fd.ContentSize = binary.LittleEndian.Uint64(buf[2:])
}
fd.Checksum = buf[len(buf)-1] // the checksum is the last byte
buf = buf[:len(buf)-1] // all descriptor fields except checksum
if c := descriptorChecksum(buf); fd.Checksum != c {
return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidHeaderChecksum, c, fd.Checksum)
}
// Validate the fields that can be checked.
if idx := fd.Flags.BlockSizeIndex(); !idx.IsValid() {
return lz4errors.ErrOptionInvalidBlockSize
}
return nil
}
func descriptorChecksum(buf []byte) byte {
return byte(xxh32.ChecksumZero(buf) >> 8)
}

Some files were not shown because too many files have changed in this diff.