Use goreleaser to build and release (#244)

Instead of using gox to build on one side and a separate action to release on
the other, we can merge the two steps with goreleaser, which can build for
extra targets (arm and mips, if needed in the future) and also takes care of
creating checksums, a source archive, and a changelog, then publishes a
release with all the artifacts attached.

All binaries should respect the old naming convention, so any scripts
out there should still work.
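For reference, the kind of configuration this switch enables looks roughly like the sketch below. This is an illustrative `.goreleaser.yml`, not the file added by this commit; the project name, target lists, and name template are placeholders chosen to mirror the old gox-style naming.

```yaml
# Illustrative sketch only; the actual config added by this commit may differ.
project_name: myapp
builds:
  - env:
      - CGO_ENABLED=0
    goos: [linux, darwin, windows]
    goarch: [amd64, arm64] # arm/mips targets can be appended here later
    # Keep the old gox-style binary names so existing scripts keep working.
    binary: "{{ .ProjectName }}-{{ .Os }}-{{ .Arch }}"
archives:
  - format: binary # upload bare binaries instead of tarballs
checksum:
  name_template: checksums.txt
source:
  enabled: true # also attach a source archive
changelog:
  sort: asc
```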

Signed-off-by: Itxaka <igarcia@suse.com>
Author: Itxaka <igarcia@suse.com>, committed via GitHub
Date: 2021-08-11 08:30:55 +02:00
parent 0a4fe57f33, commit 4adc0dc9b9
1133 changed files with 81678 additions and 85598 deletions


@@ -14,35 +14,52 @@ import (
 )
 
 type blockEnc struct {
-	size      int
-	literals  []byte
-	sequences []seq
-	coders    seqCoders
-	litEnc    *huff0.Scratch
-	wr        bitWriter
-
-	extraLits int
-	last      bool
+	size       int
+	literals   []byte
+	sequences  []seq
+	coders     seqCoders
+	litEnc     *huff0.Scratch
+	dictLitEnc *huff0.Scratch
+	wr         bitWriter
+
+	extraLits         int
 	output            []byte
 	recentOffsets     [3]uint32
 	prevRecentOffsets [3]uint32
+
+	last   bool
+	lowMem bool
 }
 
 // init should be used once the block has been created.
 // If called more than once, the effect is the same as calling reset.
 func (b *blockEnc) init() {
-	if cap(b.literals) < maxCompressedLiteralSize {
-		b.literals = make([]byte, 0, maxCompressedLiteralSize)
-	}
-	const defSeqs = 200
-	b.literals = b.literals[:0]
-	if cap(b.sequences) < defSeqs {
-		b.sequences = make([]seq, 0, defSeqs)
-	}
-	if cap(b.output) < maxCompressedBlockSize {
-		b.output = make([]byte, 0, maxCompressedBlockSize)
+	if b.lowMem {
+		// 1K literals
+		if cap(b.literals) < 1<<10 {
+			b.literals = make([]byte, 0, 1<<10)
+		}
+		const defSeqs = 20
+		if cap(b.sequences) < defSeqs {
+			b.sequences = make([]seq, 0, defSeqs)
+		}
+		// 1K
+		if cap(b.output) < 1<<10 {
+			b.output = make([]byte, 0, 1<<10)
+		}
+	} else {
+		if cap(b.literals) < maxCompressedBlockSize {
+			b.literals = make([]byte, 0, maxCompressedBlockSize)
+		}
+		const defSeqs = 200
+		if cap(b.sequences) < defSeqs {
+			b.sequences = make([]seq, 0, defSeqs)
+		}
+		if cap(b.output) < maxCompressedBlockSize {
+			b.output = make([]byte, 0, maxCompressedBlockSize)
+		}
 	}
 	if b.coders.mlEnc == nil {
 		b.coders.mlEnc = &fseEncoder{}
 		b.coders.mlPrev = &fseEncoder{}
@@ -51,7 +68,7 @@ func (b *blockEnc) init() {
 		b.coders.llEnc = &fseEncoder{}
 		b.coders.llPrev = &fseEncoder{}
 	}
-	b.litEnc = &huff0.Scratch{}
+	b.litEnc = &huff0.Scratch{WantLogLess: 4}
 	b.reset(nil)
 }
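Aside: the new lowMem field above trims the encoder's initial buffers from maxCompressedBlockSize down to 1<<10 bytes. When the vendored library is used directly, this is reachable through an encoder option; a minimal sketch, assuming upstream's zstd.WithLowerEncoderMem option (the knob that sets lowMem in klauspost/compress):

```go
package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	// WithLowerEncoderMem(true) requests the small 1<<10 initial buffers
	// seen in init() above, trading some speed for a lower footprint.
	enc, err := zstd.NewWriter(&buf, zstd.WithLowerEncoderMem(true))
	if err != nil {
		log.Fatal(err)
	}
	if _, err := enc.Write([]byte("hello zstd")); err != nil {
		log.Fatal(err)
	}
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("compressed to %d bytes", buf.Len())
}
```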
@@ -75,6 +92,7 @@ func (b *blockEnc) reset(prev *blockEnc) {
 	if prev != nil {
 		b.recentOffsets = prev.prevRecentOffsets
 	}
+	b.dictLitEnc = nil
 }
 
 // reset will reset the block for a new encode, but in the same stream,
@@ -295,24 +313,38 @@ func (b *blockEnc) encodeRaw(a []byte) {
 	b.output = bh.appendTo(b.output[:0])
 	b.output = append(b.output, a...)
 	if debug {
-		println("Adding RAW block, length", len(a))
+		println("Adding RAW block, length", len(a), "last:", b.last)
 	}
 }
 
-// encodeLits can be used if the block is only litLen.
-func (b *blockEnc) encodeLits() error {
+// encodeRaw can be used to set the output to a raw representation of supplied bytes.
+func (b *blockEnc) encodeRawTo(dst, src []byte) []byte {
 	var bh blockHeader
 	bh.setLast(b.last)
-	bh.setSize(uint32(len(b.literals)))
+	bh.setSize(uint32(len(src)))
+	bh.setType(blockTypeRaw)
+	dst = bh.appendTo(dst)
+	dst = append(dst, src...)
+	if debug {
+		println("Adding RAW block, length", len(src), "last:", b.last)
+	}
+	return dst
+}
+
+// encodeLits can be used if the block is only litLen.
+func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
+	var bh blockHeader
+	bh.setLast(b.last)
+	bh.setSize(uint32(len(lits)))
 	// Don't compress extremely small blocks
-	if len(b.literals) < 32 {
+	if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw {
 		if debug {
-			println("Adding RAW block, length", len(b.literals))
+			println("Adding RAW block, length", len(lits), "last:", b.last)
 		}
 		bh.setType(blockTypeRaw)
 		b.output = bh.appendTo(b.output)
-		b.output = append(b.output, b.literals...)
+		b.output = append(b.output, lits...)
 		return nil
 	}
@@ -321,21 +353,18 @@ func (b *blockEnc) encodeLits() error {
 		reUsed, single bool
 		err            error
 	)
-	if len(b.literals) >= 1024 {
+	if b.dictLitEnc != nil {
+		b.litEnc.TransferCTable(b.dictLitEnc)
+		b.litEnc.Reuse = huff0.ReusePolicyAllow
+		b.dictLitEnc = nil
+	}
+	if len(lits) >= 1024 {
 		// Use 4 Streams.
-		out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
-		if len(out) > len(b.literals)-len(b.literals)>>4 {
-			// Bail out of compression is too little.
-			err = huff0.ErrIncompressible
-		}
-	} else if len(b.literals) > 32 {
+		out, reUsed, err = huff0.Compress4X(lits, b.litEnc)
+	} else if len(lits) > 32 {
 		// Use 1 stream
 		single = true
-		out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
-		if len(out) > len(b.literals)-len(b.literals)>>4 {
-			// Bail out of compression is too little.
-			err = huff0.ErrIncompressible
-		}
+		out, reUsed, err = huff0.Compress1X(lits, b.litEnc)
 	} else {
 		err = huff0.ErrIncompressible
 	}
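Aside: the dictLitEnc branch above seeds the literal encoder with the huff0 table that was built from a compression dictionary (TransferCTable), then allows that table to be reused. A minimal sketch of how a dictionary reaches this code path from the public API, assuming upstream's zstd.WithEncoderDict option; the package and function names here are illustrative:

```go
package dictexample

import "github.com/klauspost/compress/zstd"

// compressWithDict compresses data using a prebuilt zstd dictionary; loading
// the dictionary is what populates blockEnc.dictLitEnc, whose huff0 CTable is
// then transferred to the literal encoder as in the hunk above.
// dict must be a valid zstd dictionary (e.g. produced by `zstd --train`).
func compressWithDict(dict, data []byte) ([]byte, error) {
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dict))
	if err != nil {
		return nil, err
	}
	defer enc.Close()
	// EncodeAll is the stateless entry point; it appends to the second arg.
	return enc.EncodeAll(data, nil), nil
}
```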
@@ -343,23 +372,23 @@ func (b *blockEnc) encodeLits() error {
 	switch err {
 	case huff0.ErrIncompressible:
 		if debug {
-			println("Adding RAW block, length", len(b.literals))
+			println("Adding RAW block, length", len(lits), "last:", b.last)
 		}
 		bh.setType(blockTypeRaw)
 		b.output = bh.appendTo(b.output)
-		b.output = append(b.output, b.literals...)
+		b.output = append(b.output, lits...)
 		return nil
 	case huff0.ErrUseRLE:
 		if debug {
-			println("Adding RLE block, length", len(b.literals))
+			println("Adding RLE block, length", len(lits))
 		}
 		bh.setType(blockTypeRLE)
 		b.output = bh.appendTo(b.output)
-		b.output = append(b.output, b.literals[0])
+		b.output = append(b.output, lits[0])
 		return nil
-	case nil:
 	default:
 		return err
+	case nil:
 	}
 	// Compressed...
 	// Now, allow reuse
@@ -378,7 +407,7 @@ func (b *blockEnc) encodeLits() error {
 		lh.setType(literalsBlockCompressed)
 	}
 	// Set sizes
-	lh.setSizes(len(out), len(b.literals), single)
+	lh.setSizes(len(out), len(lits), single)
 	bh.setSize(uint32(len(out) + lh.size() + 1))
 
 	// Write block headers.
@@ -391,20 +420,74 @@ func (b *blockEnc) encodeLits() error {
 	return nil
 }
 
-// encode will encode the block and put the output in b.output.
-func (b *blockEnc) encode() error {
-	if len(b.sequences) == 0 {
-		return b.encodeLits()
+// fuzzFseEncoder can be used to fuzz the FSE encoder.
+func fuzzFseEncoder(data []byte) int {
+	if len(data) > maxSequences || len(data) < 2 {
+		return 0
 	}
-	// We want some difference
-	if len(b.literals) > (b.size - (b.size >> 5)) {
-		return errIncompressible
+	enc := fseEncoder{}
+	hist := enc.Histogram()[:256]
+	maxSym := uint8(0)
+	for i, v := range data {
+		v = v & 63
+		data[i] = v
+		hist[v]++
+		if v > maxSym {
+			maxSym = v
+		}
 	}
+	if maxSym == 0 {
+		// All 0
+		return 0
+	}
+	maxCount := func(a []uint32) int {
+		var max uint32
+		for _, v := range a {
+			if v > max {
+				max = v
+			}
+		}
+		return int(max)
+	}
+	cnt := maxCount(hist[:maxSym])
+	if cnt == len(data) {
+		// RLE
+		return 0
+	}
+	enc.HistogramFinished(maxSym, cnt)
+	err := enc.normalizeCount(len(data))
+	if err != nil {
+		return 0
+	}
+	_, err = enc.writeCount(nil)
+	if err != nil {
+		panic(err)
+	}
+	return 1
+}
+
+// encode will encode the block and append the output in b.output.
+// Previous offset codes must be pushed if more blocks are expected.
+func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
+	if len(b.sequences) == 0 {
+		return b.encodeLits(b.literals, rawAllLits)
+	}
+	// We want some difference to at least account for the headers.
+	saved := b.size - len(b.literals) - (b.size >> 5)
+	if saved < 16 {
+		if org == nil {
+			return errIncompressible
+		}
+		b.popOffsets()
+		return b.encodeLits(org, rawAllLits)
+	}
 
 	var bh blockHeader
 	var lh literalsHeader
 	bh.setLast(b.last)
 	bh.setType(blockTypeCompressed)
+	// Store offset of the block header. Needed when we know the size.
+	bhOffset := len(b.output)
 	b.output = bh.appendTo(b.output)
 
 	var (
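Aside: fuzzFseEncoder above follows the dvyukov/go-fuzz convention of returning 1 for inputs worth keeping in the corpus and 0 to skip. A hypothetical bridge to Go 1.18+ native fuzzing could look like the test-file sketch below; the wrapper name and seed input are illustrative, not part of this commit:

```go
//go:build go1.18

package zstd

import "testing"

// FuzzFseEncoderBridge feeds native-fuzzing inputs into the go-fuzz style
// harness defined above (place in a _test.go file inside the zstd package).
func FuzzFseEncoderBridge(f *testing.F) {
	f.Add([]byte{1, 2, 3, 4, 5, 6, 7, 8})
	f.Fuzz(func(t *testing.T, data []byte) {
		fuzzFseEncoder(data)
	})
}
```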
@@ -412,22 +495,22 @@ func (b *blockEnc) encode() error {
 		reUsed, single bool
 		err            error
 	)
-	if len(b.literals) >= 1024 {
+	if b.dictLitEnc != nil {
+		b.litEnc.TransferCTable(b.dictLitEnc)
+		b.litEnc.Reuse = huff0.ReusePolicyAllow
+		b.dictLitEnc = nil
+	}
+	if len(b.literals) >= 1024 && !raw {
 		// Use 4 Streams.
 		out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
-		if len(out) > len(b.literals)-len(b.literals)>>4 {
-			err = huff0.ErrIncompressible
-		}
-	} else if len(b.literals) > 32 {
+	} else if len(b.literals) > 32 && !raw {
 		// Use 1 stream
 		single = true
 		out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
-		if len(out) > len(b.literals)-len(b.literals)>>4 {
-			err = huff0.ErrIncompressible
-		}
 	} else {
 		err = huff0.ErrIncompressible
 	}
 
 	switch err {
 	case huff0.ErrIncompressible:
 		lh.setType(literalsBlockRaw)
@@ -445,11 +528,6 @@ func (b *blockEnc) encode() error {
 		if debug {
 			println("Adding literals RLE")
 		}
-	default:
-		if debug {
-			println("Adding literals ERROR:", err)
-		}
-		return err
 	case nil:
 		// Compressed litLen...
 		if reUsed {
@@ -480,6 +558,11 @@ func (b *blockEnc) encode() error {
 		if debug {
 			println("Adding literals compressed")
 		}
+	default:
+		if debug {
+			println("Adding literals ERROR:", err)
+		}
+		return err
 	}
 
 	// Sequence compression
@@ -695,23 +778,23 @@ func (b *blockEnc) encode() error {
 	}
 	b.output = wr.out
 
-	if len(b.output)-3 >= b.size {
+	if len(b.output)-3-bhOffset >= b.size {
 		// Maybe even add a bigger margin.
 		b.litEnc.Reuse = huff0.ReusePolicyNone
 		return errIncompressible
 	}
 
 	// Size is output minus block header.
-	bh.setSize(uint32(len(b.output)) - 3)
+	bh.setSize(uint32(len(b.output)-bhOffset) - 3)
 	if debug {
 		println("Rewriting block header", bh)
 	}
-	_ = bh.appendTo(b.output[:0])
+	_ = bh.appendTo(b.output[bhOffset:bhOffset])
 	b.coders.setPrev(llEnc, mlEnc, ofEnc)
 	return nil
 }
 
-var errIncompressible = errors.New("uncompressible")
+var errIncompressible = errors.New("incompressible")
 
 func (b *blockEnc) genCodes() {
 	if len(b.sequences) == 0 {
@@ -757,7 +840,7 @@ func (b *blockEnc) genCodes() {
 		mlH[v]++
 		if v > mlMax {
 			mlMax = v
-			if debug && mlMax > maxMatchLengthSymbol {
+			if debugAsserts && mlMax > maxMatchLengthSymbol {
 				panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
 			}
 		}
@@ -772,13 +855,13 @@ func (b *blockEnc) genCodes() {
 		}
 		return int(max)
 	}
-	if mlMax > maxMatchLengthSymbol {
+	if debugAsserts && mlMax > maxMatchLengthSymbol {
 		panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
 	}
-	if ofMax > maxOffsetBits {
+	if debugAsserts && ofMax > maxOffsetBits {
 		panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax))
 	}
-	if llMax > maxLiteralLengthSymbol {
+	if debugAsserts && llMax > maxLiteralLengthSymbol {
 		panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
 	}