Add brotli and zstd content-encoding
This commit is contained in:
parent
495f9b2d35
commit
ac2a20094f
1
go.mod
1
go.mod
@ -6,6 +6,7 @@ require (
|
||||
github.com/99designs/gqlgen v0.17.36
|
||||
github.com/Masterminds/semver/v3 v3.2.1
|
||||
github.com/adhocore/gronx v1.6.5
|
||||
github.com/andybalholm/brotli v1.1.0
|
||||
github.com/atrox/haikunatorgo/v2 v2.0.1
|
||||
github.com/caddyserver/certmagic v0.19.2
|
||||
github.com/casbin/casbin/v2 v2.77.2
|
||||
|
||||
2
go.sum
2
go.sum
@ -18,6 +18,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
|
||||
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
|
||||
github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
|
||||
github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
|
||||
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
|
||||
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
|
||||
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
|
||||
|
||||
52
http/middleware/compress/brotli.go
Normal file
52
http/middleware/compress/brotli.go
Normal file
@ -0,0 +1,52 @@
|
||||
package compress
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/andybalholm/brotli"
|
||||
)
|
||||
|
||||
// brotliImpl is a Compression implementation backed by a sync.Pool of
// reusable brotli writers, avoiding a fresh writer allocation per response.
type brotliImpl struct {
	// pool holds *brotli.Writer values created by NewBrotli's factory.
	pool sync.Pool
}
|
||||
|
||||
func NewBrotli(level Level) Compression {
|
||||
brotliLevel := brotli.DefaultCompression
|
||||
if level == BestCompression {
|
||||
brotliLevel = brotli.BestCompression
|
||||
} else {
|
||||
brotliLevel = brotli.BestSpeed
|
||||
}
|
||||
|
||||
g := &brotliImpl{
|
||||
pool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
return brotli.NewWriterLevel(io.Discard, brotliLevel)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return g
|
||||
}
|
||||
|
||||
func (g *brotliImpl) Acquire() Compressor {
|
||||
c := g.pool.Get()
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
x, ok := c.(Compressor)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
x.Reset(io.Discard)
|
||||
|
||||
return x
|
||||
}
|
||||
|
||||
func (g *brotliImpl) Release(c Compressor) {
|
||||
c.Reset(io.Discard)
|
||||
g.pool.Put(c)
|
||||
}
|
||||
@ -1,35 +1,47 @@
|
||||
package gzip
|
||||
package compress
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/labstack/echo/v4"
|
||||
"github.com/labstack/echo/v4/middleware"
|
||||
)
|
||||
|
||||
// Config defines the config for Gzip middleware.
|
||||
// Config defines the config for compress middleware.
|
||||
type Config struct {
|
||||
// Skipper defines a function to skip middleware.
|
||||
Skipper middleware.Skipper
|
||||
|
||||
// Gzip compression level.
|
||||
// Compression level.
|
||||
// Optional. Default value -1.
|
||||
Level int
|
||||
Level Level
|
||||
|
||||
// Length threshold before gzip compression
|
||||
// Length threshold before compression
|
||||
// is used. Optional. Default value 0
|
||||
MinLength int
|
||||
}
|
||||
|
||||
type zstdResponseWriter struct {
|
||||
*zstd.Encoder
|
||||
type Compression interface {
|
||||
Acquire() Compressor
|
||||
Release(c Compressor)
|
||||
}
|
||||
|
||||
type Compressor interface {
|
||||
Write(p []byte) (int, error)
|
||||
Flush() error
|
||||
Reset(w io.Writer)
|
||||
Close() error
|
||||
}
|
||||
|
||||
type compressResponseWriter struct {
|
||||
Compressor
|
||||
http.ResponseWriter
|
||||
wroteHeader bool
|
||||
wroteBody bool
|
||||
@ -38,14 +50,27 @@ type zstdResponseWriter struct {
|
||||
buffer *bytes.Buffer
|
||||
code int
|
||||
headerContentLength string
|
||||
scheme string
|
||||
}
|
||||
|
||||
const scheme = "zstd"
|
||||
type Scheme string
|
||||
|
||||
func (s Scheme) String() string {
|
||||
return string(s)
|
||||
}
|
||||
|
||||
const (
|
||||
BestCompression = int(zstd.SpeedBestCompression)
|
||||
BestSpeed = int(zstd.SpeedFastest)
|
||||
DefaultCompression = int(zstd.SpeedDefault)
|
||||
GzipScheme Scheme = "gzip"
|
||||
BrotliScheme Scheme = "br"
|
||||
ZstdScheme Scheme = "zstd"
|
||||
)
|
||||
|
||||
type Level int
|
||||
|
||||
const (
|
||||
DefaultCompression Level = 0
|
||||
BestCompression Level = 1
|
||||
BestSpeed Level = 2
|
||||
)
|
||||
|
||||
// DefaultConfig is the default Gzip middleware config.
|
||||
@ -78,13 +103,13 @@ func ContentTypeSkipper(contentTypes []string) middleware.Skipper {
|
||||
}
|
||||
}
|
||||
|
||||
// New returns a middleware which compresses HTTP response using gzip compression
|
||||
// New returns a middleware which compresses HTTP response using a compression
|
||||
// scheme.
|
||||
func New() echo.MiddlewareFunc {
|
||||
return NewWithConfig(DefaultConfig)
|
||||
}
|
||||
|
||||
// NewWithConfig return Gzip middleware with config.
|
||||
// NewWithConfig return compress middleware with config.
|
||||
// See: `New()`.
|
||||
func NewWithConfig(config Config) echo.MiddlewareFunc {
|
||||
// Defaults
|
||||
@ -100,7 +125,9 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {
|
||||
config.MinLength = DefaultConfig.MinLength
|
||||
}
|
||||
|
||||
pool := zstdPool(config)
|
||||
gzipPool := NewGzip(config.Level)
|
||||
brotliPool := NewBrotli(config.Level)
|
||||
zstdPool := NewZstd(config.Level)
|
||||
bpool := bufferPool()
|
||||
|
||||
return func(next echo.HandlerFunc) echo.HandlerFunc {
|
||||
@ -111,12 +138,26 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {
|
||||
|
||||
res := c.Response()
|
||||
res.Header().Add(echo.HeaderVary, echo.HeaderAcceptEncoding)
|
||||
encodings := c.Request().Header.Get(echo.HeaderAcceptEncoding)
|
||||
|
||||
if strings.Contains(c.Request().Header.Get(echo.HeaderAcceptEncoding), scheme) {
|
||||
i := pool.Get()
|
||||
w, ok := i.(*zstd.Encoder)
|
||||
if !ok {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, i.(error).Error())
|
||||
var pool Compression
|
||||
var scheme Scheme
|
||||
|
||||
if strings.Contains(encodings, ZstdScheme.String()) {
|
||||
pool = zstdPool
|
||||
scheme = ZstdScheme
|
||||
} else if strings.Contains(encodings, BrotliScheme.String()) {
|
||||
pool = brotliPool
|
||||
scheme = BrotliScheme
|
||||
} else if strings.Contains(encodings, GzipScheme.String()) {
|
||||
pool = gzipPool
|
||||
scheme = GzipScheme
|
||||
}
|
||||
|
||||
if pool != nil {
|
||||
w := pool.Acquire()
|
||||
if w == nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, fmt.Errorf("failed to acquire compressor for %s", scheme))
|
||||
}
|
||||
rw := res.Writer
|
||||
w.Reset(rw)
|
||||
@ -124,11 +165,11 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {
|
||||
buf := bpool.Get().(*bytes.Buffer)
|
||||
buf.Reset()
|
||||
|
||||
grw := &zstdResponseWriter{Encoder: w, ResponseWriter: rw, minLength: config.MinLength, buffer: buf}
|
||||
grw := &compressResponseWriter{Compressor: w, ResponseWriter: rw, minLength: config.MinLength, buffer: buf, scheme: scheme.String()}
|
||||
|
||||
defer func() {
|
||||
if !grw.wroteBody {
|
||||
if res.Header().Get(echo.HeaderContentEncoding) == scheme {
|
||||
if res.Header().Get(echo.HeaderContentEncoding) == scheme.String() {
|
||||
res.Header().Del(echo.HeaderContentEncoding)
|
||||
}
|
||||
// We have to reset response to it's pristine state when
|
||||
@ -151,7 +192,7 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {
|
||||
}
|
||||
w.Close()
|
||||
bpool.Put(buf)
|
||||
pool.Put(w)
|
||||
pool.Release(w)
|
||||
}()
|
||||
|
||||
res.Writer = grw
|
||||
@ -162,7 +203,7 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {
|
||||
}
|
||||
}
|
||||
|
||||
func (w *zstdResponseWriter) WriteHeader(code int) {
|
||||
func (w *compressResponseWriter) WriteHeader(code int) {
|
||||
if code == http.StatusNoContent { // Issue #489
|
||||
w.ResponseWriter.Header().Del(echo.HeaderContentEncoding)
|
||||
}
|
||||
@ -175,7 +216,7 @@ func (w *zstdResponseWriter) WriteHeader(code int) {
|
||||
w.code = code
|
||||
}
|
||||
|
||||
func (w *zstdResponseWriter) Write(b []byte) (int, error) {
|
||||
func (w *compressResponseWriter) Write(b []byte) (int, error) {
|
||||
if w.Header().Get(echo.HeaderContentType) == "" {
|
||||
w.Header().Set(echo.HeaderContentType, http.DetectContentType(b))
|
||||
}
|
||||
@ -189,64 +230,49 @@ func (w *zstdResponseWriter) Write(b []byte) (int, error) {
|
||||
w.minLengthExceeded = true
|
||||
|
||||
// The minimum length is exceeded, add Content-Encoding header and write the header
|
||||
w.Header().Set(echo.HeaderContentEncoding, scheme) // Issue #806
|
||||
w.Header().Set(echo.HeaderContentEncoding, w.scheme) // Issue #806
|
||||
if w.wroteHeader {
|
||||
w.ResponseWriter.WriteHeader(w.code)
|
||||
}
|
||||
|
||||
return w.Encoder.Write(w.buffer.Bytes())
|
||||
//return w.ResponseWriter.Write(w.Encoder.EncodeAll(w.buffer.Bytes(), nil))
|
||||
return w.Compressor.Write(w.buffer.Bytes())
|
||||
} else {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
|
||||
return w.Encoder.Write(b)
|
||||
//return w.ResponseWriter.Write(w.Encoder.EncodeAll(b, nil))
|
||||
return w.Compressor.Write(b)
|
||||
}
|
||||
|
||||
func (w *zstdResponseWriter) Flush() {
|
||||
func (w *compressResponseWriter) Flush() {
|
||||
if !w.minLengthExceeded {
|
||||
// Enforce compression
|
||||
w.minLengthExceeded = true
|
||||
w.Header().Set(echo.HeaderContentEncoding, scheme) // Issue #806
|
||||
w.Header().Set(echo.HeaderContentEncoding, w.scheme) // Issue #806
|
||||
if w.wroteHeader {
|
||||
w.ResponseWriter.WriteHeader(w.code)
|
||||
}
|
||||
|
||||
w.Encoder.Write(w.buffer.Bytes())
|
||||
//w.ResponseWriter.Write(w.Encoder.EncodeAll(w.buffer.Bytes(), nil))
|
||||
w.Compressor.Write(w.buffer.Bytes())
|
||||
}
|
||||
|
||||
w.Encoder.Flush()
|
||||
w.Compressor.Flush()
|
||||
if flusher, ok := w.ResponseWriter.(http.Flusher); ok {
|
||||
flusher.Flush()
|
||||
}
|
||||
}
|
||||
|
||||
func (w *zstdResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
||||
func (w *compressResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
||||
return w.ResponseWriter.(http.Hijacker).Hijack()
|
||||
}
|
||||
|
||||
func (w *zstdResponseWriter) Push(target string, opts *http.PushOptions) error {
|
||||
func (w *compressResponseWriter) Push(target string, opts *http.PushOptions) error {
|
||||
if p, ok := w.ResponseWriter.(http.Pusher); ok {
|
||||
return p.Push(target, opts)
|
||||
}
|
||||
return http.ErrNotSupported
|
||||
}
|
||||
|
||||
func zstdPool(config Config) sync.Pool {
|
||||
return sync.Pool{
|
||||
New: func() interface{} {
|
||||
w, err := zstd.NewWriter(io.Discard, zstd.WithZeroFrames(true), zstd.WithEncoderLevel(zstd.EncoderLevel(config.Level)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return w
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func bufferPool() sync.Pool {
|
||||
return sync.Pool{
|
||||
New: func() interface{} {
|
||||
342
http/middleware/compress/compress_test.go
Normal file
342
http/middleware/compress/compress_test.go
Normal file
@ -0,0 +1,342 @@
|
||||
package compress
|
||||
|
||||
import (
	"bytes"
	"io"
	"net/http"
	"net/http/httptest"
	"os"
	"strconv"
	"testing"

	"github.com/andybalholm/brotli"
	"github.com/klauspost/compress/gzip"
	"github.com/klauspost/compress/zstd"
	"github.com/labstack/echo/v4"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
|
||||
|
||||
// StrangeCloser matches decompressors whose Close takes no arguments and
// returns no error, unlike io.Closer (zstd's decoder appears to be such a
// type here — confirm against its API).
type StrangeCloser interface {
	Close()
}
|
||||
|
||||
// Resetter matches decompressors that can be re-pointed at a new
// compressed input stream without reallocating.
type Resetter interface {
	Reset(io.Reader) error
}
|
||||
|
||||
// ReadCloseResetter is the common surface the tests need from every
// scheme's decompressor: read the plaintext, close, and reset onto a
// fresh compressed stream.
type ReadCloseResetter interface {
	io.Reader
	io.Closer
	Resetter
}
|
||||
|
||||
// nopReadCloseResetter adapts an arbitrary reader to ReadCloseResetter,
// degrading Close/Reset to no-ops when the wrapped reader lacks them.
type nopReadCloseResetter struct {
	io.Reader
}
|
||||
|
||||
func (rcr *nopReadCloseResetter) Close() error {
|
||||
if closer, ok := rcr.Reader.(io.Closer); ok {
|
||||
return closer.Close()
|
||||
}
|
||||
|
||||
if closer, ok := rcr.Reader.(StrangeCloser); ok {
|
||||
closer.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rcr *nopReadCloseResetter) Reset(r io.Reader) error {
|
||||
resetter, ok := rcr.Reader.(Resetter)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return resetter.Reset(r)
|
||||
}
|
||||
|
||||
// getTestcases maps each supported Content-Encoding scheme to a factory
// that wraps the matching decompressor around a compressed stream, so
// every test can iterate once per scheme.
func getTestcases() map[Scheme]func(r io.Reader) (ReadCloseResetter, error) {
	return map[Scheme]func(r io.Reader) (ReadCloseResetter, error){
		GzipScheme: func(r io.Reader) (ReadCloseResetter, error) {
			// gzip.Reader already satisfies ReadCloseResetter directly.
			return gzip.NewReader(r)
		},
		BrotliScheme: func(r io.Reader) (ReadCloseResetter, error) {
			// brotli's reader constructor returns no error and lacks
			// Close/Reset(io.Reader), so adapt it with the no-op wrapper.
			return &nopReadCloseResetter{brotli.NewReader(r)}, nil
		},
		ZstdScheme: func(r io.Reader) (ReadCloseResetter, error) {
			// zstd's decoder has a non-standard Close signature; the
			// wrapper bridges it (and any Reset mismatch) to the interface.
			reader, err := zstd.NewReader(r)
			return &nopReadCloseResetter{reader}, err
		},
	}
}
|
||||
|
||||
// TestCompress runs the basic middleware behavior once per scheme:
// pass-through without Accept-Encoding, normal compression with it,
// and a chunked/streamed response with intermediate flushes.
func TestCompress(t *testing.T) {
	schemes := getTestcases()

	for scheme, reader := range schemes {
		t.Run(scheme.String(), func(t *testing.T) {
			e := echo.New()
			req := httptest.NewRequest(http.MethodGet, "/", nil)
			rec := httptest.NewRecorder()
			c := e.NewContext(req, rec)

			// Skip if no Accept-Encoding header
			h := New()(func(c echo.Context) error {
				c.Response().Write([]byte("test")) // For Content-Type sniffing
				return nil
			})
			h(c)

			assert := assert.New(t)

			// Without Accept-Encoding the body must be untouched.
			assert.Equal("test", rec.Body.String())

			// Compression: the response must carry the negotiated
			// Content-Encoding and decode back to the original body.
			req = httptest.NewRequest(http.MethodGet, "/", nil)
			req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
			rec = httptest.NewRecorder()
			c = e.NewContext(req, rec)
			h(c)
			assert.Equal(scheme.String(), rec.Header().Get(echo.HeaderContentEncoding))
			assert.Contains(rec.Header().Get(echo.HeaderContentType), echo.MIMETextPlain)
			r, err := reader(rec.Body)
			if assert.NoError(err) {
				buf := new(bytes.Buffer)
				defer r.Close()
				buf.ReadFrom(r)
				assert.Equal("test", buf.String())
			}

			// Chunked/streamed response: each Flush must reach the
			// client compressed, and the concatenated stream must
			// decode to all three writes. (Comment previously said
			// "Gzip chunked" but this runs for every scheme.)
			req = httptest.NewRequest(http.MethodGet, "/", nil)
			req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
			rec = httptest.NewRecorder()

			c = e.NewContext(req, rec)
			New()(func(c echo.Context) error {
				c.Response().Header().Set("Content-Type", "text/event-stream")
				c.Response().Header().Set("Transfer-Encoding", "chunked")

				// Write and flush the first part of the data
				c.Response().Write([]byte("test\n"))
				c.Response().Flush()

				// The flush must have reached the recorder with the
				// Content-Encoding already set.
				assert.True(rec.Flushed)
				assert.Equal(scheme.String(), rec.Header().Get(echo.HeaderContentEncoding))

				// Write and flush the second part of the data
				c.Response().Write([]byte("tost\n"))
				c.Response().Flush()

				// Write the final part of the data and return
				c.Response().Write([]byte("tast"))
				return nil
			})(c)

			// Reuse the scheme's decompressor (reset onto the new body)
			// to validate the full streamed payload.
			buf := new(bytes.Buffer)
			r.Reset(rec.Body)
			defer r.Close()
			buf.ReadFrom(r)
			assert.Equal("test\ntost\ntast", buf.String())
		})
	}
}
|
||||
|
||||
// TestCompressWithMinLength verifies the MinLength threshold per scheme:
// bodies shorter than the threshold stay uncompressed, longer ones are
// compressed with the negotiated encoding.
func TestCompressWithMinLength(t *testing.T) {
	schemes := getTestcases()

	for scheme, reader := range schemes {
		t.Run(scheme.String(), func(t *testing.T) {
			e := echo.New()
			// MinLength 5: "test" (4 bytes) is below, "foobar" (6) above.
			// (The old "Invalid level" comment was stale.)
			e.Use(NewWithConfig(Config{MinLength: 5}))
			e.GET("/", func(c echo.Context) error {
				c.Response().Write([]byte("test"))
				return nil
			})
			e.GET("/foobar", func(c echo.Context) error {
				c.Response().Write([]byte("foobar"))
				return nil
			})
			req := httptest.NewRequest(http.MethodGet, "/", nil)
			req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
			rec := httptest.NewRecorder()
			e.ServeHTTP(rec, req)
			// Below threshold: no Content-Encoding, plain body.
			assert.Equal(t, "", rec.Header().Get(echo.HeaderContentEncoding))
			assert.Contains(t, rec.Body.String(), "test")

			req = httptest.NewRequest(http.MethodGet, "/foobar", nil)
			req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
			rec = httptest.NewRecorder()
			e.ServeHTTP(rec, req)
			// Above threshold: compressed, and decodes to the original.
			assert.Equal(t, scheme.String(), rec.Header().Get(echo.HeaderContentEncoding))
			r, err := reader(rec.Body)
			if assert.NoError(t, err) {
				buf := new(bytes.Buffer)
				defer r.Close()
				buf.ReadFrom(r)
				assert.Equal(t, "foobar", buf.String())
			}
		})
	}
}
|
||||
|
||||
// TestCompressNoContent verifies that a 204 No Content response passes
// through untouched for every scheme: no Content-Encoding, no
// Content-Type, and an empty body.
func TestCompressNoContent(t *testing.T) {
	schemes := getTestcases()

	for scheme := range schemes {
		t.Run(scheme.String(), func(t *testing.T) {
			e := echo.New()
			req := httptest.NewRequest(http.MethodGet, "/", nil)
			req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
			rec := httptest.NewRecorder()
			c := e.NewContext(req, rec)
			h := New()(func(c echo.Context) error {
				return c.NoContent(http.StatusNoContent)
			})
			if assert.NoError(t, h(c)) {
				assert.Empty(t, rec.Header().Get(echo.HeaderContentEncoding))
				assert.Empty(t, rec.Header().Get(echo.HeaderContentType))
				assert.Equal(t, 0, len(rec.Body.Bytes()))
			}
		})
	}
}
|
||||
|
||||
// TestCompressEmpty verifies that an empty 200 body is still emitted as
// a valid (decodable) compressed stream for every scheme, with the
// Content-Encoding header set.
func TestCompressEmpty(t *testing.T) {
	schemes := getTestcases()

	for scheme, reader := range schemes {
		t.Run(scheme.String(), func(t *testing.T) {
			e := echo.New()
			req := httptest.NewRequest(http.MethodGet, "/", nil)
			req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
			rec := httptest.NewRecorder()
			c := e.NewContext(req, rec)
			h := New()(func(c echo.Context) error {
				return c.String(http.StatusOK, "")
			})
			if assert.NoError(t, h(c)) {
				assert.Equal(t, scheme.String(), rec.Header().Get(echo.HeaderContentEncoding))
				assert.Equal(t, "text/plain; charset=UTF-8", rec.Header().Get(echo.HeaderContentType))
				// The compressed stream must decode to an empty string,
				// i.e. a valid zero-length frame rather than garbage.
				r, err := reader(rec.Body)
				if assert.NoError(t, err) {
					var buf bytes.Buffer
					buf.ReadFrom(r)
					assert.Equal(t, "", buf.String())
				}
			}
		})
	}
}
|
||||
|
||||
// TestCompressErrorReturned verifies that when the handler returns an
// error, the error response is not compressed and carries no
// Content-Encoding header, for every scheme.
func TestCompressErrorReturned(t *testing.T) {
	schemes := getTestcases()

	for scheme := range schemes {
		t.Run(scheme.String(), func(t *testing.T) {
			e := echo.New()
			e.Use(New())
			e.GET("/", func(c echo.Context) error {
				return echo.ErrNotFound
			})
			req := httptest.NewRequest(http.MethodGet, "/", nil)
			req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
			rec := httptest.NewRecorder()
			e.ServeHTTP(rec, req)
			assert.Equal(t, http.StatusNotFound, rec.Code)
			assert.Empty(t, rec.Header().Get(echo.HeaderContentEncoding))
		})
	}
}
|
||||
|
||||
// Issue #806
|
||||
func TestCompressWithStatic(t *testing.T) {
|
||||
schemes := getTestcases()
|
||||
|
||||
for scheme, reader := range schemes {
|
||||
t.Run(scheme.String(), func(t *testing.T) {
|
||||
e := echo.New()
|
||||
e.Use(New())
|
||||
e.Static("/test", "./")
|
||||
req := httptest.NewRequest(http.MethodGet, "/test/compress.go", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
|
||||
rec := httptest.NewRecorder()
|
||||
e.ServeHTTP(rec, req)
|
||||
assert.Equal(t, http.StatusOK, rec.Code)
|
||||
// Data is written out in chunks when Content-Length == "", so only
|
||||
// validate the content length if it's not set.
|
||||
if cl := rec.Header().Get("Content-Length"); cl != "" {
|
||||
assert.Equal(t, cl, rec.Body.Len())
|
||||
}
|
||||
r, err := reader(rec.Body)
|
||||
if assert.NoError(t, err) {
|
||||
defer r.Close()
|
||||
want, err := os.ReadFile("./compress.go")
|
||||
if assert.NoError(t, err) {
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(r)
|
||||
assert.Equal(t, want, buf.Bytes())
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkCompress measures the per-request overhead of the middleware
// with a tiny ("test") body, once per scheme.
func BenchmarkCompress(b *testing.B) {
	schemes := getTestcases()

	for scheme := range schemes {
		b.Run(scheme.String(), func(b *testing.B) {
			e := echo.New()

			req := httptest.NewRequest(http.MethodGet, "/", nil)
			req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())

			h := New()(func(c echo.Context) error {
				c.Response().Write([]byte("test"))
				return nil
			})

			b.ReportAllocs()
			b.ResetTimer()

			// Only the recorder/context construction and the handler
			// call are inside the timed loop.
			for i := 0; i < b.N; i++ {
				rec := httptest.NewRecorder()
				c := e.NewContext(req, rec)
				h(c)
			}
		})
	}
}
|
||||
|
||||
// BenchmarkCompressLarge measures middleware throughput on a realistic
// large JSON payload loaded from a fixture file, once per scheme.
func BenchmarkCompressLarge(b *testing.B) {
	// Fixture load happens before ResetTimer so file I/O is untimed.
	data, err := os.ReadFile("./fixtures/processList.json")
	require.NoError(b, err)

	schemes := getTestcases()

	for scheme := range schemes {
		b.Run(scheme.String(), func(b *testing.B) {
			e := echo.New()

			req := httptest.NewRequest(http.MethodGet, "/", nil)
			req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())

			h := New()(func(c echo.Context) error {
				c.Response().Write(data)
				return nil
			})

			b.ReportAllocs()
			b.ResetTimer()

			for i := 0; i < b.N; i++ {
				rec := httptest.NewRecorder()
				c := e.NewContext(req, rec)
				h(c)
			}
		})
	}
}
|
||||
56
http/middleware/compress/gzip.go
Normal file
56
http/middleware/compress/gzip.go
Normal file
@ -0,0 +1,56 @@
|
||||
package compress
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/klauspost/compress/gzip"
|
||||
)
|
||||
|
||||
// gzipImpl is a Compression implementation backed by a sync.Pool of
// reusable gzip writers, avoiding a fresh writer allocation per response.
type gzipImpl struct {
	// pool holds *gzip.Writer values created by NewGzip's factory.
	pool sync.Pool
}
|
||||
|
||||
func NewGzip(level Level) Compression {
|
||||
gzipLevel := gzip.DefaultCompression
|
||||
if level == BestCompression {
|
||||
gzipLevel = gzip.BestCompression
|
||||
} else {
|
||||
gzipLevel = gzip.BestSpeed
|
||||
}
|
||||
|
||||
g := &gzipImpl{
|
||||
pool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
w, err := gzip.NewWriterLevel(io.Discard, gzipLevel)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return w
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return g
|
||||
}
|
||||
|
||||
func (g *gzipImpl) Acquire() Compressor {
|
||||
c := g.pool.Get()
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
x, ok := c.(Compressor)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
x.Reset(io.Discard)
|
||||
|
||||
return x
|
||||
}
|
||||
|
||||
func (g *gzipImpl) Release(c Compressor) {
|
||||
c.Reset(io.Discard)
|
||||
g.pool.Put(c)
|
||||
}
|
||||
56
http/middleware/compress/zstd.go
Normal file
56
http/middleware/compress/zstd.go
Normal file
@ -0,0 +1,56 @@
|
||||
package compress
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/klauspost/compress/zstd"
|
||||
)
|
||||
|
||||
// zstdImpl is a Compression implementation backed by a sync.Pool of
// reusable zstd encoders, avoiding a fresh encoder allocation per response.
type zstdImpl struct {
	// pool holds *zstd.Encoder values created by NewZstd's factory.
	pool sync.Pool
}
|
||||
|
||||
func NewZstd(level Level) Compression {
|
||||
zstdLevel := zstd.SpeedDefault
|
||||
if level == BestCompression {
|
||||
zstdLevel = zstd.SpeedBestCompression
|
||||
} else {
|
||||
zstdLevel = zstd.SpeedFastest
|
||||
}
|
||||
|
||||
g := &zstdImpl{
|
||||
pool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
w, err := zstd.NewWriter(io.Discard, zstd.WithZeroFrames(true), zstd.WithEncoderLevel(zstdLevel))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return w
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return g
|
||||
}
|
||||
|
||||
func (g *zstdImpl) Acquire() Compressor {
|
||||
c := g.pool.Get()
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
x, ok := c.(Compressor)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
x.Reset(io.Discard)
|
||||
|
||||
return x
|
||||
}
|
||||
|
||||
func (g *zstdImpl) Release(c Compressor) {
|
||||
c.Reset(io.Discard)
|
||||
g.pool.Put(c)
|
||||
}
|
||||
@ -1,255 +0,0 @@
|
||||
package gzip
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/klauspost/compress/gzip"
|
||||
"github.com/labstack/echo/v4"
|
||||
"github.com/labstack/echo/v4/middleware"
|
||||
)
|
||||
|
||||
// Config defines the config for Gzip middleware.
|
||||
type Config struct {
|
||||
// Skipper defines a function to skip middleware.
|
||||
Skipper middleware.Skipper
|
||||
|
||||
// Gzip compression level.
|
||||
// Optional. Default value -1.
|
||||
Level int
|
||||
|
||||
// Length threshold before gzip compression
|
||||
// is used. Optional. Default value 0
|
||||
MinLength int
|
||||
}
|
||||
|
||||
type gzipResponseWriter struct {
|
||||
io.Writer
|
||||
http.ResponseWriter
|
||||
wroteHeader bool
|
||||
wroteBody bool
|
||||
minLength int
|
||||
minLengthExceeded bool
|
||||
buffer *bytes.Buffer
|
||||
code int
|
||||
headerContentLength string
|
||||
}
|
||||
|
||||
const gzipScheme = "gzip"
|
||||
|
||||
const (
|
||||
BestCompression = gzip.BestCompression
|
||||
BestSpeed = gzip.BestSpeed
|
||||
DefaultCompression = gzip.DefaultCompression
|
||||
NoCompression = gzip.NoCompression
|
||||
)
|
||||
|
||||
// DefaultConfig is the default Gzip middleware config.
|
||||
var DefaultConfig = Config{
|
||||
Skipper: middleware.DefaultSkipper,
|
||||
Level: DefaultCompression,
|
||||
MinLength: 0,
|
||||
}
|
||||
|
||||
// ContentTypesSkipper returns a Skipper based on the list of content types
|
||||
// that should be compressed. If the list is empty, all responses will be
|
||||
// compressed.
|
||||
func ContentTypeSkipper(contentTypes []string) middleware.Skipper {
|
||||
return func(c echo.Context) bool {
|
||||
// If no allowed content types are given, compress all
|
||||
if len(contentTypes) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// Iterate through the allowed content types and don't skip if the content type matches
|
||||
responseContentType := c.Response().Header().Get(echo.HeaderContentType)
|
||||
|
||||
for _, contentType := range contentTypes {
|
||||
if strings.Contains(responseContentType, contentType) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// New returns a middleware which compresses HTTP response using gzip compression
|
||||
// scheme.
|
||||
func New() echo.MiddlewareFunc {
|
||||
return NewWithConfig(DefaultConfig)
|
||||
}
|
||||
|
||||
// NewWithConfig return Gzip middleware with config.
|
||||
// See: `New()`.
|
||||
func NewWithConfig(config Config) echo.MiddlewareFunc {
|
||||
// Defaults
|
||||
if config.Skipper == nil {
|
||||
config.Skipper = DefaultConfig.Skipper
|
||||
}
|
||||
|
||||
if config.Level == 0 {
|
||||
config.Level = DefaultConfig.Level
|
||||
}
|
||||
|
||||
if config.MinLength < 0 {
|
||||
config.MinLength = DefaultConfig.MinLength
|
||||
}
|
||||
|
||||
pool := gzipPool(config)
|
||||
bpool := bufferPool()
|
||||
|
||||
return func(next echo.HandlerFunc) echo.HandlerFunc {
|
||||
return func(c echo.Context) error {
|
||||
if config.Skipper(c) {
|
||||
return next(c)
|
||||
}
|
||||
|
||||
res := c.Response()
|
||||
res.Header().Add(echo.HeaderVary, echo.HeaderAcceptEncoding)
|
||||
|
||||
if strings.Contains(c.Request().Header.Get(echo.HeaderAcceptEncoding), gzipScheme) {
|
||||
i := pool.Get()
|
||||
w, ok := i.(*gzip.Writer)
|
||||
if !ok {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, i.(error).Error())
|
||||
}
|
||||
rw := res.Writer
|
||||
w.Reset(rw)
|
||||
|
||||
buf := bpool.Get().(*bytes.Buffer)
|
||||
buf.Reset()
|
||||
|
||||
grw := &gzipResponseWriter{Writer: w, ResponseWriter: rw, minLength: config.MinLength, buffer: buf}
|
||||
|
||||
defer func() {
|
||||
if !grw.wroteBody {
|
||||
if res.Header().Get(echo.HeaderContentEncoding) == gzipScheme {
|
||||
res.Header().Del(echo.HeaderContentEncoding)
|
||||
}
|
||||
// We have to reset response to it's pristine state when
|
||||
// nothing is written to body or error is returned.
|
||||
// See issue #424, #407.
|
||||
res.Writer = rw
|
||||
w.Reset(io.Discard)
|
||||
} else if !grw.minLengthExceeded {
|
||||
// If the minimum content length hasn't exceeded, write the uncompressed response
|
||||
res.Writer = rw
|
||||
if grw.wroteHeader {
|
||||
// Restore Content-Length header in case it was deleted
|
||||
if len(grw.headerContentLength) != 0 {
|
||||
grw.Header().Set(echo.HeaderContentLength, grw.headerContentLength)
|
||||
}
|
||||
grw.ResponseWriter.WriteHeader(grw.code)
|
||||
}
|
||||
grw.buffer.WriteTo(rw)
|
||||
w.Reset(io.Discard)
|
||||
}
|
||||
w.Close()
|
||||
bpool.Put(buf)
|
||||
pool.Put(w)
|
||||
}()
|
||||
|
||||
res.Writer = grw
|
||||
}
|
||||
|
||||
return next(c)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *gzipResponseWriter) WriteHeader(code int) {
|
||||
if code == http.StatusNoContent { // Issue #489
|
||||
w.ResponseWriter.Header().Del(echo.HeaderContentEncoding)
|
||||
}
|
||||
w.headerContentLength = w.Header().Get(echo.HeaderContentLength)
|
||||
w.Header().Del(echo.HeaderContentLength) // Issue #444
|
||||
|
||||
w.wroteHeader = true
|
||||
|
||||
// Delay writing of the header until we know if we'll actually compress the response
|
||||
w.code = code
|
||||
}
|
||||
|
||||
func (w *gzipResponseWriter) Write(b []byte) (int, error) {
|
||||
if w.Header().Get(echo.HeaderContentType) == "" {
|
||||
w.Header().Set(echo.HeaderContentType, http.DetectContentType(b))
|
||||
}
|
||||
|
||||
w.wroteBody = true
|
||||
|
||||
if !w.minLengthExceeded {
|
||||
n, err := w.buffer.Write(b)
|
||||
|
||||
if w.buffer.Len() >= w.minLength {
|
||||
w.minLengthExceeded = true
|
||||
|
||||
// The minimum length is exceeded, add Content-Encoding header and write the header
|
||||
w.Header().Set(echo.HeaderContentEncoding, gzipScheme) // Issue #806
|
||||
if w.wroteHeader {
|
||||
w.ResponseWriter.WriteHeader(w.code)
|
||||
}
|
||||
|
||||
return w.Writer.Write(w.buffer.Bytes())
|
||||
} else {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
|
||||
return w.Writer.Write(b)
|
||||
}
|
||||
|
||||
func (w *gzipResponseWriter) Flush() {
|
||||
if !w.minLengthExceeded {
|
||||
// Enforce compression
|
||||
w.minLengthExceeded = true
|
||||
w.Header().Set(echo.HeaderContentEncoding, gzipScheme) // Issue #806
|
||||
if w.wroteHeader {
|
||||
w.ResponseWriter.WriteHeader(w.code)
|
||||
}
|
||||
|
||||
w.Writer.Write(w.buffer.Bytes())
|
||||
}
|
||||
|
||||
w.Writer.(*gzip.Writer).Flush()
|
||||
if flusher, ok := w.ResponseWriter.(http.Flusher); ok {
|
||||
flusher.Flush()
|
||||
}
|
||||
}
|
||||
|
||||
func (w *gzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
||||
return w.ResponseWriter.(http.Hijacker).Hijack()
|
||||
}
|
||||
|
||||
func (w *gzipResponseWriter) Push(target string, opts *http.PushOptions) error {
|
||||
if p, ok := w.ResponseWriter.(http.Pusher); ok {
|
||||
return p.Push(target, opts)
|
||||
}
|
||||
return http.ErrNotSupported
|
||||
}
|
||||
|
||||
func gzipPool(config Config) sync.Pool {
|
||||
return sync.Pool{
|
||||
New: func() interface{} {
|
||||
w, err := gzip.NewWriterLevel(io.Discard, config.Level)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return w
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func bufferPool() sync.Pool {
|
||||
return sync.Pool{
|
||||
New: func() interface{} {
|
||||
b := &bytes.Buffer{}
|
||||
return b
|
||||
},
|
||||
}
|
||||
}
|
||||
@ -1,266 +0,0 @@
|
||||
package gzip
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/klauspost/compress/gzip"
|
||||
"github.com/labstack/echo/v4"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGzip(t *testing.T) {
|
||||
e := echo.New()
|
||||
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
rec := httptest.NewRecorder()
|
||||
c := e.NewContext(req, rec)
|
||||
|
||||
// Skip if no Accept-Encoding header
|
||||
h := New()(func(c echo.Context) error {
|
||||
c.Response().Write([]byte("test")) // For Content-Type sniffing
|
||||
return nil
|
||||
})
|
||||
h(c)
|
||||
|
||||
assert := assert.New(t)
|
||||
|
||||
assert.Equal("test", rec.Body.String())
|
||||
|
||||
// Gzip
|
||||
req = httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, gzipScheme)
|
||||
rec = httptest.NewRecorder()
|
||||
c = e.NewContext(req, rec)
|
||||
h(c)
|
||||
assert.Equal(gzipScheme, rec.Header().Get(echo.HeaderContentEncoding))
|
||||
assert.Contains(rec.Header().Get(echo.HeaderContentType), echo.MIMETextPlain)
|
||||
r, err := gzip.NewReader(rec.Body)
|
||||
if assert.NoError(err) {
|
||||
buf := new(bytes.Buffer)
|
||||
defer r.Close()
|
||||
buf.ReadFrom(r)
|
||||
assert.Equal("test", buf.String())
|
||||
}
|
||||
|
||||
chunkBuf := make([]byte, 5)
|
||||
|
||||
// Gzip chunked
|
||||
req = httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, gzipScheme)
|
||||
rec = httptest.NewRecorder()
|
||||
|
||||
c = e.NewContext(req, rec)
|
||||
New()(func(c echo.Context) error {
|
||||
c.Response().Header().Set("Content-Type", "text/event-stream")
|
||||
c.Response().Header().Set("Transfer-Encoding", "chunked")
|
||||
|
||||
// Write and flush the first part of the data
|
||||
c.Response().Write([]byte("test\n"))
|
||||
c.Response().Flush()
|
||||
|
||||
// Read the first part of the data
|
||||
assert.True(rec.Flushed)
|
||||
assert.Equal(gzipScheme, rec.Header().Get(echo.HeaderContentEncoding))
|
||||
r.Reset(rec.Body)
|
||||
|
||||
_, err = io.ReadFull(r, chunkBuf)
|
||||
assert.NoError(err)
|
||||
assert.Equal("test\n", string(chunkBuf))
|
||||
|
||||
// Write and flush the second part of the data
|
||||
c.Response().Write([]byte("test\n"))
|
||||
c.Response().Flush()
|
||||
|
||||
_, err = io.ReadFull(r, chunkBuf)
|
||||
assert.NoError(err)
|
||||
assert.Equal("test\n", string(chunkBuf))
|
||||
|
||||
// Write the final part of the data and return
|
||||
c.Response().Write([]byte("test"))
|
||||
return nil
|
||||
})(c)
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
defer r.Close()
|
||||
buf.ReadFrom(r)
|
||||
assert.Equal("test", buf.String())
|
||||
}
|
||||
|
||||
func TestGzipWithMinLength(t *testing.T) {
|
||||
e := echo.New()
|
||||
// Invalid level
|
||||
e.Use(NewWithConfig(Config{MinLength: 5}))
|
||||
e.GET("/", func(c echo.Context) error {
|
||||
c.Response().Write([]byte("test"))
|
||||
return nil
|
||||
})
|
||||
e.GET("/foobar", func(c echo.Context) error {
|
||||
c.Response().Write([]byte("foobar"))
|
||||
return nil
|
||||
})
|
||||
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, gzipScheme)
|
||||
rec := httptest.NewRecorder()
|
||||
e.ServeHTTP(rec, req)
|
||||
assert.Equal(t, "", rec.Header().Get(echo.HeaderContentEncoding))
|
||||
assert.Contains(t, rec.Body.String(), "test")
|
||||
|
||||
req = httptest.NewRequest(http.MethodGet, "/foobar", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, gzipScheme)
|
||||
rec = httptest.NewRecorder()
|
||||
e.ServeHTTP(rec, req)
|
||||
assert.Equal(t, gzipScheme, rec.Header().Get(echo.HeaderContentEncoding))
|
||||
r, err := gzip.NewReader(rec.Body)
|
||||
if assert.NoError(t, err) {
|
||||
buf := new(bytes.Buffer)
|
||||
defer r.Close()
|
||||
buf.ReadFrom(r)
|
||||
assert.Equal(t, "foobar", buf.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestGzipNoContent(t *testing.T) {
|
||||
e := echo.New()
|
||||
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, gzipScheme)
|
||||
rec := httptest.NewRecorder()
|
||||
c := e.NewContext(req, rec)
|
||||
h := New()(func(c echo.Context) error {
|
||||
return c.NoContent(http.StatusNoContent)
|
||||
})
|
||||
if assert.NoError(t, h(c)) {
|
||||
assert.Empty(t, rec.Header().Get(echo.HeaderContentEncoding))
|
||||
assert.Empty(t, rec.Header().Get(echo.HeaderContentType))
|
||||
assert.Equal(t, 0, len(rec.Body.Bytes()))
|
||||
}
|
||||
}
|
||||
|
||||
func TestGzipEmpty(t *testing.T) {
|
||||
e := echo.New()
|
||||
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, gzipScheme)
|
||||
rec := httptest.NewRecorder()
|
||||
c := e.NewContext(req, rec)
|
||||
h := New()(func(c echo.Context) error {
|
||||
return c.String(http.StatusOK, "")
|
||||
})
|
||||
if assert.NoError(t, h(c)) {
|
||||
assert.Equal(t, gzipScheme, rec.Header().Get(echo.HeaderContentEncoding))
|
||||
assert.Equal(t, "text/plain; charset=UTF-8", rec.Header().Get(echo.HeaderContentType))
|
||||
r, err := gzip.NewReader(rec.Body)
|
||||
if assert.NoError(t, err) {
|
||||
var buf bytes.Buffer
|
||||
buf.ReadFrom(r)
|
||||
assert.Equal(t, "", buf.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGzipErrorReturned(t *testing.T) {
|
||||
e := echo.New()
|
||||
e.Use(New())
|
||||
e.GET("/", func(c echo.Context) error {
|
||||
return echo.ErrNotFound
|
||||
})
|
||||
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, gzipScheme)
|
||||
rec := httptest.NewRecorder()
|
||||
e.ServeHTTP(rec, req)
|
||||
assert.Equal(t, http.StatusNotFound, rec.Code)
|
||||
assert.Empty(t, rec.Header().Get(echo.HeaderContentEncoding))
|
||||
}
|
||||
|
||||
func TestGzipErrorReturnedInvalidConfig(t *testing.T) {
|
||||
e := echo.New()
|
||||
// Invalid level
|
||||
e.Use(NewWithConfig(Config{Level: 12}))
|
||||
e.GET("/", func(c echo.Context) error {
|
||||
c.Response().Write([]byte("test"))
|
||||
return nil
|
||||
})
|
||||
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, gzipScheme)
|
||||
rec := httptest.NewRecorder()
|
||||
e.ServeHTTP(rec, req)
|
||||
assert.Equal(t, http.StatusInternalServerError, rec.Code)
|
||||
assert.Contains(t, rec.Body.String(), "gzip")
|
||||
}
|
||||
|
||||
// Issue #806
|
||||
func TestGzipWithStatic(t *testing.T) {
|
||||
e := echo.New()
|
||||
e.Use(New())
|
||||
e.Static("/test", "./")
|
||||
req := httptest.NewRequest(http.MethodGet, "/test/gzip.go", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, gzipScheme)
|
||||
rec := httptest.NewRecorder()
|
||||
e.ServeHTTP(rec, req)
|
||||
assert.Equal(t, http.StatusOK, rec.Code)
|
||||
// Data is written out in chunks when Content-Length == "", so only
|
||||
// validate the content length if it's not set.
|
||||
if cl := rec.Header().Get("Content-Length"); cl != "" {
|
||||
assert.Equal(t, cl, rec.Body.Len())
|
||||
}
|
||||
r, err := gzip.NewReader(rec.Body)
|
||||
if assert.NoError(t, err) {
|
||||
defer r.Close()
|
||||
want, err := os.ReadFile("./gzip.go")
|
||||
if assert.NoError(t, err) {
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(r)
|
||||
assert.Equal(t, want, buf.Bytes())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkGzip(b *testing.B) {
|
||||
e := echo.New()
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, gzipScheme)
|
||||
|
||||
h := New()(func(c echo.Context) error {
|
||||
c.Response().Write([]byte("test")) // For Content-Type sniffing
|
||||
return nil
|
||||
})
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
// Gzip
|
||||
rec := httptest.NewRecorder()
|
||||
c := e.NewContext(req, rec)
|
||||
h(c)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkGzipLarge(b *testing.B) {
|
||||
data, err := os.ReadFile("./fixtures/processList.json")
|
||||
require.NoError(b, err)
|
||||
|
||||
e := echo.New()
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, gzipScheme)
|
||||
|
||||
h := New()(func(c echo.Context) error {
|
||||
c.Response().Write(data) // For Content-Type sniffing
|
||||
return nil
|
||||
})
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
// Gzip
|
||||
rec := httptest.NewRecorder()
|
||||
c := e.NewContext(req, rec)
|
||||
h(c)
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@ -1,269 +0,0 @@
|
||||
package gzip
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/labstack/echo/v4"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestZstd(t *testing.T) {
|
||||
e := echo.New()
|
||||
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
rec := httptest.NewRecorder()
|
||||
c := e.NewContext(req, rec)
|
||||
|
||||
// Skip if no Accept-Encoding header
|
||||
h := New()(func(c echo.Context) error {
|
||||
c.Response().Write([]byte("test")) // For Content-Type sniffing
|
||||
return nil
|
||||
})
|
||||
h(c)
|
||||
|
||||
assert := assert.New(t)
|
||||
|
||||
assert.Equal("test", rec.Body.String())
|
||||
|
||||
// zstd
|
||||
req = httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
|
||||
rec = httptest.NewRecorder()
|
||||
c = e.NewContext(req, rec)
|
||||
h(c)
|
||||
assert.Equal(scheme, rec.Header().Get(echo.HeaderContentEncoding))
|
||||
assert.Contains(rec.Header().Get(echo.HeaderContentType), echo.MIMETextPlain)
|
||||
r, err := zstd.NewReader(rec.Body)
|
||||
if assert.NoError(err) {
|
||||
buf := new(bytes.Buffer)
|
||||
defer r.Close()
|
||||
buf.ReadFrom(r)
|
||||
assert.Equal("test", buf.String())
|
||||
}
|
||||
|
||||
chunkBuf := make([]byte, 5)
|
||||
|
||||
// zstd chunked
|
||||
req = httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
|
||||
rec = httptest.NewRecorder()
|
||||
|
||||
// Force body to be a reader, otherwise zstd will use .Bytes() and .Len()
|
||||
reader := io.TeeReader(rec.Body, io.Discard)
|
||||
|
||||
c = e.NewContext(req, rec)
|
||||
New()(func(c echo.Context) error {
|
||||
c.Response().Header().Set("Content-Type", "text/event-stream")
|
||||
c.Response().Header().Set("Transfer-Encoding", "chunked")
|
||||
|
||||
// Write and flush the first part of the data
|
||||
c.Response().Write([]byte("test\n"))
|
||||
c.Response().Flush()
|
||||
|
||||
// Read the first part of the data
|
||||
assert.True(rec.Flushed)
|
||||
assert.Equal(scheme, rec.Header().Get(echo.HeaderContentEncoding))
|
||||
r.Reset(reader)
|
||||
|
||||
_, err = io.ReadFull(r, chunkBuf)
|
||||
assert.NoError(err)
|
||||
assert.Equal("test\n", string(chunkBuf))
|
||||
|
||||
// Write and flush the second part of the data
|
||||
c.Response().Write([]byte("tost\n"))
|
||||
c.Response().Flush()
|
||||
r.Reset(reader)
|
||||
|
||||
_, err = io.ReadFull(r, chunkBuf)
|
||||
assert.NoError(err)
|
||||
assert.Equal("tost\n", string(chunkBuf))
|
||||
|
||||
// Write the final part of the data and return
|
||||
c.Response().Write([]byte("tast"))
|
||||
return nil
|
||||
})(c)
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
defer r.Close()
|
||||
r.Reset(reader)
|
||||
buf.ReadFrom(r)
|
||||
assert.Equal("tast", buf.String())
|
||||
}
|
||||
|
||||
func TestZstdWithMinLength(t *testing.T) {
|
||||
e := echo.New()
|
||||
// Invalid level
|
||||
e.Use(NewWithConfig(Config{MinLength: 5}))
|
||||
e.GET("/", func(c echo.Context) error {
|
||||
c.Response().Write([]byte("test"))
|
||||
return nil
|
||||
})
|
||||
e.GET("/foobar", func(c echo.Context) error {
|
||||
c.Response().Write([]byte("foobar"))
|
||||
return nil
|
||||
})
|
||||
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
|
||||
rec := httptest.NewRecorder()
|
||||
e.ServeHTTP(rec, req)
|
||||
assert.Equal(t, "", rec.Header().Get(echo.HeaderContentEncoding))
|
||||
assert.Contains(t, rec.Body.String(), "test")
|
||||
|
||||
req = httptest.NewRequest(http.MethodGet, "/foobar", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
|
||||
rec = httptest.NewRecorder()
|
||||
e.ServeHTTP(rec, req)
|
||||
assert.Equal(t, scheme, rec.Header().Get(echo.HeaderContentEncoding))
|
||||
r, err := zstd.NewReader(rec.Body)
|
||||
if assert.NoError(t, err) {
|
||||
buf := new(bytes.Buffer)
|
||||
defer r.Close()
|
||||
buf.ReadFrom(r)
|
||||
assert.Equal(t, "foobar", buf.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestZstdNoContent(t *testing.T) {
|
||||
e := echo.New()
|
||||
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
|
||||
rec := httptest.NewRecorder()
|
||||
c := e.NewContext(req, rec)
|
||||
h := New()(func(c echo.Context) error {
|
||||
return c.NoContent(http.StatusNoContent)
|
||||
})
|
||||
if assert.NoError(t, h(c)) {
|
||||
assert.Empty(t, rec.Header().Get(echo.HeaderContentEncoding))
|
||||
assert.Empty(t, rec.Header().Get(echo.HeaderContentType))
|
||||
assert.Equal(t, 0, len(rec.Body.Bytes()))
|
||||
}
|
||||
}
|
||||
|
||||
func TestZstdEmpty(t *testing.T) {
|
||||
e := echo.New()
|
||||
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
|
||||
rec := httptest.NewRecorder()
|
||||
c := e.NewContext(req, rec)
|
||||
h := New()(func(c echo.Context) error {
|
||||
return c.String(http.StatusOK, "")
|
||||
})
|
||||
if assert.NoError(t, h(c)) {
|
||||
assert.Equal(t, scheme, rec.Header().Get(echo.HeaderContentEncoding))
|
||||
assert.Equal(t, "text/plain; charset=UTF-8", rec.Header().Get(echo.HeaderContentType))
|
||||
r, err := zstd.NewReader(rec.Body)
|
||||
if assert.NoError(t, err) {
|
||||
var buf bytes.Buffer
|
||||
buf.ReadFrom(r)
|
||||
assert.Equal(t, "", buf.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestZstdErrorReturned(t *testing.T) {
|
||||
e := echo.New()
|
||||
e.Use(New())
|
||||
e.GET("/", func(c echo.Context) error {
|
||||
return echo.ErrNotFound
|
||||
})
|
||||
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
|
||||
rec := httptest.NewRecorder()
|
||||
e.ServeHTTP(rec, req)
|
||||
assert.Equal(t, http.StatusNotFound, rec.Code)
|
||||
assert.Empty(t, rec.Header().Get(echo.HeaderContentEncoding))
|
||||
}
|
||||
|
||||
func TestZstdErrorReturnedInvalidConfig(t *testing.T) {
|
||||
e := echo.New()
|
||||
// Invalid level
|
||||
e.Use(NewWithConfig(Config{Level: 12}))
|
||||
e.GET("/", func(c echo.Context) error {
|
||||
c.Response().Write([]byte("test"))
|
||||
return nil
|
||||
})
|
||||
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
|
||||
rec := httptest.NewRecorder()
|
||||
e.ServeHTTP(rec, req)
|
||||
assert.Equal(t, http.StatusInternalServerError, rec.Code)
|
||||
assert.Contains(t, rec.Body.String(), "unknown encoder level")
|
||||
}
|
||||
|
||||
// Issue #806
|
||||
func TestZstdWithStatic(t *testing.T) {
|
||||
e := echo.New()
|
||||
e.Use(New())
|
||||
e.Static("/test", "./")
|
||||
req := httptest.NewRequest(http.MethodGet, "/test/zstd.go", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
|
||||
rec := httptest.NewRecorder()
|
||||
e.ServeHTTP(rec, req)
|
||||
assert.Equal(t, http.StatusOK, rec.Code)
|
||||
// Data is written out in chunks when Content-Length == "", so only
|
||||
// validate the content length if it's not set.
|
||||
if cl := rec.Header().Get("Content-Length"); cl != "" {
|
||||
assert.Equal(t, cl, rec.Body.Len())
|
||||
}
|
||||
r, err := zstd.NewReader(rec.Body)
|
||||
if assert.NoError(t, err) {
|
||||
defer r.Close()
|
||||
want, err := os.ReadFile("./zstd.go")
|
||||
if assert.NoError(t, err) {
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(r)
|
||||
assert.Equal(t, want, buf.Bytes())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkZstd(b *testing.B) {
|
||||
e := echo.New()
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
|
||||
|
||||
h := New()(func(c echo.Context) error {
|
||||
c.Response().Write([]byte("test")) // For Content-Type sniffing
|
||||
return nil
|
||||
})
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
rec := httptest.NewRecorder()
|
||||
c := e.NewContext(req, rec)
|
||||
h(c)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkZstdLarge(b *testing.B) {
|
||||
data, err := os.ReadFile("./fixtures/processList.json")
|
||||
require.NoError(b, err)
|
||||
|
||||
e := echo.New()
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
|
||||
|
||||
h := New()(func(c echo.Context) error {
|
||||
c.Response().Write(data) // For Content-Type sniffing
|
||||
return nil
|
||||
})
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
rec := httptest.NewRecorder()
|
||||
c := e.NewContext(req, rec)
|
||||
h(c)
|
||||
}
|
||||
}
|
||||
@ -57,8 +57,8 @@ import (
|
||||
"github.com/datarhei/core/v16/srt"
|
||||
|
||||
mwcache "github.com/datarhei/core/v16/http/middleware/cache"
|
||||
mwcompress "github.com/datarhei/core/v16/http/middleware/compress"
|
||||
mwcors "github.com/datarhei/core/v16/http/middleware/cors"
|
||||
mwgzip "github.com/datarhei/core/v16/http/middleware/gzip"
|
||||
mwhlsrewrite "github.com/datarhei/core/v16/http/middleware/hlsrewrite"
|
||||
mwiam "github.com/datarhei/core/v16/http/middleware/iam"
|
||||
mwiplimit "github.com/datarhei/core/v16/http/middleware/iplimit"
|
||||
@ -484,10 +484,10 @@ func (s *server) HTTPStatus() map[int]uint64 {
|
||||
}
|
||||
|
||||
func (s *server) setRoutes() {
|
||||
gzipMiddleware := mwgzip.NewWithConfig(mwgzip.Config{
|
||||
Level: mwgzip.BestSpeed,
|
||||
gzipMiddleware := mwcompress.NewWithConfig(mwcompress.Config{
|
||||
Level: mwcompress.BestSpeed,
|
||||
MinLength: 1000,
|
||||
Skipper: mwgzip.ContentTypeSkipper(nil),
|
||||
Skipper: mwcompress.ContentTypeSkipper(nil),
|
||||
})
|
||||
|
||||
// API router grouo
|
||||
@ -525,9 +525,9 @@ func (s *server) setRoutes() {
|
||||
}))
|
||||
|
||||
if filesystem.Gzip {
|
||||
fs.Use(mwgzip.NewWithConfig(mwgzip.Config{
|
||||
Skipper: mwgzip.ContentTypeSkipper(s.gzip.mimetypes),
|
||||
Level: mwgzip.BestSpeed,
|
||||
fs.Use(mwcompress.NewWithConfig(mwcompress.Config{
|
||||
Skipper: mwcompress.ContentTypeSkipper(s.gzip.mimetypes),
|
||||
Level: mwcompress.BestSpeed,
|
||||
MinLength: 1000,
|
||||
}))
|
||||
}
|
||||
|
||||
19
vendor/github.com/andybalholm/brotli/LICENSE
generated
vendored
Normal file
19
vendor/github.com/andybalholm/brotli/LICENSE
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
14
vendor/github.com/andybalholm/brotli/README.md
generated
vendored
Normal file
14
vendor/github.com/andybalholm/brotli/README.md
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
||||
This package is a brotli compressor and decompressor implemented in Go.
|
||||
It was translated from the reference implementation (https://github.com/google/brotli)
|
||||
with the `c2go` tool at https://github.com/andybalholm/c2go.
|
||||
|
||||
I have been working on new compression algorithms (not translated from C)
|
||||
in the matchfinder package.
|
||||
You can use them with the NewWriterV2 function.
|
||||
Currently they give better results than the old implementation
|
||||
(at least for compressing my test file, Newton’s *Opticks*)
|
||||
on levels 2 to 6.
|
||||
|
||||
I am using it in production with https://github.com/andybalholm/redwood.
|
||||
|
||||
API documentation is found at https://pkg.go.dev/github.com/andybalholm/brotli?tab=doc.
|
||||
185
vendor/github.com/andybalholm/brotli/backward_references.go
generated
vendored
Normal file
185
vendor/github.com/andybalholm/brotli/backward_references.go
generated
vendored
Normal file
@ -0,0 +1,185 @@
|
||||
package brotli
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
/* Copyright 2013 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* Function to find backward reference copies. */
|
||||
|
||||
func computeDistanceCode(distance uint, max_distance uint, dist_cache []int) uint {
|
||||
if distance <= max_distance {
|
||||
var distance_plus_3 uint = distance + 3
|
||||
var offset0 uint = distance_plus_3 - uint(dist_cache[0])
|
||||
var offset1 uint = distance_plus_3 - uint(dist_cache[1])
|
||||
if distance == uint(dist_cache[0]) {
|
||||
return 0
|
||||
} else if distance == uint(dist_cache[1]) {
|
||||
return 1
|
||||
} else if offset0 < 7 {
|
||||
return (0x9750468 >> (4 * offset0)) & 0xF
|
||||
} else if offset1 < 7 {
|
||||
return (0xFDB1ACE >> (4 * offset1)) & 0xF
|
||||
} else if distance == uint(dist_cache[2]) {
|
||||
return 2
|
||||
} else if distance == uint(dist_cache[3]) {
|
||||
return 3
|
||||
}
|
||||
}
|
||||
|
||||
return distance + numDistanceShortCodes - 1
|
||||
}
|
||||
|
||||
var hasherSearchResultPool sync.Pool
|
||||
|
||||
func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher hasherHandle, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) {
|
||||
var max_backward_limit uint = maxBackwardLimit(params.lgwin)
|
||||
var insert_length uint = *last_insert_len
|
||||
var pos_end uint = position + num_bytes
|
||||
var store_end uint
|
||||
if num_bytes >= hasher.StoreLookahead() {
|
||||
store_end = position + num_bytes - hasher.StoreLookahead() + 1
|
||||
} else {
|
||||
store_end = position
|
||||
}
|
||||
var random_heuristics_window_size uint = literalSpreeLengthForSparseSearch(params)
|
||||
var apply_random_heuristics uint = position + random_heuristics_window_size
|
||||
var gap uint = 0
|
||||
/* Set maximum distance, see section 9.1. of the spec. */
|
||||
|
||||
const kMinScore uint = scoreBase + 100
|
||||
|
||||
/* For speed up heuristics for random data. */
|
||||
|
||||
/* Minimum score to accept a backward reference. */
|
||||
hasher.PrepareDistanceCache(dist_cache)
|
||||
sr2, _ := hasherSearchResultPool.Get().(*hasherSearchResult)
|
||||
if sr2 == nil {
|
||||
sr2 = &hasherSearchResult{}
|
||||
}
|
||||
sr, _ := hasherSearchResultPool.Get().(*hasherSearchResult)
|
||||
if sr == nil {
|
||||
sr = &hasherSearchResult{}
|
||||
}
|
||||
|
||||
for position+hasher.HashTypeLength() < pos_end {
|
||||
var max_length uint = pos_end - position
|
||||
var max_distance uint = brotli_min_size_t(position, max_backward_limit)
|
||||
sr.len = 0
|
||||
sr.len_code_delta = 0
|
||||
sr.distance = 0
|
||||
sr.score = kMinScore
|
||||
hasher.FindLongestMatch(¶ms.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position, max_length, max_distance, gap, params.dist.max_distance, sr)
|
||||
if sr.score > kMinScore {
|
||||
/* Found a match. Let's look for something even better ahead. */
|
||||
var delayed_backward_references_in_row int = 0
|
||||
max_length--
|
||||
for ; ; max_length-- {
|
||||
var cost_diff_lazy uint = 175
|
||||
if params.quality < minQualityForExtensiveReferenceSearch {
|
||||
sr2.len = brotli_min_size_t(sr.len-1, max_length)
|
||||
} else {
|
||||
sr2.len = 0
|
||||
}
|
||||
sr2.len_code_delta = 0
|
||||
sr2.distance = 0
|
||||
sr2.score = kMinScore
|
||||
max_distance = brotli_min_size_t(position+1, max_backward_limit)
|
||||
hasher.FindLongestMatch(¶ms.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position+1, max_length, max_distance, gap, params.dist.max_distance, sr2)
|
||||
if sr2.score >= sr.score+cost_diff_lazy {
|
||||
/* Ok, let's just write one byte for now and start a match from the
|
||||
next byte. */
|
||||
position++
|
||||
|
||||
insert_length++
|
||||
*sr = *sr2
|
||||
delayed_backward_references_in_row++
|
||||
if delayed_backward_references_in_row < 4 && position+hasher.HashTypeLength() < pos_end {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
apply_random_heuristics = position + 2*sr.len + random_heuristics_window_size
|
||||
max_distance = brotli_min_size_t(position, max_backward_limit)
|
||||
{
|
||||
/* The first 16 codes are special short-codes,
|
||||
and the minimum offset is 1. */
|
||||
var distance_code uint = computeDistanceCode(sr.distance, max_distance+gap, dist_cache)
|
||||
if (sr.distance <= (max_distance + gap)) && distance_code > 0 {
|
||||
dist_cache[3] = dist_cache[2]
|
||||
dist_cache[2] = dist_cache[1]
|
||||
dist_cache[1] = dist_cache[0]
|
||||
dist_cache[0] = int(sr.distance)
|
||||
hasher.PrepareDistanceCache(dist_cache)
|
||||
}
|
||||
|
||||
*commands = append(*commands, makeCommand(¶ms.dist, insert_length, sr.len, sr.len_code_delta, distance_code))
|
||||
}
|
||||
|
||||
*num_literals += insert_length
|
||||
insert_length = 0
|
||||
/* Put the hash keys into the table, if there are enough bytes left.
|
||||
Depending on the hasher implementation, it can push all positions
|
||||
in the given range or only a subset of them.
|
||||
Avoid hash poisoning with RLE data. */
|
||||
{
|
||||
var range_start uint = position + 2
|
||||
var range_end uint = brotli_min_size_t(position+sr.len, store_end)
|
||||
if sr.distance < sr.len>>2 {
|
||||
range_start = brotli_min_size_t(range_end, brotli_max_size_t(range_start, position+sr.len-(sr.distance<<2)))
|
||||
}
|
||||
|
||||
hasher.StoreRange(ringbuffer, ringbuffer_mask, range_start, range_end)
|
||||
}
|
||||
|
||||
position += sr.len
|
||||
} else {
|
||||
insert_length++
|
||||
position++
|
||||
|
||||
/* If we have not seen matches for a long time, we can skip some
|
||||
match lookups. Unsuccessful match lookups are very very expensive
|
||||
and this kind of a heuristic speeds up compression quite
|
||||
a lot. */
|
||||
if position > apply_random_heuristics {
|
||||
/* Going through uncompressible data, jump. */
|
||||
if position > apply_random_heuristics+4*random_heuristics_window_size {
|
||||
var kMargin uint = brotli_max_size_t(hasher.StoreLookahead()-1, 4)
|
||||
/* It is quite a long time since we saw a copy, so we assume
|
||||
that this data is not compressible, and store hashes less
|
||||
often. Hashes of non compressible data are less likely to
|
||||
turn out to be useful in the future, too, so we store less of
|
||||
them to not to flood out the hash table of good compressible
|
||||
data. */
|
||||
|
||||
var pos_jump uint = brotli_min_size_t(position+16, pos_end-kMargin)
|
||||
for ; position < pos_jump; position += 4 {
|
||||
hasher.Store(ringbuffer, ringbuffer_mask, position)
|
||||
insert_length += 4
|
||||
}
|
||||
} else {
|
||||
var kMargin uint = brotli_max_size_t(hasher.StoreLookahead()-1, 2)
|
||||
var pos_jump uint = brotli_min_size_t(position+8, pos_end-kMargin)
|
||||
for ; position < pos_jump; position += 2 {
|
||||
hasher.Store(ringbuffer, ringbuffer_mask, position)
|
||||
insert_length += 2
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
insert_length += pos_end - position
|
||||
*last_insert_len = insert_length
|
||||
|
||||
hasherSearchResultPool.Put(sr)
|
||||
hasherSearchResultPool.Put(sr2)
|
||||
}
|
||||
796
vendor/github.com/andybalholm/brotli/backward_references_hq.go
generated
vendored
Normal file
796
vendor/github.com/andybalholm/brotli/backward_references_hq.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
436
vendor/github.com/andybalholm/brotli/bit_cost.go
generated
vendored
Normal file
436
vendor/github.com/andybalholm/brotli/bit_cost.go
generated
vendored
Normal file
@ -0,0 +1,436 @@
|
||||
package brotli
|
||||
|
||||
/* Copyright 2013 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* Functions to estimate the bit cost of Huffman trees. */
|
||||
func shannonEntropy(population []uint32, size uint, total *uint) float64 {
|
||||
var sum uint = 0
|
||||
var retval float64 = 0
|
||||
var population_end []uint32 = population[size:]
|
||||
var p uint
|
||||
for -cap(population) < -cap(population_end) {
|
||||
p = uint(population[0])
|
||||
population = population[1:]
|
||||
sum += p
|
||||
retval -= float64(p) * fastLog2(p)
|
||||
}
|
||||
|
||||
if sum != 0 {
|
||||
retval += float64(sum) * fastLog2(sum)
|
||||
}
|
||||
*total = sum
|
||||
return retval
|
||||
}
|
||||
|
||||
func bitsEntropy(population []uint32, size uint) float64 {
|
||||
var sum uint
|
||||
var retval float64 = shannonEntropy(population, size, &sum)
|
||||
if retval < float64(sum) {
|
||||
/* At least one bit per literal is needed. */
|
||||
retval = float64(sum)
|
||||
}
|
||||
|
||||
return retval
|
||||
}
|
||||
|
||||
const kOneSymbolHistogramCost float64 = 12
|
||||
const kTwoSymbolHistogramCost float64 = 20
|
||||
const kThreeSymbolHistogramCost float64 = 28
|
||||
const kFourSymbolHistogramCost float64 = 37
|
||||
|
||||
/* populationCostLiteral estimates the total number of bits needed to encode
   this literal histogram together with the data it describes: closed-form
   shortcuts for up to four distinct symbols, otherwise an entropy-based
   estimate plus the cost of the code-length-code histogram. */
func populationCostLiteral(histogram *histogramLiteral) float64 {
	var data_size uint = histogramDataSizeLiteral()
	var count int = 0
	var s [5]uint // positions of the first (up to five) non-zero counts
	var bits float64 = 0.0
	var i uint
	if histogram.total_count_ == 0 {
		return kOneSymbolHistogramCost
	}

	/* Collect up to five distinct symbols with non-zero counts. */
	for i = 0; i < data_size; i++ {
		if histogram.data_[i] > 0 {
			s[count] = i
			count++
			if count > 4 {
				break
			}
		}
	}

	if count == 1 {
		return kOneSymbolHistogramCost
	}

	if count == 2 {
		return kTwoSymbolHistogramCost + float64(histogram.total_count_)
	}

	if count == 3 {
		var histo0 uint32 = histogram.data_[s[0]]
		var histo1 uint32 = histogram.data_[s[1]]
		var histo2 uint32 = histogram.data_[s[2]]
		var histomax uint32 = brotli_max_uint32_t(histo0, brotli_max_uint32_t(histo1, histo2))
		return kThreeSymbolHistogramCost + 2*(float64(histo0)+float64(histo1)+float64(histo2)) - float64(histomax)
	}

	if count == 4 {
		var histo [4]uint32
		var h23 uint32
		var histomax uint32
		for i = 0; i < 4; i++ {
			histo[i] = histogram.data_[s[i]]
		}

		/* Sort the four counts into decreasing order. */
		for i = 0; i < 4; i++ {
			var j uint
			for j = i + 1; j < 4; j++ {
				if histo[j] > histo[i] {
					var tmp uint32 = histo[j]
					histo[j] = histo[i]
					histo[i] = tmp
				}
			}
		}

		h23 = histo[2] + histo[3]
		histomax = brotli_max_uint32_t(h23, histo[0])
		return kFourSymbolHistogramCost + 3*float64(h23) + 2*(float64(histo[0])+float64(histo[1])) - float64(histomax)
	}
	{
		var max_depth uint = 1
		var depth_histo = [codeLengthCodes]uint32{0}
		/* In this loop we compute the entropy of the histogram and simultaneously
		   build a simplified histogram of the code length codes where we use the
		   zero repeat code 17, but we don't use the non-zero repeat code 16. */

		var log2total float64 = fastLog2(histogram.total_count_)
		for i = 0; i < data_size; {
			if histogram.data_[i] > 0 {
				var log2p float64 = log2total - fastLog2(uint(histogram.data_[i]))
				/* Compute -log2(P(symbol)) = -log2(count(symbol)/total_count) =
				   = log2(total_count) - log2(count(symbol)) */

				var depth uint = uint(log2p + 0.5)
				/* Approximate the bit depth by round(-log2(P(symbol))) */
				bits += float64(histogram.data_[i]) * log2p

				if depth > 15 {
					depth = 15
				}

				if depth > max_depth {
					max_depth = depth
				}

				depth_histo[depth]++
				i++
			} else {
				var reps uint32 = 1
				/* Compute the run length of zeros and add the appropriate number of 0
				   and 17 code length codes to the code length code histogram. */

				var k uint
				for k = i + 1; k < data_size && histogram.data_[k] == 0; k++ {
					reps++
				}

				i += uint(reps)
				if i == data_size {
					/* Don't add any cost for the last zero run, since these are encoded
					   only implicitly. */
					break
				}

				if reps < 3 {
					depth_histo[0] += reps
				} else {
					reps -= 2
					for reps > 0 {
						depth_histo[repeatZeroCodeLength]++

						/* Add the 3 extra bits for the 17 code length code. */
						bits += 3

						reps >>= 3
					}
				}
			}
		}

		/* Add the estimated encoding cost of the code length code histogram. */
		bits += float64(18 + 2*max_depth)

		/* Add the entropy of the code length code histogram. */
		bits += bitsEntropy(depth_histo[:], codeLengthCodes)
	}

	return bits
}
|
||||
|
||||
/* populationCostCommand estimates the total number of bits needed to encode
   this command histogram together with the data it describes. Same structure
   as populationCostLiteral, specialized to the command alphabet. */
func populationCostCommand(histogram *histogramCommand) float64 {
	var data_size uint = histogramDataSizeCommand()
	var count int = 0
	var s [5]uint // positions of the first (up to five) non-zero counts
	var bits float64 = 0.0
	var i uint
	if histogram.total_count_ == 0 {
		return kOneSymbolHistogramCost
	}

	/* Collect up to five distinct symbols with non-zero counts. */
	for i = 0; i < data_size; i++ {
		if histogram.data_[i] > 0 {
			s[count] = i
			count++
			if count > 4 {
				break
			}
		}
	}

	if count == 1 {
		return kOneSymbolHistogramCost
	}

	if count == 2 {
		return kTwoSymbolHistogramCost + float64(histogram.total_count_)
	}

	if count == 3 {
		var histo0 uint32 = histogram.data_[s[0]]
		var histo1 uint32 = histogram.data_[s[1]]
		var histo2 uint32 = histogram.data_[s[2]]
		var histomax uint32 = brotli_max_uint32_t(histo0, brotli_max_uint32_t(histo1, histo2))
		return kThreeSymbolHistogramCost + 2*(float64(histo0)+float64(histo1)+float64(histo2)) - float64(histomax)
	}

	if count == 4 {
		var histo [4]uint32
		var h23 uint32
		var histomax uint32
		for i = 0; i < 4; i++ {
			histo[i] = histogram.data_[s[i]]
		}

		/* Sort the four counts into decreasing order. */
		for i = 0; i < 4; i++ {
			var j uint
			for j = i + 1; j < 4; j++ {
				if histo[j] > histo[i] {
					var tmp uint32 = histo[j]
					histo[j] = histo[i]
					histo[i] = tmp
				}
			}
		}

		h23 = histo[2] + histo[3]
		histomax = brotli_max_uint32_t(h23, histo[0])
		return kFourSymbolHistogramCost + 3*float64(h23) + 2*(float64(histo[0])+float64(histo[1])) - float64(histomax)
	}
	{
		var max_depth uint = 1
		var depth_histo = [codeLengthCodes]uint32{0}
		/* In this loop we compute the entropy of the histogram and simultaneously
		   build a simplified histogram of the code length codes where we use the
		   zero repeat code 17, but we don't use the non-zero repeat code 16. */

		var log2total float64 = fastLog2(histogram.total_count_)
		for i = 0; i < data_size; {
			if histogram.data_[i] > 0 {
				var log2p float64 = log2total - fastLog2(uint(histogram.data_[i]))
				/* Compute -log2(P(symbol)) = -log2(count(symbol)/total_count) =
				   = log2(total_count) - log2(count(symbol)) */

				var depth uint = uint(log2p + 0.5)
				/* Approximate the bit depth by round(-log2(P(symbol))) */
				bits += float64(histogram.data_[i]) * log2p

				if depth > 15 {
					depth = 15
				}

				if depth > max_depth {
					max_depth = depth
				}

				depth_histo[depth]++
				i++
			} else {
				var reps uint32 = 1
				/* Compute the run length of zeros and add the appropriate number of 0
				   and 17 code length codes to the code length code histogram. */

				var k uint
				for k = i + 1; k < data_size && histogram.data_[k] == 0; k++ {
					reps++
				}

				i += uint(reps)
				if i == data_size {
					/* Don't add any cost for the last zero run, since these are encoded
					   only implicitly. */
					break
				}

				if reps < 3 {
					depth_histo[0] += reps
				} else {
					reps -= 2
					for reps > 0 {
						depth_histo[repeatZeroCodeLength]++

						/* Add the 3 extra bits for the 17 code length code. */
						bits += 3

						reps >>= 3
					}
				}
			}
		}

		/* Add the estimated encoding cost of the code length code histogram. */
		bits += float64(18 + 2*max_depth)

		/* Add the entropy of the code length code histogram. */
		bits += bitsEntropy(depth_histo[:], codeLengthCodes)
	}

	return bits
}
|
||||
|
||||
/* populationCostDistance estimates the total number of bits needed to encode
   this distance histogram together with the data it describes. Same structure
   as populationCostLiteral, specialized to the distance alphabet. */
func populationCostDistance(histogram *histogramDistance) float64 {
	var data_size uint = histogramDataSizeDistance()
	var count int = 0
	var s [5]uint // positions of the first (up to five) non-zero counts
	var bits float64 = 0.0
	var i uint
	if histogram.total_count_ == 0 {
		return kOneSymbolHistogramCost
	}

	/* Collect up to five distinct symbols with non-zero counts. */
	for i = 0; i < data_size; i++ {
		if histogram.data_[i] > 0 {
			s[count] = i
			count++
			if count > 4 {
				break
			}
		}
	}

	if count == 1 {
		return kOneSymbolHistogramCost
	}

	if count == 2 {
		return kTwoSymbolHistogramCost + float64(histogram.total_count_)
	}

	if count == 3 {
		var histo0 uint32 = histogram.data_[s[0]]
		var histo1 uint32 = histogram.data_[s[1]]
		var histo2 uint32 = histogram.data_[s[2]]
		var histomax uint32 = brotli_max_uint32_t(histo0, brotli_max_uint32_t(histo1, histo2))
		return kThreeSymbolHistogramCost + 2*(float64(histo0)+float64(histo1)+float64(histo2)) - float64(histomax)
	}

	if count == 4 {
		var histo [4]uint32
		var h23 uint32
		var histomax uint32
		for i = 0; i < 4; i++ {
			histo[i] = histogram.data_[s[i]]
		}

		/* Sort the four counts into decreasing order. */
		for i = 0; i < 4; i++ {
			var j uint
			for j = i + 1; j < 4; j++ {
				if histo[j] > histo[i] {
					var tmp uint32 = histo[j]
					histo[j] = histo[i]
					histo[i] = tmp
				}
			}
		}

		h23 = histo[2] + histo[3]
		histomax = brotli_max_uint32_t(h23, histo[0])
		return kFourSymbolHistogramCost + 3*float64(h23) + 2*(float64(histo[0])+float64(histo[1])) - float64(histomax)
	}
	{
		var max_depth uint = 1
		var depth_histo = [codeLengthCodes]uint32{0}
		/* In this loop we compute the entropy of the histogram and simultaneously
		   build a simplified histogram of the code length codes where we use the
		   zero repeat code 17, but we don't use the non-zero repeat code 16. */

		var log2total float64 = fastLog2(histogram.total_count_)
		for i = 0; i < data_size; {
			if histogram.data_[i] > 0 {
				var log2p float64 = log2total - fastLog2(uint(histogram.data_[i]))
				/* Compute -log2(P(symbol)) = -log2(count(symbol)/total_count) =
				   = log2(total_count) - log2(count(symbol)) */

				var depth uint = uint(log2p + 0.5)
				/* Approximate the bit depth by round(-log2(P(symbol))) */
				bits += float64(histogram.data_[i]) * log2p

				if depth > 15 {
					depth = 15
				}

				if depth > max_depth {
					max_depth = depth
				}

				depth_histo[depth]++
				i++
			} else {
				var reps uint32 = 1
				/* Compute the run length of zeros and add the appropriate number of 0
				   and 17 code length codes to the code length code histogram. */

				var k uint
				for k = i + 1; k < data_size && histogram.data_[k] == 0; k++ {
					reps++
				}

				i += uint(reps)
				if i == data_size {
					/* Don't add any cost for the last zero run, since these are encoded
					   only implicitly. */
					break
				}

				if reps < 3 {
					depth_histo[0] += reps
				} else {
					reps -= 2
					for reps > 0 {
						depth_histo[repeatZeroCodeLength]++

						/* Add the 3 extra bits for the 17 code length code. */
						bits += 3

						reps >>= 3
					}
				}
			}
		}

		/* Add the estimated encoding cost of the code length code histogram. */
		bits += float64(18 + 2*max_depth)

		/* Add the entropy of the code length code histogram. */
		bits += bitsEntropy(depth_histo[:], codeLengthCodes)
	}

	return bits
}
|
||||
266
vendor/github.com/andybalholm/brotli/bit_reader.go
generated
vendored
Normal file
266
vendor/github.com/andybalholm/brotli/bit_reader.go
generated
vendored
Normal file
@ -0,0 +1,266 @@
|
||||
package brotli
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
/* Copyright 2013 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* Bit reading helpers */
|
||||
|
||||
/* Maximum number of input bytes fillBitWindow16 may consume. */
const shortFillBitWindowRead = (8 >> 1)

/* kBitMask[n] has the low n bits set; kBitMask[0] == 0, kBitMask[32] == all ones. */
var kBitMask = [33]uint32{
	0x00000000,
	0x00000001,
	0x00000003,
	0x00000007,
	0x0000000F,
	0x0000001F,
	0x0000003F,
	0x0000007F,
	0x000000FF,
	0x000001FF,
	0x000003FF,
	0x000007FF,
	0x00000FFF,
	0x00001FFF,
	0x00003FFF,
	0x00007FFF,
	0x0000FFFF,
	0x0001FFFF,
	0x0003FFFF,
	0x0007FFFF,
	0x000FFFFF,
	0x001FFFFF,
	0x003FFFFF,
	0x007FFFFF,
	0x00FFFFFF,
	0x01FFFFFF,
	0x03FFFFFF,
	0x07FFFFFF,
	0x0FFFFFFF,
	0x1FFFFFFF,
	0x3FFFFFFF,
	0x7FFFFFFF,
	0xFFFFFFFF,
}
|
||||
|
||||
// bitMask returns a mask with the low n bits set; n must be in [0, 32].
func bitMask(n uint32) uint32 {
	return kBitMask[n]
}
|
||||
|
||||
// bitReader reads bits from a byte slice through a 64-bit accumulator.
type bitReader struct {
	val_      uint64 // pre-fetched bits
	bit_pos_  uint32 // position of the next unconsumed bit in val_ (64 == empty)
	input     []byte // input bytes being decoded
	input_len uint   // length of input
	byte_pos  uint   // position of the next input byte to load into val_
}
|
||||
|
||||
// bitReaderState is a snapshot of a bitReader, captured by bitReaderSaveState
// and applied by bitReaderRestoreState; fields mirror bitReader exactly.
type bitReaderState struct {
	val_      uint64
	bit_pos_  uint32
	input     []byte
	input_len uint
	byte_pos  uint
}
|
||||
|
||||
/* Initializes the BrotliBitReader fields. */
|
||||
|
||||
/* Ensures that accumulator is not empty.
|
||||
May consume up to sizeof(brotli_reg_t) - 1 bytes of input.
|
||||
Returns false if data is required but there is no input available.
|
||||
For BROTLI_ALIGNED_READ this function also prepares bit reader for aligned
|
||||
reading. */
|
||||
func bitReaderSaveState(from *bitReader, to *bitReaderState) {
|
||||
to.val_ = from.val_
|
||||
to.bit_pos_ = from.bit_pos_
|
||||
to.input = from.input
|
||||
to.input_len = from.input_len
|
||||
to.byte_pos = from.byte_pos
|
||||
}
|
||||
|
||||
func bitReaderRestoreState(to *bitReader, from *bitReaderState) {
|
||||
to.val_ = from.val_
|
||||
to.bit_pos_ = from.bit_pos_
|
||||
to.input = from.input
|
||||
to.input_len = from.input_len
|
||||
to.byte_pos = from.byte_pos
|
||||
}
|
||||
|
||||
// getAvailableBits returns how many unconsumed bits the accumulator holds.
func getAvailableBits(br *bitReader) uint32 {
	return 64 - br.bit_pos_
}
|
||||
|
||||
/* Returns amount of unread bytes the bit reader still has buffered from the
   BrotliInput, including whole bytes in br->val_. */
func getRemainingBytes(br *bitReader) uint {
	return uint(uint32(br.input_len-br.byte_pos) + (getAvailableBits(br) >> 3))
}
|
||||
|
||||
/* Checks if there is at least |num| bytes left in the input ring-buffer
   (excluding the bits remaining in br->val_). */
func checkInputAmount(br *bitReader, num uint) bool {
	return br.input_len-br.byte_pos >= num
}
|
||||
|
||||
/* Guarantees that there are at least |n_bits| + 1 bits in accumulator.
   Precondition: accumulator contains at least 1 bit.
   |n_bits| should be in the range [1..24] for regular build. For portable
   non-64-bit little-endian build only 16 bits are safe to request.
   NOTE(review): reads 4 bytes unconditionally when refilling — callers are
   expected to have verified input availability (e.g. via checkInputAmount). */
func fillBitWindow(br *bitReader, n_bits uint32) {
	if br.bit_pos_ >= 32 {
		br.val_ >>= 32
		br.bit_pos_ ^= 32 /* here same as -= 32 because of the if condition */
		br.val_ |= (uint64(binary.LittleEndian.Uint32(br.input[br.byte_pos:]))) << 32
		br.byte_pos += 4
	}
}
|
||||
|
||||
/* Mostly like BrotliFillBitWindow, but guarantees only 16 bits and reads no
   more than BROTLI_SHORT_FILL_BIT_WINDOW_READ bytes of input. */
func fillBitWindow16(br *bitReader) {
	fillBitWindow(br, 17)
}
|
||||
|
||||
/* Tries to pull one byte of input to accumulator.
   Returns false if there is no input available. */
func pullByte(br *bitReader) bool {
	if br.byte_pos == br.input_len {
		return false
	}

	// Shift the accumulator down and place the new byte in the top 8 bits.
	br.val_ >>= 8
	br.val_ |= (uint64(br.input[br.byte_pos])) << 56
	br.bit_pos_ -= 8
	br.byte_pos++
	return true
}
|
||||
|
||||
/* Returns currently available bits.
   The number of valid bits could be calculated by BrotliGetAvailableBits. */
func getBitsUnmasked(br *bitReader) uint64 {
	return br.val_ >> br.bit_pos_
}
|
||||
|
||||
/* Like BrotliGetBits, but does not mask the result.
   The result contains at least 16 valid bits. */
func get16BitsUnmasked(br *bitReader) uint32 {
	fillBitWindow(br, 16)
	return uint32(getBitsUnmasked(br))
}
|
||||
|
||||
/* Returns the specified number of bits from |br| without advancing bit
   position. */
func getBits(br *bitReader, n_bits uint32) uint32 {
	fillBitWindow(br, n_bits)
	return uint32(getBitsUnmasked(br)) & bitMask(n_bits)
}
|
||||
|
||||
/* Tries to peek the specified amount of bits. Returns false, if there
   is not enough input. Does not advance the bit position. */
func safeGetBits(br *bitReader, n_bits uint32, val *uint32) bool {
	// Pull input bytes one at a time until the accumulator has enough bits.
	for getAvailableBits(br) < n_bits {
		if !pullByte(br) {
			return false
		}
	}

	*val = uint32(getBitsUnmasked(br)) & bitMask(n_bits)
	return true
}
|
||||
|
||||
/* Advances the bit pos by |n_bits|. Precondition: accumulator holds at
   least |n_bits| valid bits. */
func dropBits(br *bitReader, n_bits uint32) {
	br.bit_pos_ += n_bits
}
|
||||
|
||||
// bitReaderUnload returns the whole unread bytes buffered in the accumulator
// back to the input (rewinding byte_pos) and drops them from val_, leaving
// only the partial-byte bits in the accumulator.
func bitReaderUnload(br *bitReader) {
	var unused_bytes uint32 = getAvailableBits(br) >> 3
	var unused_bits uint32 = unused_bytes << 3
	br.byte_pos -= uint(unused_bytes)
	if unused_bits == 64 {
		// A 64-bit shift by 64 would be undefined in C; handle explicitly.
		br.val_ = 0
	} else {
		br.val_ <<= unused_bits
	}

	br.bit_pos_ += unused_bits
}
|
||||
|
||||
/* Reads the specified number of bits from |br| and advances the bit pos.
   Precondition: accumulator MUST contain at least |n_bits|. */
func takeBits(br *bitReader, n_bits uint32, val *uint32) {
	*val = uint32(getBitsUnmasked(br)) & bitMask(n_bits)
	dropBits(br, n_bits)
}
|
||||
|
||||
/* Reads the specified number of bits from |br| and advances the bit pos.
   Assumes that there is enough input to perform BrotliFillBitWindow. */
func readBits(br *bitReader, n_bits uint32) uint32 {
	var val uint32
	fillBitWindow(br, n_bits)
	takeBits(br, n_bits, &val)
	return val
}
|
||||
|
||||
/* Tries to read the specified amount of bits. Returns false, if there
   is not enough input. |n_bits| MUST be positive. */
func safeReadBits(br *bitReader, n_bits uint32, val *uint32) bool {
	// Pull input bytes one at a time until the accumulator has enough bits.
	for getAvailableBits(br) < n_bits {
		if !pullByte(br) {
			return false
		}
	}

	takeBits(br, n_bits, val)
	return true
}
|
||||
|
||||
/* Advances the bit reader position to the next byte boundary and verifies
   that any skipped bits are set to zero. Returns true when the padding
   bits (if any) were all zero. */
func bitReaderJumpToByteBoundary(br *bitReader) bool {
	var pad_bits_count uint32 = getAvailableBits(br) & 0x7
	var pad_bits uint32 = 0
	if pad_bits_count != 0 {
		takeBits(br, pad_bits_count, &pad_bits)
	}

	return pad_bits == 0
}
|
||||
|
||||
/* Copies remaining input bytes stored in the bit reader to the output. Value
   |num| may not be larger than BrotliGetRemainingBytes. The bit reader must be
   warmed up again after this. */
func copyBytes(dest []byte, br *bitReader, num uint) {
	// First drain whole bytes still sitting in the accumulator...
	for getAvailableBits(br) >= 8 && num > 0 {
		dest[0] = byte(getBitsUnmasked(br))
		dropBits(br, 8)
		dest = dest[1:]
		num--
	}

	// ...then copy the rest straight from the input slice.
	copy(dest, br.input[br.byte_pos:][:num])
	br.byte_pos += num
}
|
||||
|
||||
// initBitReader resets the accumulator: bit_pos_ == 64 marks it as holding
// no valid bits.
func initBitReader(br *bitReader) {
	br.val_ = 0
	br.bit_pos_ = 64
}
|
||||
|
||||
// warmupBitReader ensures the accumulator is non-empty before bit reading
// starts; returns false when no input byte is available.
func warmupBitReader(br *bitReader) bool {
	/* Fixing alignment after unaligned BrotliFillWindow would result accumulator
	   overflow. If unalignment is caused by BrotliSafeReadBits, then there is
	   enough space in accumulator to fix alignment. */
	if getAvailableBits(br) == 0 {
		if !pullByte(br) {
			return false
		}
	}

	return true
}
|
||||
56
vendor/github.com/andybalholm/brotli/bitwriter.go
generated
vendored
Normal file
56
vendor/github.com/andybalholm/brotli/bitwriter.go
generated
vendored
Normal file
@ -0,0 +1,56 @@
|
||||
package brotli
|
||||
|
||||
/* Copyright 2010 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* Write bits into a byte array. */
|
||||
|
||||
// bitWriter accumulates bits and appends them to a byte slice in
// little-endian order.
type bitWriter struct {
	dst []byte // completed output bytes

	// Data waiting to be written is the low nbits of bits.
	bits  uint64
	nbits uint
}
|
||||
|
||||
// writeBits appends the low nb bits of b to the pending bit buffer, flushing
// complete 32-bit chunks to dst in little-endian byte order.
// NOTE(review): assumes the bits of b above position nb are zero — callers
// must guarantee this, or they would corrupt previously buffered bits.
func (w *bitWriter) writeBits(nb uint, b uint64) {
	w.bits |= b << w.nbits
	w.nbits += nb
	if w.nbits >= 32 {
		bits := w.bits
		w.bits >>= 32
		w.nbits -= 32
		w.dst = append(w.dst,
			byte(bits),
			byte(bits>>8),
			byte(bits>>16),
			byte(bits>>24),
		)
	}
}
|
||||
|
||||
func (w *bitWriter) writeSingleBit(bit bool) {
|
||||
if bit {
|
||||
w.writeBits(1, 1)
|
||||
} else {
|
||||
w.writeBits(1, 0)
|
||||
}
|
||||
}
|
||||
|
||||
// jumpToByteBoundary flushes all pending bits to dst, zero-padding the final
// partial byte, so subsequent output starts on a byte boundary.
func (w *bitWriter) jumpToByteBoundary() {
	dst := w.dst
	for w.nbits != 0 {
		dst = append(dst, byte(w.bits))
		w.bits >>= 8
		if w.nbits > 8 { // Avoid underflow
			w.nbits -= 8
		} else {
			w.nbits = 0
		}
	}
	w.bits = 0
	w.dst = dst
}
|
||||
144
vendor/github.com/andybalholm/brotli/block_splitter.go
generated
vendored
Normal file
144
vendor/github.com/andybalholm/brotli/block_splitter.go
generated
vendored
Normal file
@ -0,0 +1,144 @@
|
||||
package brotli
|
||||
|
||||
/* Copyright 2013 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* Block split point selection utilities. */
|
||||
|
||||
// blockSplit describes a partition of a symbol stream into consecutive
// blocks, each assigned one of num_types block types.
type blockSplit struct {
	num_types          uint     // number of distinct block types
	num_blocks         uint     // number of entries used in types/lengths
	types              []byte   // per-block type id
	lengths            []uint32 // per-block length
	types_alloc_size   uint     // allocation bookkeeping for types
	lengths_alloc_size uint     // allocation bookkeeping for lengths
}
|
||||
|
||||
// Tuning parameters for the block-split search: histogram count caps, the
// bit-cost charged per block-type switch, sampling strides, target symbols
// per histogram, and refinement iteration bounds.
const (
	kMaxLiteralHistograms        uint    = 100
	kMaxCommandHistograms        uint    = 50
	kLiteralBlockSwitchCost      float64 = 28.1
	kCommandBlockSwitchCost      float64 = 13.5
	kDistanceBlockSwitchCost     float64 = 14.6
	kLiteralStrideLength         uint    = 70
	kCommandStrideLength         uint    = 40
	kSymbolsPerLiteralHistogram  uint    = 544
	kSymbolsPerCommandHistogram  uint    = 530
	kSymbolsPerDistanceHistogram uint    = 544
	kMinLengthForBlockSplitting  uint    = 128
	kIterMulForRefining          uint    = 2
	kMinItersForRefining         uint    = 100
)
|
||||
|
||||
func countLiterals(cmds []command) uint {
|
||||
var total_length uint = 0
|
||||
/* Count how many we have. */
|
||||
|
||||
for i := range cmds {
|
||||
total_length += uint(cmds[i].insert_len_)
|
||||
}
|
||||
|
||||
return total_length
|
||||
}
|
||||
|
||||
// copyLiteralsToByteArray gathers the literal bytes referenced by cmds from
// the ring buffer data (starting at offset, wrapped by mask) into the
// contiguous literals slice. literals must be at least countLiterals(cmds)
// bytes long.
func copyLiteralsToByteArray(cmds []command, data []byte, offset uint, mask uint, literals []byte) {
	var pos uint = 0
	var from_pos uint = offset & mask
	for i := range cmds {
		var insert_len uint = uint(cmds[i].insert_len_)
		// If the literal run wraps around the ring buffer, copy the head
		// (up to the buffer end) first.
		if from_pos+insert_len > mask {
			var head_size uint = mask + 1 - from_pos
			copy(literals[pos:], data[from_pos:][:head_size])
			from_pos = 0
			pos += head_size
			insert_len -= head_size
		}

		if insert_len > 0 {
			copy(literals[pos:], data[from_pos:][:insert_len])
			pos += insert_len
		}

		// Skip past the copy part of the command to the next literal run.
		from_pos = uint((uint32(from_pos+insert_len) + commandCopyLen(&cmds[i])) & uint32(mask))
	}
}
|
||||
|
||||
// myRand is a minimal Lehmer (multiplicative congruential) pseudo-random
// generator; the caller-owned seed is advanced in place and returned.
func myRand(seed *uint32) uint32 {
	/* Initial seed should be 7. In this case, loop length is (1 << 29). */
	*seed *= 16807

	return *seed
}
|
||||
|
||||
func bitCost(count uint) float64 {
|
||||
if count == 0 {
|
||||
return -2.0
|
||||
} else {
|
||||
return fastLog2(count)
|
||||
}
|
||||
}
|
||||
|
||||
// Batch sizes used when clustering block histograms incrementally.
const histogramsPerBatch = 64

const clustersPerBatch = 16
|
||||
|
||||
// initBlockSplit resets a blockSplit for reuse; the types and lengths slices
// are truncated in place so their backing storage is retained.
func initBlockSplit(self *blockSplit) {
	self.num_types = 0
	self.num_blocks = 0
	self.types = self.types[:0]
	self.lengths = self.lengths[:0]
	self.types_alloc_size = 0
	self.lengths_alloc_size = 0
}
|
||||
|
||||
// splitBlock computes the three block splits used by the brotli encoder —
// literals, insert-and-copy command codes, and distance prefixes — from the
// command list and the (ring-buffered) input data, writing the results into
// literal_split, insert_and_copy_split and dist_split respectively.
func splitBlock(cmds []command, data []byte, pos uint, mask uint, params *encoderParams, literal_split *blockSplit, insert_and_copy_split *blockSplit, dist_split *blockSplit) {
	{
		var literals_count uint = countLiterals(cmds)
		var literals []byte = make([]byte, literals_count)

		/* Create a continuous array of literals. */
		copyLiteralsToByteArray(cmds, data, pos, mask, literals)

		/* Create the block split on the array of literals.
		   Literal histograms have alphabet size 256. */
		splitByteVectorLiteral(literals, literals_count, kSymbolsPerLiteralHistogram, kMaxLiteralHistograms, kLiteralStrideLength, kLiteralBlockSwitchCost, params, literal_split)

		literals = nil
	}
	{
		var insert_and_copy_codes []uint16 = make([]uint16, len(cmds))
		/* Compute prefix codes for commands. */

		for i := range cmds {
			insert_and_copy_codes[i] = cmds[i].cmd_prefix_
		}

		/* Create the block split on the array of command prefixes. */
		splitByteVectorCommand(insert_and_copy_codes, kSymbolsPerCommandHistogram, kMaxCommandHistograms, kCommandStrideLength, kCommandBlockSwitchCost, params, insert_and_copy_split)

		/* TODO: reuse for distances? */

		insert_and_copy_codes = nil
	}
	{
		var distance_prefixes []uint16 = make([]uint16, len(cmds))
		var j uint = 0
		/* Create a continuous array of distance prefixes. */

		for i := range cmds {
			var cmd *command = &cmds[i]
			// Only commands that actually copy and carry an explicit
			// distance contribute a distance prefix.
			if commandCopyLen(cmd) != 0 && cmd.cmd_prefix_ >= 128 {
				distance_prefixes[j] = cmd.dist_prefix_ & 0x3FF
				j++
			}
		}

		/* Create the block split on the array of distance prefixes. */
		splitByteVectorDistance(distance_prefixes, j, kSymbolsPerDistanceHistogram, kMaxCommandHistograms, kCommandStrideLength, kDistanceBlockSwitchCost, params, dist_split)

		distance_prefixes = nil
	}
}
|
||||
434
vendor/github.com/andybalholm/brotli/block_splitter_command.go
generated
vendored
Normal file
434
vendor/github.com/andybalholm/brotli/block_splitter_command.go
generated
vendored
Normal file
@ -0,0 +1,434 @@
|
||||
package brotli
|
||||
|
||||
import "math"
|
||||
|
||||
/* Copyright 2013 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
// initialEntropyCodesCommand seeds num_histograms histograms with samples of
// length stride taken at roughly evenly spaced, randomly jittered positions
// in data[:length].
func initialEntropyCodesCommand(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramCommand) {
	var seed uint32 = 7
	var block_length uint = length / num_histograms
	var i uint
	clearHistogramsCommand(histograms, num_histograms)
	for i = 0; i < num_histograms; i++ {
		var pos uint = length * i / num_histograms
		if i != 0 {
			pos += uint(myRand(&seed) % uint32(block_length))
		}

		// Clamp so the sampled window stays inside data.
		if pos+stride >= length {
			pos = length - stride - 1
		}

		histogramAddVectorCommand(&histograms[i], data[pos:], stride)
	}
}
|
||||
|
||||
// randomSampleCommand adds a random stride-length window of data[:length] to
// sample; if stride exceeds length, the whole input is used instead.
func randomSampleCommand(seed *uint32, data []uint16, length uint, stride uint, sample *histogramCommand) {
	var pos uint = 0
	if stride >= length {
		stride = length
	} else {
		pos = uint(myRand(seed) % uint32(length-stride+1))
	}

	histogramAddVectorCommand(sample, data[pos:], stride)
}
|
||||
|
||||
// refineEntropyCodesCommand improves the seeded histograms by repeatedly
// adding random samples of the input, distributing the iterations round-robin
// over the histograms.
func refineEntropyCodesCommand(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramCommand) {
	var iters uint = kIterMulForRefining*length/stride + kMinItersForRefining
	var seed uint32 = 7
	var iter uint
	// Round iters up to a multiple of num_histograms so each histogram gets
	// the same number of samples.
	iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms
	for iter = 0; iter < iters; iter++ {
		var sample histogramCommand
		histogramClearCommand(&sample)
		randomSampleCommand(&seed, data, length, stride, &sample)
		histogramAddHistogramCommand(&histograms[iter%num_histograms], &sample)
	}
}
|
||||
|
||||
/* Assigns a block id from the range [0, num_histograms) to each data element
   in data[0..length) and fills in block_id[0..length) with the assigned values.
   Returns the number of blocks, i.e. one plus the number of block switches.
   insert_cost, cost and switch_signal are caller-provided scratch buffers of
   sizes data_size*num_histograms, num_histograms and length*bitmaplen. */
func findBlocksCommand(data []uint16, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramCommand, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint {
	var data_size uint = histogramDataSizeCommand()
	var bitmaplen uint = (num_histograms + 7) >> 3 // bytes per per-position switch bitmap
	var num_blocks uint = 1
	var i uint
	var j uint
	assert(num_histograms <= 256)

	/* Trivial case: a single histogram means a single block. */
	if num_histograms <= 1 {
		for i = 0; i < length; i++ {
			block_id[i] = 0
		}

		return 1
	}

	/* insert_cost[symbol*num_histograms + k] = cost of coding symbol with
	   entropy code k: log2(total_k) - log2(count_k(symbol)). */
	for i := 0; i < int(data_size*num_histograms); i++ {
		insert_cost[i] = 0
	}
	for i = 0; i < num_histograms; i++ {
		insert_cost[i] = fastLog2(uint(uint32(histograms[i].total_count_)))
	}

	// Fill backwards so the per-histogram log-totals in insert_cost[0..num_histograms)
	// are still intact while being consumed.
	for i = data_size; i != 0; {
		i--
		for j = 0; j < num_histograms; j++ {
			insert_cost[i*num_histograms+j] = insert_cost[j] - bitCost(uint(histograms[j].data_[i]))
		}
	}

	for i := 0; i < int(num_histograms); i++ {
		cost[i] = 0
	}
	for i := 0; i < int(length*bitmaplen); i++ {
		switch_signal[i] = 0
	}

	/* After each iteration of this loop, cost[k] will contain the difference
	   between the minimum cost of arriving at the current byte position using
	   entropy code k, and the minimum cost of arriving at the current byte
	   position. This difference is capped at the block switch cost, and if it
	   reaches block switch cost, it means that when we trace back from the last
	   position, we need to switch here. */
	for i = 0; i < length; i++ {
		var byte_ix uint = i
		var ix uint = byte_ix * bitmaplen
		var insert_cost_ix uint = uint(data[byte_ix]) * num_histograms
		var min_cost float64 = 1e99
		var block_switch_cost float64 = block_switch_bitcost
		var k uint
		for k = 0; k < num_histograms; k++ {
			/* We are coding the symbol in data[byte_ix] with entropy code k. */
			cost[k] += insert_cost[insert_cost_ix+k]

			if cost[k] < min_cost {
				min_cost = cost[k]
				block_id[byte_ix] = byte(k)
			}
		}

		/* More blocks for the beginning. */
		if byte_ix < 2000 {
			block_switch_cost *= 0.77 + 0.07*float64(byte_ix)/2000
		}

		for k = 0; k < num_histograms; k++ {
			cost[k] -= min_cost
			if cost[k] >= block_switch_cost {
				var mask byte = byte(1 << (k & 7))
				cost[k] = block_switch_cost
				assert(k>>3 < bitmaplen)
				switch_signal[ix+(k>>3)] |= mask
				/* Trace back from the last position and switch at the marked places. */
			}
		}
	}
	{
		/* Backward pass: propagate the chosen block ids from the end, switching
		   only where the forward pass marked a switch point. */
		var byte_ix uint = length - 1
		var ix uint = byte_ix * bitmaplen
		var cur_id byte = block_id[byte_ix]
		for byte_ix > 0 {
			var mask byte = byte(1 << (cur_id & 7))
			assert(uint(cur_id)>>3 < bitmaplen)
			byte_ix--
			ix -= bitmaplen
			if switch_signal[ix+uint(cur_id>>3)]&mask != 0 {
				if cur_id != block_id[byte_ix] {
					cur_id = block_id[byte_ix]
					num_blocks++
				}
			}

			block_id[byte_ix] = cur_id
		}
	}

	return num_blocks
}
|
||||
|
||||
// Sentinel marking a block id that has not yet been assigned a new id;
// 256 is outside the valid byte-sized id range.
var remapBlockIdsCommand_kInvalidId uint16 = 256

// remapBlockIdsCommand renumbers block ids in order of first appearance so
// they form a dense range [0, n), using new_id (num_histograms entries) as
// scratch. Returns the number of distinct ids actually used.
func remapBlockIdsCommand(block_ids []byte, length uint, new_id []uint16, num_histograms uint) uint {
	var next_id uint16 = 0
	var i uint
	for i = 0; i < num_histograms; i++ {
		new_id[i] = remapBlockIdsCommand_kInvalidId
	}

	// First pass: assign consecutive new ids in order of first appearance.
	for i = 0; i < length; i++ {
		assert(uint(block_ids[i]) < num_histograms)
		if new_id[block_ids[i]] == remapBlockIdsCommand_kInvalidId {
			new_id[block_ids[i]] = next_id
			next_id++
		}
	}

	// Second pass: rewrite every block id through the mapping.
	for i = 0; i < length; i++ {
		block_ids[i] = byte(new_id[block_ids[i]])
		assert(uint(block_ids[i]) < num_histograms)
	}

	assert(uint(next_id) <= num_histograms)
	return uint(next_id)
}
|
||||
|
||||
func buildBlockHistogramsCommand(data []uint16, length uint, block_ids []byte, num_histograms uint, histograms []histogramCommand) {
|
||||
var i uint
|
||||
clearHistogramsCommand(histograms, num_histograms)
|
||||
for i = 0; i < length; i++ {
|
||||
histogramAddCommand(&histograms[block_ids[i]], uint(data[i]))
|
||||
}
|
||||
}
|
||||
|
||||
var clusterBlocksCommand_kInvalidIndex uint32 = math.MaxUint32
|
||||
|
||||
func clusterBlocksCommand(data []uint16, length uint, num_blocks uint, block_ids []byte, split *blockSplit) {
|
||||
var histogram_symbols []uint32 = make([]uint32, num_blocks)
|
||||
var block_lengths []uint32 = make([]uint32, num_blocks)
|
||||
var expected_num_clusters uint = clustersPerBatch * (num_blocks + histogramsPerBatch - 1) / histogramsPerBatch
|
||||
var all_histograms_size uint = 0
|
||||
var all_histograms_capacity uint = expected_num_clusters
|
||||
var all_histograms []histogramCommand = make([]histogramCommand, all_histograms_capacity)
|
||||
var cluster_size_size uint = 0
|
||||
var cluster_size_capacity uint = expected_num_clusters
|
||||
var cluster_size []uint32 = make([]uint32, cluster_size_capacity)
|
||||
var num_clusters uint = 0
|
||||
var histograms []histogramCommand = make([]histogramCommand, brotli_min_size_t(num_blocks, histogramsPerBatch))
|
||||
var max_num_pairs uint = histogramsPerBatch * histogramsPerBatch / 2
|
||||
var pairs_capacity uint = max_num_pairs + 1
|
||||
var pairs []histogramPair = make([]histogramPair, pairs_capacity)
|
||||
var pos uint = 0
|
||||
var clusters []uint32
|
||||
var num_final_clusters uint
|
||||
var new_index []uint32
|
||||
var i uint
|
||||
var sizes = [histogramsPerBatch]uint32{0}
|
||||
var new_clusters = [histogramsPerBatch]uint32{0}
|
||||
var symbols = [histogramsPerBatch]uint32{0}
|
||||
var remap = [histogramsPerBatch]uint32{0}
|
||||
|
||||
for i := 0; i < int(num_blocks); i++ {
|
||||
block_lengths[i] = 0
|
||||
}
|
||||
{
|
||||
var block_idx uint = 0
|
||||
for i = 0; i < length; i++ {
|
||||
assert(block_idx < num_blocks)
|
||||
block_lengths[block_idx]++
|
||||
if i+1 == length || block_ids[i] != block_ids[i+1] {
|
||||
block_idx++
|
||||
}
|
||||
}
|
||||
|
||||
assert(block_idx == num_blocks)
|
||||
}
|
||||
|
||||
for i = 0; i < num_blocks; i += histogramsPerBatch {
|
||||
var num_to_combine uint = brotli_min_size_t(num_blocks-i, histogramsPerBatch)
|
||||
var num_new_clusters uint
|
||||
var j uint
|
||||
for j = 0; j < num_to_combine; j++ {
|
||||
var k uint
|
||||
histogramClearCommand(&histograms[j])
|
||||
for k = 0; uint32(k) < block_lengths[i+j]; k++ {
|
||||
histogramAddCommand(&histograms[j], uint(data[pos]))
|
||||
pos++
|
||||
}
|
||||
|
||||
histograms[j].bit_cost_ = populationCostCommand(&histograms[j])
|
||||
new_clusters[j] = uint32(j)
|
||||
symbols[j] = uint32(j)
|
||||
sizes[j] = 1
|
||||
}
|
||||
|
||||
num_new_clusters = histogramCombineCommand(histograms, sizes[:], symbols[:], new_clusters[:], []histogramPair(pairs), num_to_combine, num_to_combine, histogramsPerBatch, max_num_pairs)
|
||||
if all_histograms_capacity < (all_histograms_size + num_new_clusters) {
|
||||
var _new_size uint
|
||||
if all_histograms_capacity == 0 {
|
||||
_new_size = all_histograms_size + num_new_clusters
|
||||
} else {
|
||||
_new_size = all_histograms_capacity
|
||||
}
|
||||
var new_array []histogramCommand
|
||||
for _new_size < (all_histograms_size + num_new_clusters) {
|
||||
_new_size *= 2
|
||||
}
|
||||
new_array = make([]histogramCommand, _new_size)
|
||||
if all_histograms_capacity != 0 {
|
||||
copy(new_array, all_histograms[:all_histograms_capacity])
|
||||
}
|
||||
|
||||
all_histograms = new_array
|
||||
all_histograms_capacity = _new_size
|
||||
}
|
||||
|
||||
brotli_ensure_capacity_uint32_t(&cluster_size, &cluster_size_capacity, cluster_size_size+num_new_clusters)
|
||||
for j = 0; j < num_new_clusters; j++ {
|
||||
all_histograms[all_histograms_size] = histograms[new_clusters[j]]
|
||||
all_histograms_size++
|
||||
cluster_size[cluster_size_size] = sizes[new_clusters[j]]
|
||||
cluster_size_size++
|
||||
remap[new_clusters[j]] = uint32(j)
|
||||
}
|
||||
|
||||
for j = 0; j < num_to_combine; j++ {
|
||||
histogram_symbols[i+j] = uint32(num_clusters) + remap[symbols[j]]
|
||||
}
|
||||
|
||||
num_clusters += num_new_clusters
|
||||
assert(num_clusters == cluster_size_size)
|
||||
assert(num_clusters == all_histograms_size)
|
||||
}
|
||||
|
||||
histograms = nil
|
||||
|
||||
max_num_pairs = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters)
|
||||
if pairs_capacity < max_num_pairs+1 {
|
||||
pairs = nil
|
||||
pairs = make([]histogramPair, (max_num_pairs + 1))
|
||||
}
|
||||
|
||||
clusters = make([]uint32, num_clusters)
|
||||
for i = 0; i < num_clusters; i++ {
|
||||
clusters[i] = uint32(i)
|
||||
}
|
||||
|
||||
num_final_clusters = histogramCombineCommand(all_histograms, cluster_size, histogram_symbols, clusters, pairs, num_clusters, num_blocks, maxNumberOfBlockTypes, max_num_pairs)
|
||||
pairs = nil
|
||||
cluster_size = nil
|
||||
|
||||
new_index = make([]uint32, num_clusters)
|
||||
for i = 0; i < num_clusters; i++ {
|
||||
new_index[i] = clusterBlocksCommand_kInvalidIndex
|
||||
}
|
||||
pos = 0
|
||||
{
|
||||
var next_index uint32 = 0
|
||||
for i = 0; i < num_blocks; i++ {
|
||||
var histo histogramCommand
|
||||
var j uint
|
||||
var best_out uint32
|
||||
var best_bits float64
|
||||
histogramClearCommand(&histo)
|
||||
for j = 0; uint32(j) < block_lengths[i]; j++ {
|
||||
histogramAddCommand(&histo, uint(data[pos]))
|
||||
pos++
|
||||
}
|
||||
|
||||
if i == 0 {
|
||||
best_out = histogram_symbols[0]
|
||||
} else {
|
||||
best_out = histogram_symbols[i-1]
|
||||
}
|
||||
best_bits = histogramBitCostDistanceCommand(&histo, &all_histograms[best_out])
|
||||
for j = 0; j < num_final_clusters; j++ {
|
||||
var cur_bits float64 = histogramBitCostDistanceCommand(&histo, &all_histograms[clusters[j]])
|
||||
if cur_bits < best_bits {
|
||||
best_bits = cur_bits
|
||||
best_out = clusters[j]
|
||||
}
|
||||
}
|
||||
|
||||
histogram_symbols[i] = best_out
|
||||
if new_index[best_out] == clusterBlocksCommand_kInvalidIndex {
|
||||
new_index[best_out] = next_index
|
||||
next_index++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
clusters = nil
|
||||
all_histograms = nil
|
||||
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, num_blocks)
|
||||
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, num_blocks)
|
||||
{
|
||||
var cur_length uint32 = 0
|
||||
var block_idx uint = 0
|
||||
var max_type byte = 0
|
||||
for i = 0; i < num_blocks; i++ {
|
||||
cur_length += block_lengths[i]
|
||||
if i+1 == num_blocks || histogram_symbols[i] != histogram_symbols[i+1] {
|
||||
var id byte = byte(new_index[histogram_symbols[i]])
|
||||
split.types[block_idx] = id
|
||||
split.lengths[block_idx] = cur_length
|
||||
max_type = brotli_max_uint8_t(max_type, id)
|
||||
cur_length = 0
|
||||
block_idx++
|
||||
}
|
||||
}
|
||||
|
||||
split.num_blocks = block_idx
|
||||
split.num_types = uint(max_type) + 1
|
||||
}
|
||||
|
||||
new_index = nil
|
||||
block_lengths = nil
|
||||
histogram_symbols = nil
|
||||
}
|
||||
|
||||
func splitByteVectorCommand(data []uint16, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) {
|
||||
length := uint(len(data))
|
||||
var data_size uint = histogramDataSizeCommand()
|
||||
var num_histograms uint = length/literals_per_histogram + 1
|
||||
var histograms []histogramCommand
|
||||
if num_histograms > max_histograms {
|
||||
num_histograms = max_histograms
|
||||
}
|
||||
|
||||
if length == 0 {
|
||||
split.num_types = 1
|
||||
return
|
||||
} else if length < kMinLengthForBlockSplitting {
|
||||
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, split.num_blocks+1)
|
||||
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, split.num_blocks+1)
|
||||
split.num_types = 1
|
||||
split.types[split.num_blocks] = 0
|
||||
split.lengths[split.num_blocks] = uint32(length)
|
||||
split.num_blocks++
|
||||
return
|
||||
}
|
||||
|
||||
histograms = make([]histogramCommand, num_histograms)
|
||||
|
||||
/* Find good entropy codes. */
|
||||
initialEntropyCodesCommand(data, length, sampling_stride_length, num_histograms, histograms)
|
||||
|
||||
refineEntropyCodesCommand(data, length, sampling_stride_length, num_histograms, histograms)
|
||||
{
|
||||
var block_ids []byte = make([]byte, length)
|
||||
var num_blocks uint = 0
|
||||
var bitmaplen uint = (num_histograms + 7) >> 3
|
||||
var insert_cost []float64 = make([]float64, (data_size * num_histograms))
|
||||
var cost []float64 = make([]float64, num_histograms)
|
||||
var switch_signal []byte = make([]byte, (length * bitmaplen))
|
||||
var new_id []uint16 = make([]uint16, num_histograms)
|
||||
var iters uint
|
||||
if params.quality < hqZopflificationQuality {
|
||||
iters = 3
|
||||
} else {
|
||||
iters = 10
|
||||
}
|
||||
/* Find a good path through literals with the good entropy codes. */
|
||||
|
||||
var i uint
|
||||
for i = 0; i < iters; i++ {
|
||||
num_blocks = findBlocksCommand(data, length, block_switch_cost, num_histograms, histograms, insert_cost, cost, switch_signal, block_ids)
|
||||
num_histograms = remapBlockIdsCommand(block_ids, length, new_id, num_histograms)
|
||||
buildBlockHistogramsCommand(data, length, block_ids, num_histograms, histograms)
|
||||
}
|
||||
|
||||
insert_cost = nil
|
||||
cost = nil
|
||||
switch_signal = nil
|
||||
new_id = nil
|
||||
histograms = nil
|
||||
clusterBlocksCommand(data, length, num_blocks, block_ids, split)
|
||||
block_ids = nil
|
||||
}
|
||||
}
|
||||
433
vendor/github.com/andybalholm/brotli/block_splitter_distance.go
generated
vendored
Normal file
433
vendor/github.com/andybalholm/brotli/block_splitter_distance.go
generated
vendored
Normal file
@ -0,0 +1,433 @@
|
||||
package brotli
|
||||
|
||||
import "math"
|
||||
|
||||
/* Copyright 2013 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
func initialEntropyCodesDistance(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramDistance) {
|
||||
var seed uint32 = 7
|
||||
var block_length uint = length / num_histograms
|
||||
var i uint
|
||||
clearHistogramsDistance(histograms, num_histograms)
|
||||
for i = 0; i < num_histograms; i++ {
|
||||
var pos uint = length * i / num_histograms
|
||||
if i != 0 {
|
||||
pos += uint(myRand(&seed) % uint32(block_length))
|
||||
}
|
||||
|
||||
if pos+stride >= length {
|
||||
pos = length - stride - 1
|
||||
}
|
||||
|
||||
histogramAddVectorDistance(&histograms[i], data[pos:], stride)
|
||||
}
|
||||
}
|
||||
|
||||
func randomSampleDistance(seed *uint32, data []uint16, length uint, stride uint, sample *histogramDistance) {
|
||||
var pos uint = 0
|
||||
if stride >= length {
|
||||
stride = length
|
||||
} else {
|
||||
pos = uint(myRand(seed) % uint32(length-stride+1))
|
||||
}
|
||||
|
||||
histogramAddVectorDistance(sample, data[pos:], stride)
|
||||
}
|
||||
|
||||
func refineEntropyCodesDistance(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramDistance) {
|
||||
var iters uint = kIterMulForRefining*length/stride + kMinItersForRefining
|
||||
var seed uint32 = 7
|
||||
var iter uint
|
||||
iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms
|
||||
for iter = 0; iter < iters; iter++ {
|
||||
var sample histogramDistance
|
||||
histogramClearDistance(&sample)
|
||||
randomSampleDistance(&seed, data, length, stride, &sample)
|
||||
histogramAddHistogramDistance(&histograms[iter%num_histograms], &sample)
|
||||
}
|
||||
}
|
||||
|
||||
/* Assigns a block id from the range [0, num_histograms) to each data element
|
||||
in data[0..length) and fills in block_id[0..length) with the assigned values.
|
||||
Returns the number of blocks, i.e. one plus the number of block switches. */
|
||||
func findBlocksDistance(data []uint16, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramDistance, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint {
|
||||
var data_size uint = histogramDataSizeDistance()
|
||||
var bitmaplen uint = (num_histograms + 7) >> 3
|
||||
var num_blocks uint = 1
|
||||
var i uint
|
||||
var j uint
|
||||
assert(num_histograms <= 256)
|
||||
if num_histograms <= 1 {
|
||||
for i = 0; i < length; i++ {
|
||||
block_id[i] = 0
|
||||
}
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
for i := 0; i < int(data_size*num_histograms); i++ {
|
||||
insert_cost[i] = 0
|
||||
}
|
||||
for i = 0; i < num_histograms; i++ {
|
||||
insert_cost[i] = fastLog2(uint(uint32(histograms[i].total_count_)))
|
||||
}
|
||||
|
||||
for i = data_size; i != 0; {
|
||||
i--
|
||||
for j = 0; j < num_histograms; j++ {
|
||||
insert_cost[i*num_histograms+j] = insert_cost[j] - bitCost(uint(histograms[j].data_[i]))
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < int(num_histograms); i++ {
|
||||
cost[i] = 0
|
||||
}
|
||||
for i := 0; i < int(length*bitmaplen); i++ {
|
||||
switch_signal[i] = 0
|
||||
}
|
||||
|
||||
/* After each iteration of this loop, cost[k] will contain the difference
|
||||
between the minimum cost of arriving at the current byte position using
|
||||
entropy code k, and the minimum cost of arriving at the current byte
|
||||
position. This difference is capped at the block switch cost, and if it
|
||||
reaches block switch cost, it means that when we trace back from the last
|
||||
position, we need to switch here. */
|
||||
for i = 0; i < length; i++ {
|
||||
var byte_ix uint = i
|
||||
var ix uint = byte_ix * bitmaplen
|
||||
var insert_cost_ix uint = uint(data[byte_ix]) * num_histograms
|
||||
var min_cost float64 = 1e99
|
||||
var block_switch_cost float64 = block_switch_bitcost
|
||||
var k uint
|
||||
for k = 0; k < num_histograms; k++ {
|
||||
/* We are coding the symbol in data[byte_ix] with entropy code k. */
|
||||
cost[k] += insert_cost[insert_cost_ix+k]
|
||||
|
||||
if cost[k] < min_cost {
|
||||
min_cost = cost[k]
|
||||
block_id[byte_ix] = byte(k)
|
||||
}
|
||||
}
|
||||
|
||||
/* More blocks for the beginning. */
|
||||
if byte_ix < 2000 {
|
||||
block_switch_cost *= 0.77 + 0.07*float64(byte_ix)/2000
|
||||
}
|
||||
|
||||
for k = 0; k < num_histograms; k++ {
|
||||
cost[k] -= min_cost
|
||||
if cost[k] >= block_switch_cost {
|
||||
var mask byte = byte(1 << (k & 7))
|
||||
cost[k] = block_switch_cost
|
||||
assert(k>>3 < bitmaplen)
|
||||
switch_signal[ix+(k>>3)] |= mask
|
||||
/* Trace back from the last position and switch at the marked places. */
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
var byte_ix uint = length - 1
|
||||
var ix uint = byte_ix * bitmaplen
|
||||
var cur_id byte = block_id[byte_ix]
|
||||
for byte_ix > 0 {
|
||||
var mask byte = byte(1 << (cur_id & 7))
|
||||
assert(uint(cur_id)>>3 < bitmaplen)
|
||||
byte_ix--
|
||||
ix -= bitmaplen
|
||||
if switch_signal[ix+uint(cur_id>>3)]&mask != 0 {
|
||||
if cur_id != block_id[byte_ix] {
|
||||
cur_id = block_id[byte_ix]
|
||||
num_blocks++
|
||||
}
|
||||
}
|
||||
|
||||
block_id[byte_ix] = cur_id
|
||||
}
|
||||
}
|
||||
|
||||
return num_blocks
|
||||
}
|
||||
|
||||
var remapBlockIdsDistance_kInvalidId uint16 = 256
|
||||
|
||||
func remapBlockIdsDistance(block_ids []byte, length uint, new_id []uint16, num_histograms uint) uint {
|
||||
var next_id uint16 = 0
|
||||
var i uint
|
||||
for i = 0; i < num_histograms; i++ {
|
||||
new_id[i] = remapBlockIdsDistance_kInvalidId
|
||||
}
|
||||
|
||||
for i = 0; i < length; i++ {
|
||||
assert(uint(block_ids[i]) < num_histograms)
|
||||
if new_id[block_ids[i]] == remapBlockIdsDistance_kInvalidId {
|
||||
new_id[block_ids[i]] = next_id
|
||||
next_id++
|
||||
}
|
||||
}
|
||||
|
||||
for i = 0; i < length; i++ {
|
||||
block_ids[i] = byte(new_id[block_ids[i]])
|
||||
assert(uint(block_ids[i]) < num_histograms)
|
||||
}
|
||||
|
||||
assert(uint(next_id) <= num_histograms)
|
||||
return uint(next_id)
|
||||
}
|
||||
|
||||
func buildBlockHistogramsDistance(data []uint16, length uint, block_ids []byte, num_histograms uint, histograms []histogramDistance) {
|
||||
var i uint
|
||||
clearHistogramsDistance(histograms, num_histograms)
|
||||
for i = 0; i < length; i++ {
|
||||
histogramAddDistance(&histograms[block_ids[i]], uint(data[i]))
|
||||
}
|
||||
}
|
||||
|
||||
var clusterBlocksDistance_kInvalidIndex uint32 = math.MaxUint32
|
||||
|
||||
func clusterBlocksDistance(data []uint16, length uint, num_blocks uint, block_ids []byte, split *blockSplit) {
|
||||
var histogram_symbols []uint32 = make([]uint32, num_blocks)
|
||||
var block_lengths []uint32 = make([]uint32, num_blocks)
|
||||
var expected_num_clusters uint = clustersPerBatch * (num_blocks + histogramsPerBatch - 1) / histogramsPerBatch
|
||||
var all_histograms_size uint = 0
|
||||
var all_histograms_capacity uint = expected_num_clusters
|
||||
var all_histograms []histogramDistance = make([]histogramDistance, all_histograms_capacity)
|
||||
var cluster_size_size uint = 0
|
||||
var cluster_size_capacity uint = expected_num_clusters
|
||||
var cluster_size []uint32 = make([]uint32, cluster_size_capacity)
|
||||
var num_clusters uint = 0
|
||||
var histograms []histogramDistance = make([]histogramDistance, brotli_min_size_t(num_blocks, histogramsPerBatch))
|
||||
var max_num_pairs uint = histogramsPerBatch * histogramsPerBatch / 2
|
||||
var pairs_capacity uint = max_num_pairs + 1
|
||||
var pairs []histogramPair = make([]histogramPair, pairs_capacity)
|
||||
var pos uint = 0
|
||||
var clusters []uint32
|
||||
var num_final_clusters uint
|
||||
var new_index []uint32
|
||||
var i uint
|
||||
var sizes = [histogramsPerBatch]uint32{0}
|
||||
var new_clusters = [histogramsPerBatch]uint32{0}
|
||||
var symbols = [histogramsPerBatch]uint32{0}
|
||||
var remap = [histogramsPerBatch]uint32{0}
|
||||
|
||||
for i := 0; i < int(num_blocks); i++ {
|
||||
block_lengths[i] = 0
|
||||
}
|
||||
{
|
||||
var block_idx uint = 0
|
||||
for i = 0; i < length; i++ {
|
||||
assert(block_idx < num_blocks)
|
||||
block_lengths[block_idx]++
|
||||
if i+1 == length || block_ids[i] != block_ids[i+1] {
|
||||
block_idx++
|
||||
}
|
||||
}
|
||||
|
||||
assert(block_idx == num_blocks)
|
||||
}
|
||||
|
||||
for i = 0; i < num_blocks; i += histogramsPerBatch {
|
||||
var num_to_combine uint = brotli_min_size_t(num_blocks-i, histogramsPerBatch)
|
||||
var num_new_clusters uint
|
||||
var j uint
|
||||
for j = 0; j < num_to_combine; j++ {
|
||||
var k uint
|
||||
histogramClearDistance(&histograms[j])
|
||||
for k = 0; uint32(k) < block_lengths[i+j]; k++ {
|
||||
histogramAddDistance(&histograms[j], uint(data[pos]))
|
||||
pos++
|
||||
}
|
||||
|
||||
histograms[j].bit_cost_ = populationCostDistance(&histograms[j])
|
||||
new_clusters[j] = uint32(j)
|
||||
symbols[j] = uint32(j)
|
||||
sizes[j] = 1
|
||||
}
|
||||
|
||||
num_new_clusters = histogramCombineDistance(histograms, sizes[:], symbols[:], new_clusters[:], []histogramPair(pairs), num_to_combine, num_to_combine, histogramsPerBatch, max_num_pairs)
|
||||
if all_histograms_capacity < (all_histograms_size + num_new_clusters) {
|
||||
var _new_size uint
|
||||
if all_histograms_capacity == 0 {
|
||||
_new_size = all_histograms_size + num_new_clusters
|
||||
} else {
|
||||
_new_size = all_histograms_capacity
|
||||
}
|
||||
var new_array []histogramDistance
|
||||
for _new_size < (all_histograms_size + num_new_clusters) {
|
||||
_new_size *= 2
|
||||
}
|
||||
new_array = make([]histogramDistance, _new_size)
|
||||
if all_histograms_capacity != 0 {
|
||||
copy(new_array, all_histograms[:all_histograms_capacity])
|
||||
}
|
||||
|
||||
all_histograms = new_array
|
||||
all_histograms_capacity = _new_size
|
||||
}
|
||||
|
||||
brotli_ensure_capacity_uint32_t(&cluster_size, &cluster_size_capacity, cluster_size_size+num_new_clusters)
|
||||
for j = 0; j < num_new_clusters; j++ {
|
||||
all_histograms[all_histograms_size] = histograms[new_clusters[j]]
|
||||
all_histograms_size++
|
||||
cluster_size[cluster_size_size] = sizes[new_clusters[j]]
|
||||
cluster_size_size++
|
||||
remap[new_clusters[j]] = uint32(j)
|
||||
}
|
||||
|
||||
for j = 0; j < num_to_combine; j++ {
|
||||
histogram_symbols[i+j] = uint32(num_clusters) + remap[symbols[j]]
|
||||
}
|
||||
|
||||
num_clusters += num_new_clusters
|
||||
assert(num_clusters == cluster_size_size)
|
||||
assert(num_clusters == all_histograms_size)
|
||||
}
|
||||
|
||||
histograms = nil
|
||||
|
||||
max_num_pairs = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters)
|
||||
if pairs_capacity < max_num_pairs+1 {
|
||||
pairs = nil
|
||||
pairs = make([]histogramPair, (max_num_pairs + 1))
|
||||
}
|
||||
|
||||
clusters = make([]uint32, num_clusters)
|
||||
for i = 0; i < num_clusters; i++ {
|
||||
clusters[i] = uint32(i)
|
||||
}
|
||||
|
||||
num_final_clusters = histogramCombineDistance(all_histograms, cluster_size, histogram_symbols, clusters, pairs, num_clusters, num_blocks, maxNumberOfBlockTypes, max_num_pairs)
|
||||
pairs = nil
|
||||
cluster_size = nil
|
||||
|
||||
new_index = make([]uint32, num_clusters)
|
||||
for i = 0; i < num_clusters; i++ {
|
||||
new_index[i] = clusterBlocksDistance_kInvalidIndex
|
||||
}
|
||||
pos = 0
|
||||
{
|
||||
var next_index uint32 = 0
|
||||
for i = 0; i < num_blocks; i++ {
|
||||
var histo histogramDistance
|
||||
var j uint
|
||||
var best_out uint32
|
||||
var best_bits float64
|
||||
histogramClearDistance(&histo)
|
||||
for j = 0; uint32(j) < block_lengths[i]; j++ {
|
||||
histogramAddDistance(&histo, uint(data[pos]))
|
||||
pos++
|
||||
}
|
||||
|
||||
if i == 0 {
|
||||
best_out = histogram_symbols[0]
|
||||
} else {
|
||||
best_out = histogram_symbols[i-1]
|
||||
}
|
||||
best_bits = histogramBitCostDistanceDistance(&histo, &all_histograms[best_out])
|
||||
for j = 0; j < num_final_clusters; j++ {
|
||||
var cur_bits float64 = histogramBitCostDistanceDistance(&histo, &all_histograms[clusters[j]])
|
||||
if cur_bits < best_bits {
|
||||
best_bits = cur_bits
|
||||
best_out = clusters[j]
|
||||
}
|
||||
}
|
||||
|
||||
histogram_symbols[i] = best_out
|
||||
if new_index[best_out] == clusterBlocksDistance_kInvalidIndex {
|
||||
new_index[best_out] = next_index
|
||||
next_index++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
clusters = nil
|
||||
all_histograms = nil
|
||||
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, num_blocks)
|
||||
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, num_blocks)
|
||||
{
|
||||
var cur_length uint32 = 0
|
||||
var block_idx uint = 0
|
||||
var max_type byte = 0
|
||||
for i = 0; i < num_blocks; i++ {
|
||||
cur_length += block_lengths[i]
|
||||
if i+1 == num_blocks || histogram_symbols[i] != histogram_symbols[i+1] {
|
||||
var id byte = byte(new_index[histogram_symbols[i]])
|
||||
split.types[block_idx] = id
|
||||
split.lengths[block_idx] = cur_length
|
||||
max_type = brotli_max_uint8_t(max_type, id)
|
||||
cur_length = 0
|
||||
block_idx++
|
||||
}
|
||||
}
|
||||
|
||||
split.num_blocks = block_idx
|
||||
split.num_types = uint(max_type) + 1
|
||||
}
|
||||
|
||||
new_index = nil
|
||||
block_lengths = nil
|
||||
histogram_symbols = nil
|
||||
}
|
||||
|
||||
func splitByteVectorDistance(data []uint16, length uint, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) {
|
||||
var data_size uint = histogramDataSizeDistance()
|
||||
var num_histograms uint = length/literals_per_histogram + 1
|
||||
var histograms []histogramDistance
|
||||
if num_histograms > max_histograms {
|
||||
num_histograms = max_histograms
|
||||
}
|
||||
|
||||
if length == 0 {
|
||||
split.num_types = 1
|
||||
return
|
||||
} else if length < kMinLengthForBlockSplitting {
|
||||
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, split.num_blocks+1)
|
||||
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, split.num_blocks+1)
|
||||
split.num_types = 1
|
||||
split.types[split.num_blocks] = 0
|
||||
split.lengths[split.num_blocks] = uint32(length)
|
||||
split.num_blocks++
|
||||
return
|
||||
}
|
||||
|
||||
histograms = make([]histogramDistance, num_histograms)
|
||||
|
||||
/* Find good entropy codes. */
|
||||
initialEntropyCodesDistance(data, length, sampling_stride_length, num_histograms, histograms)
|
||||
|
||||
refineEntropyCodesDistance(data, length, sampling_stride_length, num_histograms, histograms)
|
||||
{
|
||||
var block_ids []byte = make([]byte, length)
|
||||
var num_blocks uint = 0
|
||||
var bitmaplen uint = (num_histograms + 7) >> 3
|
||||
var insert_cost []float64 = make([]float64, (data_size * num_histograms))
|
||||
var cost []float64 = make([]float64, num_histograms)
|
||||
var switch_signal []byte = make([]byte, (length * bitmaplen))
|
||||
var new_id []uint16 = make([]uint16, num_histograms)
|
||||
var iters uint
|
||||
if params.quality < hqZopflificationQuality {
|
||||
iters = 3
|
||||
} else {
|
||||
iters = 10
|
||||
}
|
||||
/* Find a good path through literals with the good entropy codes. */
|
||||
|
||||
var i uint
|
||||
for i = 0; i < iters; i++ {
|
||||
num_blocks = findBlocksDistance(data, length, block_switch_cost, num_histograms, histograms, insert_cost, cost, switch_signal, block_ids)
|
||||
num_histograms = remapBlockIdsDistance(block_ids, length, new_id, num_histograms)
|
||||
buildBlockHistogramsDistance(data, length, block_ids, num_histograms, histograms)
|
||||
}
|
||||
|
||||
insert_cost = nil
|
||||
cost = nil
|
||||
switch_signal = nil
|
||||
new_id = nil
|
||||
histograms = nil
|
||||
clusterBlocksDistance(data, length, num_blocks, block_ids, split)
|
||||
block_ids = nil
|
||||
}
|
||||
}
|
||||
433
vendor/github.com/andybalholm/brotli/block_splitter_literal.go
generated
vendored
Normal file
433
vendor/github.com/andybalholm/brotli/block_splitter_literal.go
generated
vendored
Normal file
@ -0,0 +1,433 @@
|
||||
package brotli
|
||||
|
||||
import "math"
|
||||
|
||||
/* Copyright 2013 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
func initialEntropyCodesLiteral(data []byte, length uint, stride uint, num_histograms uint, histograms []histogramLiteral) {
|
||||
var seed uint32 = 7
|
||||
var block_length uint = length / num_histograms
|
||||
var i uint
|
||||
clearHistogramsLiteral(histograms, num_histograms)
|
||||
for i = 0; i < num_histograms; i++ {
|
||||
var pos uint = length * i / num_histograms
|
||||
if i != 0 {
|
||||
pos += uint(myRand(&seed) % uint32(block_length))
|
||||
}
|
||||
|
||||
if pos+stride >= length {
|
||||
pos = length - stride - 1
|
||||
}
|
||||
|
||||
histogramAddVectorLiteral(&histograms[i], data[pos:], stride)
|
||||
}
|
||||
}
|
||||
|
||||
func randomSampleLiteral(seed *uint32, data []byte, length uint, stride uint, sample *histogramLiteral) {
|
||||
var pos uint = 0
|
||||
if stride >= length {
|
||||
stride = length
|
||||
} else {
|
||||
pos = uint(myRand(seed) % uint32(length-stride+1))
|
||||
}
|
||||
|
||||
histogramAddVectorLiteral(sample, data[pos:], stride)
|
||||
}
|
||||
|
||||
func refineEntropyCodesLiteral(data []byte, length uint, stride uint, num_histograms uint, histograms []histogramLiteral) {
|
||||
var iters uint = kIterMulForRefining*length/stride + kMinItersForRefining
|
||||
var seed uint32 = 7
|
||||
var iter uint
|
||||
iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms
|
||||
for iter = 0; iter < iters; iter++ {
|
||||
var sample histogramLiteral
|
||||
histogramClearLiteral(&sample)
|
||||
randomSampleLiteral(&seed, data, length, stride, &sample)
|
||||
histogramAddHistogramLiteral(&histograms[iter%num_histograms], &sample)
|
||||
}
|
||||
}
|
||||
|
||||
/* Assigns a block id from the range [0, num_histograms) to each data element
|
||||
in data[0..length) and fills in block_id[0..length) with the assigned values.
|
||||
Returns the number of blocks, i.e. one plus the number of block switches. */
|
||||
func findBlocksLiteral(data []byte, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramLiteral, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint {
|
||||
var data_size uint = histogramDataSizeLiteral()
|
||||
var bitmaplen uint = (num_histograms + 7) >> 3
|
||||
var num_blocks uint = 1
|
||||
var i uint
|
||||
var j uint
|
||||
assert(num_histograms <= 256)
|
||||
if num_histograms <= 1 {
|
||||
for i = 0; i < length; i++ {
|
||||
block_id[i] = 0
|
||||
}
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
for i := 0; i < int(data_size*num_histograms); i++ {
|
||||
insert_cost[i] = 0
|
||||
}
|
||||
for i = 0; i < num_histograms; i++ {
|
||||
insert_cost[i] = fastLog2(uint(uint32(histograms[i].total_count_)))
|
||||
}
|
||||
|
||||
for i = data_size; i != 0; {
|
||||
i--
|
||||
for j = 0; j < num_histograms; j++ {
|
||||
insert_cost[i*num_histograms+j] = insert_cost[j] - bitCost(uint(histograms[j].data_[i]))
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < int(num_histograms); i++ {
|
||||
cost[i] = 0
|
||||
}
|
||||
for i := 0; i < int(length*bitmaplen); i++ {
|
||||
switch_signal[i] = 0
|
||||
}
|
||||
|
||||
/* After each iteration of this loop, cost[k] will contain the difference
|
||||
between the minimum cost of arriving at the current byte position using
|
||||
entropy code k, and the minimum cost of arriving at the current byte
|
||||
position. This difference is capped at the block switch cost, and if it
|
||||
reaches block switch cost, it means that when we trace back from the last
|
||||
position, we need to switch here. */
|
||||
for i = 0; i < length; i++ {
|
||||
var byte_ix uint = i
|
||||
var ix uint = byte_ix * bitmaplen
|
||||
var insert_cost_ix uint = uint(data[byte_ix]) * num_histograms
|
||||
var min_cost float64 = 1e99
|
||||
var block_switch_cost float64 = block_switch_bitcost
|
||||
var k uint
|
||||
for k = 0; k < num_histograms; k++ {
|
||||
/* We are coding the symbol in data[byte_ix] with entropy code k. */
|
||||
cost[k] += insert_cost[insert_cost_ix+k]
|
||||
|
||||
if cost[k] < min_cost {
|
||||
min_cost = cost[k]
|
||||
block_id[byte_ix] = byte(k)
|
||||
}
|
||||
}
|
||||
|
||||
/* More blocks for the beginning. */
|
||||
if byte_ix < 2000 {
|
||||
block_switch_cost *= 0.77 + 0.07*float64(byte_ix)/2000
|
||||
}
|
||||
|
||||
for k = 0; k < num_histograms; k++ {
|
||||
cost[k] -= min_cost
|
||||
if cost[k] >= block_switch_cost {
|
||||
var mask byte = byte(1 << (k & 7))
|
||||
cost[k] = block_switch_cost
|
||||
assert(k>>3 < bitmaplen)
|
||||
switch_signal[ix+(k>>3)] |= mask
|
||||
/* Trace back from the last position and switch at the marked places. */
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
var byte_ix uint = length - 1
|
||||
var ix uint = byte_ix * bitmaplen
|
||||
var cur_id byte = block_id[byte_ix]
|
||||
for byte_ix > 0 {
|
||||
var mask byte = byte(1 << (cur_id & 7))
|
||||
assert(uint(cur_id)>>3 < bitmaplen)
|
||||
byte_ix--
|
||||
ix -= bitmaplen
|
||||
if switch_signal[ix+uint(cur_id>>3)]&mask != 0 {
|
||||
if cur_id != block_id[byte_ix] {
|
||||
cur_id = block_id[byte_ix]
|
||||
num_blocks++
|
||||
}
|
||||
}
|
||||
|
||||
block_id[byte_ix] = cur_id
|
||||
}
|
||||
}
|
||||
|
||||
return num_blocks
|
||||
}
|
||||
|
||||
var remapBlockIdsLiteral_kInvalidId uint16 = 256
|
||||
|
||||
func remapBlockIdsLiteral(block_ids []byte, length uint, new_id []uint16, num_histograms uint) uint {
|
||||
var next_id uint16 = 0
|
||||
var i uint
|
||||
for i = 0; i < num_histograms; i++ {
|
||||
new_id[i] = remapBlockIdsLiteral_kInvalidId
|
||||
}
|
||||
|
||||
for i = 0; i < length; i++ {
|
||||
assert(uint(block_ids[i]) < num_histograms)
|
||||
if new_id[block_ids[i]] == remapBlockIdsLiteral_kInvalidId {
|
||||
new_id[block_ids[i]] = next_id
|
||||
next_id++
|
||||
}
|
||||
}
|
||||
|
||||
for i = 0; i < length; i++ {
|
||||
block_ids[i] = byte(new_id[block_ids[i]])
|
||||
assert(uint(block_ids[i]) < num_histograms)
|
||||
}
|
||||
|
||||
assert(uint(next_id) <= num_histograms)
|
||||
return uint(next_id)
|
||||
}
|
||||
|
||||
func buildBlockHistogramsLiteral(data []byte, length uint, block_ids []byte, num_histograms uint, histograms []histogramLiteral) {
|
||||
var i uint
|
||||
clearHistogramsLiteral(histograms, num_histograms)
|
||||
for i = 0; i < length; i++ {
|
||||
histogramAddLiteral(&histograms[block_ids[i]], uint(data[i]))
|
||||
}
|
||||
}
|
||||
|
||||
var clusterBlocksLiteral_kInvalidIndex uint32 = math.MaxUint32
|
||||
|
||||
func clusterBlocksLiteral(data []byte, length uint, num_blocks uint, block_ids []byte, split *blockSplit) {
|
||||
var histogram_symbols []uint32 = make([]uint32, num_blocks)
|
||||
var block_lengths []uint32 = make([]uint32, num_blocks)
|
||||
var expected_num_clusters uint = clustersPerBatch * (num_blocks + histogramsPerBatch - 1) / histogramsPerBatch
|
||||
var all_histograms_size uint = 0
|
||||
var all_histograms_capacity uint = expected_num_clusters
|
||||
var all_histograms []histogramLiteral = make([]histogramLiteral, all_histograms_capacity)
|
||||
var cluster_size_size uint = 0
|
||||
var cluster_size_capacity uint = expected_num_clusters
|
||||
var cluster_size []uint32 = make([]uint32, cluster_size_capacity)
|
||||
var num_clusters uint = 0
|
||||
var histograms []histogramLiteral = make([]histogramLiteral, brotli_min_size_t(num_blocks, histogramsPerBatch))
|
||||
var max_num_pairs uint = histogramsPerBatch * histogramsPerBatch / 2
|
||||
var pairs_capacity uint = max_num_pairs + 1
|
||||
var pairs []histogramPair = make([]histogramPair, pairs_capacity)
|
||||
var pos uint = 0
|
||||
var clusters []uint32
|
||||
var num_final_clusters uint
|
||||
var new_index []uint32
|
||||
var i uint
|
||||
var sizes = [histogramsPerBatch]uint32{0}
|
||||
var new_clusters = [histogramsPerBatch]uint32{0}
|
||||
var symbols = [histogramsPerBatch]uint32{0}
|
||||
var remap = [histogramsPerBatch]uint32{0}
|
||||
|
||||
for i := 0; i < int(num_blocks); i++ {
|
||||
block_lengths[i] = 0
|
||||
}
|
||||
{
|
||||
var block_idx uint = 0
|
||||
for i = 0; i < length; i++ {
|
||||
assert(block_idx < num_blocks)
|
||||
block_lengths[block_idx]++
|
||||
if i+1 == length || block_ids[i] != block_ids[i+1] {
|
||||
block_idx++
|
||||
}
|
||||
}
|
||||
|
||||
assert(block_idx == num_blocks)
|
||||
}
|
||||
|
||||
for i = 0; i < num_blocks; i += histogramsPerBatch {
|
||||
var num_to_combine uint = brotli_min_size_t(num_blocks-i, histogramsPerBatch)
|
||||
var num_new_clusters uint
|
||||
var j uint
|
||||
for j = 0; j < num_to_combine; j++ {
|
||||
var k uint
|
||||
histogramClearLiteral(&histograms[j])
|
||||
for k = 0; uint32(k) < block_lengths[i+j]; k++ {
|
||||
histogramAddLiteral(&histograms[j], uint(data[pos]))
|
||||
pos++
|
||||
}
|
||||
|
||||
histograms[j].bit_cost_ = populationCostLiteral(&histograms[j])
|
||||
new_clusters[j] = uint32(j)
|
||||
symbols[j] = uint32(j)
|
||||
sizes[j] = 1
|
||||
}
|
||||
|
||||
num_new_clusters = histogramCombineLiteral(histograms, sizes[:], symbols[:], new_clusters[:], []histogramPair(pairs), num_to_combine, num_to_combine, histogramsPerBatch, max_num_pairs)
|
||||
if all_histograms_capacity < (all_histograms_size + num_new_clusters) {
|
||||
var _new_size uint
|
||||
if all_histograms_capacity == 0 {
|
||||
_new_size = all_histograms_size + num_new_clusters
|
||||
} else {
|
||||
_new_size = all_histograms_capacity
|
||||
}
|
||||
var new_array []histogramLiteral
|
||||
for _new_size < (all_histograms_size + num_new_clusters) {
|
||||
_new_size *= 2
|
||||
}
|
||||
new_array = make([]histogramLiteral, _new_size)
|
||||
if all_histograms_capacity != 0 {
|
||||
copy(new_array, all_histograms[:all_histograms_capacity])
|
||||
}
|
||||
|
||||
all_histograms = new_array
|
||||
all_histograms_capacity = _new_size
|
||||
}
|
||||
|
||||
brotli_ensure_capacity_uint32_t(&cluster_size, &cluster_size_capacity, cluster_size_size+num_new_clusters)
|
||||
for j = 0; j < num_new_clusters; j++ {
|
||||
all_histograms[all_histograms_size] = histograms[new_clusters[j]]
|
||||
all_histograms_size++
|
||||
cluster_size[cluster_size_size] = sizes[new_clusters[j]]
|
||||
cluster_size_size++
|
||||
remap[new_clusters[j]] = uint32(j)
|
||||
}
|
||||
|
||||
for j = 0; j < num_to_combine; j++ {
|
||||
histogram_symbols[i+j] = uint32(num_clusters) + remap[symbols[j]]
|
||||
}
|
||||
|
||||
num_clusters += num_new_clusters
|
||||
assert(num_clusters == cluster_size_size)
|
||||
assert(num_clusters == all_histograms_size)
|
||||
}
|
||||
|
||||
histograms = nil
|
||||
|
||||
max_num_pairs = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters)
|
||||
if pairs_capacity < max_num_pairs+1 {
|
||||
pairs = nil
|
||||
pairs = make([]histogramPair, (max_num_pairs + 1))
|
||||
}
|
||||
|
||||
clusters = make([]uint32, num_clusters)
|
||||
for i = 0; i < num_clusters; i++ {
|
||||
clusters[i] = uint32(i)
|
||||
}
|
||||
|
||||
num_final_clusters = histogramCombineLiteral(all_histograms, cluster_size, histogram_symbols, clusters, pairs, num_clusters, num_blocks, maxNumberOfBlockTypes, max_num_pairs)
|
||||
pairs = nil
|
||||
cluster_size = nil
|
||||
|
||||
new_index = make([]uint32, num_clusters)
|
||||
for i = 0; i < num_clusters; i++ {
|
||||
new_index[i] = clusterBlocksLiteral_kInvalidIndex
|
||||
}
|
||||
pos = 0
|
||||
{
|
||||
var next_index uint32 = 0
|
||||
for i = 0; i < num_blocks; i++ {
|
||||
var histo histogramLiteral
|
||||
var j uint
|
||||
var best_out uint32
|
||||
var best_bits float64
|
||||
histogramClearLiteral(&histo)
|
||||
for j = 0; uint32(j) < block_lengths[i]; j++ {
|
||||
histogramAddLiteral(&histo, uint(data[pos]))
|
||||
pos++
|
||||
}
|
||||
|
||||
if i == 0 {
|
||||
best_out = histogram_symbols[0]
|
||||
} else {
|
||||
best_out = histogram_symbols[i-1]
|
||||
}
|
||||
best_bits = histogramBitCostDistanceLiteral(&histo, &all_histograms[best_out])
|
||||
for j = 0; j < num_final_clusters; j++ {
|
||||
var cur_bits float64 = histogramBitCostDistanceLiteral(&histo, &all_histograms[clusters[j]])
|
||||
if cur_bits < best_bits {
|
||||
best_bits = cur_bits
|
||||
best_out = clusters[j]
|
||||
}
|
||||
}
|
||||
|
||||
histogram_symbols[i] = best_out
|
||||
if new_index[best_out] == clusterBlocksLiteral_kInvalidIndex {
|
||||
new_index[best_out] = next_index
|
||||
next_index++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
clusters = nil
|
||||
all_histograms = nil
|
||||
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, num_blocks)
|
||||
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, num_blocks)
|
||||
{
|
||||
var cur_length uint32 = 0
|
||||
var block_idx uint = 0
|
||||
var max_type byte = 0
|
||||
for i = 0; i < num_blocks; i++ {
|
||||
cur_length += block_lengths[i]
|
||||
if i+1 == num_blocks || histogram_symbols[i] != histogram_symbols[i+1] {
|
||||
var id byte = byte(new_index[histogram_symbols[i]])
|
||||
split.types[block_idx] = id
|
||||
split.lengths[block_idx] = cur_length
|
||||
max_type = brotli_max_uint8_t(max_type, id)
|
||||
cur_length = 0
|
||||
block_idx++
|
||||
}
|
||||
}
|
||||
|
||||
split.num_blocks = block_idx
|
||||
split.num_types = uint(max_type) + 1
|
||||
}
|
||||
|
||||
new_index = nil
|
||||
block_lengths = nil
|
||||
histogram_symbols = nil
|
||||
}
|
||||
|
||||
func splitByteVectorLiteral(data []byte, length uint, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) {
|
||||
var data_size uint = histogramDataSizeLiteral()
|
||||
var num_histograms uint = length/literals_per_histogram + 1
|
||||
var histograms []histogramLiteral
|
||||
if num_histograms > max_histograms {
|
||||
num_histograms = max_histograms
|
||||
}
|
||||
|
||||
if length == 0 {
|
||||
split.num_types = 1
|
||||
return
|
||||
} else if length < kMinLengthForBlockSplitting {
|
||||
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, split.num_blocks+1)
|
||||
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, split.num_blocks+1)
|
||||
split.num_types = 1
|
||||
split.types[split.num_blocks] = 0
|
||||
split.lengths[split.num_blocks] = uint32(length)
|
||||
split.num_blocks++
|
||||
return
|
||||
}
|
||||
|
||||
histograms = make([]histogramLiteral, num_histograms)
|
||||
|
||||
/* Find good entropy codes. */
|
||||
initialEntropyCodesLiteral(data, length, sampling_stride_length, num_histograms, histograms)
|
||||
|
||||
refineEntropyCodesLiteral(data, length, sampling_stride_length, num_histograms, histograms)
|
||||
{
|
||||
var block_ids []byte = make([]byte, length)
|
||||
var num_blocks uint = 0
|
||||
var bitmaplen uint = (num_histograms + 7) >> 3
|
||||
var insert_cost []float64 = make([]float64, (data_size * num_histograms))
|
||||
var cost []float64 = make([]float64, num_histograms)
|
||||
var switch_signal []byte = make([]byte, (length * bitmaplen))
|
||||
var new_id []uint16 = make([]uint16, num_histograms)
|
||||
var iters uint
|
||||
if params.quality < hqZopflificationQuality {
|
||||
iters = 3
|
||||
} else {
|
||||
iters = 10
|
||||
}
|
||||
/* Find a good path through literals with the good entropy codes. */
|
||||
|
||||
var i uint
|
||||
for i = 0; i < iters; i++ {
|
||||
num_blocks = findBlocksLiteral(data, length, block_switch_cost, num_histograms, histograms, insert_cost, cost, switch_signal, block_ids)
|
||||
num_histograms = remapBlockIdsLiteral(block_ids, length, new_id, num_histograms)
|
||||
buildBlockHistogramsLiteral(data, length, block_ids, num_histograms, histograms)
|
||||
}
|
||||
|
||||
insert_cost = nil
|
||||
cost = nil
|
||||
switch_signal = nil
|
||||
new_id = nil
|
||||
histograms = nil
|
||||
clusterBlocksLiteral(data, length, num_blocks, block_ids, split)
|
||||
block_ids = nil
|
||||
}
|
||||
}
|
||||
1539
vendor/github.com/andybalholm/brotli/brotli_bit_stream.go
generated
vendored
Normal file
1539
vendor/github.com/andybalholm/brotli/brotli_bit_stream.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
30
vendor/github.com/andybalholm/brotli/cluster.go
generated
vendored
Normal file
30
vendor/github.com/andybalholm/brotli/cluster.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
package brotli
|
||||
|
||||
/* Copyright 2013 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* Functions for clustering similar histograms together. */
|
||||
|
||||
type histogramPair struct {
|
||||
idx1 uint32
|
||||
idx2 uint32
|
||||
cost_combo float64
|
||||
cost_diff float64
|
||||
}
|
||||
|
||||
func histogramPairIsLess(p1 *histogramPair, p2 *histogramPair) bool {
|
||||
if p1.cost_diff != p2.cost_diff {
|
||||
return p1.cost_diff > p2.cost_diff
|
||||
}
|
||||
|
||||
return (p1.idx2 - p1.idx1) > (p2.idx2 - p2.idx1)
|
||||
}
|
||||
|
||||
/* Returns entropy reduction of the context map when we combine two clusters. */
|
||||
func clusterCostDiff(size_a uint, size_b uint) float64 {
|
||||
var size_c uint = size_a + size_b
|
||||
return float64(size_a)*fastLog2(size_a) + float64(size_b)*fastLog2(size_b) - float64(size_c)*fastLog2(size_c)
|
||||
}
|
||||
164
vendor/github.com/andybalholm/brotli/cluster_command.go
generated
vendored
Normal file
164
vendor/github.com/andybalholm/brotli/cluster_command.go
generated
vendored
Normal file
@ -0,0 +1,164 @@
|
||||
package brotli
|
||||
|
||||
/* Copyright 2013 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if
|
||||
it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. */
|
||||
func compareAndPushToQueueCommand(out []histogramCommand, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) {
|
||||
var is_good_pair bool = false
|
||||
var p histogramPair
|
||||
p.idx2 = 0
|
||||
p.idx1 = p.idx2
|
||||
p.cost_combo = 0
|
||||
p.cost_diff = p.cost_combo
|
||||
if idx1 == idx2 {
|
||||
return
|
||||
}
|
||||
|
||||
if idx2 < idx1 {
|
||||
var t uint32 = idx2
|
||||
idx2 = idx1
|
||||
idx1 = t
|
||||
}
|
||||
|
||||
p.idx1 = idx1
|
||||
p.idx2 = idx2
|
||||
p.cost_diff = 0.5 * clusterCostDiff(uint(cluster_size[idx1]), uint(cluster_size[idx2]))
|
||||
p.cost_diff -= out[idx1].bit_cost_
|
||||
p.cost_diff -= out[idx2].bit_cost_
|
||||
|
||||
if out[idx1].total_count_ == 0 {
|
||||
p.cost_combo = out[idx2].bit_cost_
|
||||
is_good_pair = true
|
||||
} else if out[idx2].total_count_ == 0 {
|
||||
p.cost_combo = out[idx1].bit_cost_
|
||||
is_good_pair = true
|
||||
} else {
|
||||
var threshold float64
|
||||
if *num_pairs == 0 {
|
||||
threshold = 1e99
|
||||
} else {
|
||||
threshold = brotli_max_double(0.0, pairs[0].cost_diff)
|
||||
}
|
||||
var combo histogramCommand = out[idx1]
|
||||
var cost_combo float64
|
||||
histogramAddHistogramCommand(&combo, &out[idx2])
|
||||
cost_combo = populationCostCommand(&combo)
|
||||
if cost_combo < threshold-p.cost_diff {
|
||||
p.cost_combo = cost_combo
|
||||
is_good_pair = true
|
||||
}
|
||||
}
|
||||
|
||||
if is_good_pair {
|
||||
p.cost_diff += p.cost_combo
|
||||
if *num_pairs > 0 && histogramPairIsLess(&pairs[0], &p) {
|
||||
/* Replace the top of the queue if needed. */
|
||||
if *num_pairs < max_num_pairs {
|
||||
pairs[*num_pairs] = pairs[0]
|
||||
(*num_pairs)++
|
||||
}
|
||||
|
||||
pairs[0] = p
|
||||
} else if *num_pairs < max_num_pairs {
|
||||
pairs[*num_pairs] = p
|
||||
(*num_pairs)++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func histogramCombineCommand(out []histogramCommand, cluster_size []uint32, symbols []uint32, clusters []uint32, pairs []histogramPair, num_clusters uint, symbols_size uint, max_clusters uint, max_num_pairs uint) uint {
|
||||
var cost_diff_threshold float64 = 0.0
|
||||
var min_cluster_size uint = 1
|
||||
var num_pairs uint = 0
|
||||
{
|
||||
/* We maintain a vector of histogram pairs, with the property that the pair
|
||||
with the maximum bit cost reduction is the first. */
|
||||
var idx1 uint
|
||||
for idx1 = 0; idx1 < num_clusters; idx1++ {
|
||||
var idx2 uint
|
||||
for idx2 = idx1 + 1; idx2 < num_clusters; idx2++ {
|
||||
compareAndPushToQueueCommand(out, cluster_size, clusters[idx1], clusters[idx2], max_num_pairs, pairs[0:], &num_pairs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for num_clusters > min_cluster_size {
|
||||
var best_idx1 uint32
|
||||
var best_idx2 uint32
|
||||
var i uint
|
||||
if pairs[0].cost_diff >= cost_diff_threshold {
|
||||
cost_diff_threshold = 1e99
|
||||
min_cluster_size = max_clusters
|
||||
continue
|
||||
}
|
||||
|
||||
/* Take the best pair from the top of heap. */
|
||||
best_idx1 = pairs[0].idx1
|
||||
|
||||
best_idx2 = pairs[0].idx2
|
||||
histogramAddHistogramCommand(&out[best_idx1], &out[best_idx2])
|
||||
out[best_idx1].bit_cost_ = pairs[0].cost_combo
|
||||
cluster_size[best_idx1] += cluster_size[best_idx2]
|
||||
for i = 0; i < symbols_size; i++ {
|
||||
if symbols[i] == best_idx2 {
|
||||
symbols[i] = best_idx1
|
||||
}
|
||||
}
|
||||
|
||||
for i = 0; i < num_clusters; i++ {
|
||||
if clusters[i] == best_idx2 {
|
||||
copy(clusters[i:], clusters[i+1:][:num_clusters-i-1])
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
num_clusters--
|
||||
{
|
||||
/* Remove pairs intersecting the just combined best pair. */
|
||||
var copy_to_idx uint = 0
|
||||
for i = 0; i < num_pairs; i++ {
|
||||
var p *histogramPair = &pairs[i]
|
||||
if p.idx1 == best_idx1 || p.idx2 == best_idx1 || p.idx1 == best_idx2 || p.idx2 == best_idx2 {
|
||||
/* Remove invalid pair from the queue. */
|
||||
continue
|
||||
}
|
||||
|
||||
if histogramPairIsLess(&pairs[0], p) {
|
||||
/* Replace the top of the queue if needed. */
|
||||
var front histogramPair = pairs[0]
|
||||
pairs[0] = *p
|
||||
pairs[copy_to_idx] = front
|
||||
} else {
|
||||
pairs[copy_to_idx] = *p
|
||||
}
|
||||
|
||||
copy_to_idx++
|
||||
}
|
||||
|
||||
num_pairs = copy_to_idx
|
||||
}
|
||||
|
||||
/* Push new pairs formed with the combined histogram to the heap. */
|
||||
for i = 0; i < num_clusters; i++ {
|
||||
compareAndPushToQueueCommand(out, cluster_size, best_idx1, clusters[i], max_num_pairs, pairs[0:], &num_pairs)
|
||||
}
|
||||
}
|
||||
|
||||
return num_clusters
|
||||
}
|
||||
|
||||
/* What is the bit cost of moving histogram from cur_symbol to candidate. */
|
||||
func histogramBitCostDistanceCommand(histogram *histogramCommand, candidate *histogramCommand) float64 {
|
||||
if histogram.total_count_ == 0 {
|
||||
return 0.0
|
||||
} else {
|
||||
var tmp histogramCommand = *histogram
|
||||
histogramAddHistogramCommand(&tmp, candidate)
|
||||
return populationCostCommand(&tmp) - candidate.bit_cost_
|
||||
}
|
||||
}
|
||||
326
vendor/github.com/andybalholm/brotli/cluster_distance.go
generated
vendored
Normal file
326
vendor/github.com/andybalholm/brotli/cluster_distance.go
generated
vendored
Normal file
@ -0,0 +1,326 @@
|
||||
package brotli
|
||||
|
||||
import "math"
|
||||
|
||||
/* Copyright 2013 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if
|
||||
it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. */
|
||||
func compareAndPushToQueueDistance(out []histogramDistance, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) {
|
||||
var is_good_pair bool = false
|
||||
var p histogramPair
|
||||
p.idx2 = 0
|
||||
p.idx1 = p.idx2
|
||||
p.cost_combo = 0
|
||||
p.cost_diff = p.cost_combo
|
||||
if idx1 == idx2 {
|
||||
return
|
||||
}
|
||||
|
||||
if idx2 < idx1 {
|
||||
var t uint32 = idx2
|
||||
idx2 = idx1
|
||||
idx1 = t
|
||||
}
|
||||
|
||||
p.idx1 = idx1
|
||||
p.idx2 = idx2
|
||||
p.cost_diff = 0.5 * clusterCostDiff(uint(cluster_size[idx1]), uint(cluster_size[idx2]))
|
||||
p.cost_diff -= out[idx1].bit_cost_
|
||||
p.cost_diff -= out[idx2].bit_cost_
|
||||
|
||||
if out[idx1].total_count_ == 0 {
|
||||
p.cost_combo = out[idx2].bit_cost_
|
||||
is_good_pair = true
|
||||
} else if out[idx2].total_count_ == 0 {
|
||||
p.cost_combo = out[idx1].bit_cost_
|
||||
is_good_pair = true
|
||||
} else {
|
||||
var threshold float64
|
||||
if *num_pairs == 0 {
|
||||
threshold = 1e99
|
||||
} else {
|
||||
threshold = brotli_max_double(0.0, pairs[0].cost_diff)
|
||||
}
|
||||
var combo histogramDistance = out[idx1]
|
||||
var cost_combo float64
|
||||
histogramAddHistogramDistance(&combo, &out[idx2])
|
||||
cost_combo = populationCostDistance(&combo)
|
||||
if cost_combo < threshold-p.cost_diff {
|
||||
p.cost_combo = cost_combo
|
||||
is_good_pair = true
|
||||
}
|
||||
}
|
||||
|
||||
if is_good_pair {
|
||||
p.cost_diff += p.cost_combo
|
||||
if *num_pairs > 0 && histogramPairIsLess(&pairs[0], &p) {
|
||||
/* Replace the top of the queue if needed. */
|
||||
if *num_pairs < max_num_pairs {
|
||||
pairs[*num_pairs] = pairs[0]
|
||||
(*num_pairs)++
|
||||
}
|
||||
|
||||
pairs[0] = p
|
||||
} else if *num_pairs < max_num_pairs {
|
||||
pairs[*num_pairs] = p
|
||||
(*num_pairs)++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func histogramCombineDistance(out []histogramDistance, cluster_size []uint32, symbols []uint32, clusters []uint32, pairs []histogramPair, num_clusters uint, symbols_size uint, max_clusters uint, max_num_pairs uint) uint {
|
||||
var cost_diff_threshold float64 = 0.0
|
||||
var min_cluster_size uint = 1
|
||||
var num_pairs uint = 0
|
||||
{
|
||||
/* We maintain a vector of histogram pairs, with the property that the pair
|
||||
with the maximum bit cost reduction is the first. */
|
||||
var idx1 uint
|
||||
for idx1 = 0; idx1 < num_clusters; idx1++ {
|
||||
var idx2 uint
|
||||
for idx2 = idx1 + 1; idx2 < num_clusters; idx2++ {
|
||||
compareAndPushToQueueDistance(out, cluster_size, clusters[idx1], clusters[idx2], max_num_pairs, pairs[0:], &num_pairs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for num_clusters > min_cluster_size {
|
||||
var best_idx1 uint32
|
||||
var best_idx2 uint32
|
||||
var i uint
|
||||
if pairs[0].cost_diff >= cost_diff_threshold {
|
||||
cost_diff_threshold = 1e99
|
||||
min_cluster_size = max_clusters
|
||||
continue
|
||||
}
|
||||
|
||||
/* Take the best pair from the top of heap. */
|
||||
best_idx1 = pairs[0].idx1
|
||||
|
||||
best_idx2 = pairs[0].idx2
|
||||
histogramAddHistogramDistance(&out[best_idx1], &out[best_idx2])
|
||||
out[best_idx1].bit_cost_ = pairs[0].cost_combo
|
||||
cluster_size[best_idx1] += cluster_size[best_idx2]
|
||||
for i = 0; i < symbols_size; i++ {
|
||||
if symbols[i] == best_idx2 {
|
||||
symbols[i] = best_idx1
|
||||
}
|
||||
}
|
||||
|
||||
for i = 0; i < num_clusters; i++ {
|
||||
if clusters[i] == best_idx2 {
|
||||
copy(clusters[i:], clusters[i+1:][:num_clusters-i-1])
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
num_clusters--
|
||||
{
|
||||
/* Remove pairs intersecting the just combined best pair. */
|
||||
var copy_to_idx uint = 0
|
||||
for i = 0; i < num_pairs; i++ {
|
||||
var p *histogramPair = &pairs[i]
|
||||
if p.idx1 == best_idx1 || p.idx2 == best_idx1 || p.idx1 == best_idx2 || p.idx2 == best_idx2 {
|
||||
/* Remove invalid pair from the queue. */
|
||||
continue
|
||||
}
|
||||
|
||||
if histogramPairIsLess(&pairs[0], p) {
|
||||
/* Replace the top of the queue if needed. */
|
||||
var front histogramPair = pairs[0]
|
||||
pairs[0] = *p
|
||||
pairs[copy_to_idx] = front
|
||||
} else {
|
||||
pairs[copy_to_idx] = *p
|
||||
}
|
||||
|
||||
copy_to_idx++
|
||||
}
|
||||
|
||||
num_pairs = copy_to_idx
|
||||
}
|
||||
|
||||
/* Push new pairs formed with the combined histogram to the heap. */
|
||||
for i = 0; i < num_clusters; i++ {
|
||||
compareAndPushToQueueDistance(out, cluster_size, best_idx1, clusters[i], max_num_pairs, pairs[0:], &num_pairs)
|
||||
}
|
||||
}
|
||||
|
||||
return num_clusters
|
||||
}
|
||||
|
||||
/* What is the bit cost of moving histogram from cur_symbol to candidate. */
|
||||
func histogramBitCostDistanceDistance(histogram *histogramDistance, candidate *histogramDistance) float64 {
|
||||
if histogram.total_count_ == 0 {
|
||||
return 0.0
|
||||
} else {
|
||||
var tmp histogramDistance = *histogram
|
||||
histogramAddHistogramDistance(&tmp, candidate)
|
||||
return populationCostDistance(&tmp) - candidate.bit_cost_
|
||||
}
|
||||
}
|
||||
|
||||
/* Find the best 'out' histogram for each of the 'in' histograms.
|
||||
When called, clusters[0..num_clusters) contains the unique values from
|
||||
symbols[0..in_size), but this property is not preserved in this function.
|
||||
Note: we assume that out[]->bit_cost_ is already up-to-date. */
|
||||
func histogramRemapDistance(in []histogramDistance, in_size uint, clusters []uint32, num_clusters uint, out []histogramDistance, symbols []uint32) {
|
||||
var i uint
|
||||
for i = 0; i < in_size; i++ {
|
||||
var best_out uint32
|
||||
if i == 0 {
|
||||
best_out = symbols[0]
|
||||
} else {
|
||||
best_out = symbols[i-1]
|
||||
}
|
||||
var best_bits float64 = histogramBitCostDistanceDistance(&in[i], &out[best_out])
|
||||
var j uint
|
||||
for j = 0; j < num_clusters; j++ {
|
||||
var cur_bits float64 = histogramBitCostDistanceDistance(&in[i], &out[clusters[j]])
|
||||
if cur_bits < best_bits {
|
||||
best_bits = cur_bits
|
||||
best_out = clusters[j]
|
||||
}
|
||||
}
|
||||
|
||||
symbols[i] = best_out
|
||||
}
|
||||
|
||||
/* Recompute each out based on raw and symbols. */
|
||||
for i = 0; i < num_clusters; i++ {
|
||||
histogramClearDistance(&out[clusters[i]])
|
||||
}
|
||||
|
||||
for i = 0; i < in_size; i++ {
|
||||
histogramAddHistogramDistance(&out[symbols[i]], &in[i])
|
||||
}
|
||||
}
|
||||
|
||||
/* Reorders elements of the out[0..length) array and changes values in
|
||||
symbols[0..length) array in the following way:
|
||||
* when called, symbols[] contains indexes into out[], and has N unique
|
||||
values (possibly N < length)
|
||||
* on return, symbols'[i] = f(symbols[i]) and
|
||||
out'[symbols'[i]] = out[symbols[i]], for each 0 <= i < length,
|
||||
where f is a bijection between the range of symbols[] and [0..N), and
|
||||
the first occurrences of values in symbols'[i] come in consecutive
|
||||
increasing order.
|
||||
Returns N, the number of unique values in symbols[]. */
|
||||
|
||||
var histogramReindexDistance_kInvalidIndex uint32 = math.MaxUint32
|
||||
|
||||
func histogramReindexDistance(out []histogramDistance, symbols []uint32, length uint) uint {
|
||||
var new_index []uint32 = make([]uint32, length)
|
||||
var next_index uint32
|
||||
var tmp []histogramDistance
|
||||
var i uint
|
||||
for i = 0; i < length; i++ {
|
||||
new_index[i] = histogramReindexDistance_kInvalidIndex
|
||||
}
|
||||
|
||||
next_index = 0
|
||||
for i = 0; i < length; i++ {
|
||||
if new_index[symbols[i]] == histogramReindexDistance_kInvalidIndex {
|
||||
new_index[symbols[i]] = next_index
|
||||
next_index++
|
||||
}
|
||||
}
|
||||
|
||||
/* TODO: by using idea of "cycle-sort" we can avoid allocation of
|
||||
tmp and reduce the number of copying by the factor of 2. */
|
||||
tmp = make([]histogramDistance, next_index)
|
||||
|
||||
next_index = 0
|
||||
for i = 0; i < length; i++ {
|
||||
if new_index[symbols[i]] == next_index {
|
||||
tmp[next_index] = out[symbols[i]]
|
||||
next_index++
|
||||
}
|
||||
|
||||
symbols[i] = new_index[symbols[i]]
|
||||
}
|
||||
|
||||
new_index = nil
|
||||
for i = 0; uint32(i) < next_index; i++ {
|
||||
out[i] = tmp[i]
|
||||
}
|
||||
|
||||
tmp = nil
|
||||
return uint(next_index)
|
||||
}
|
||||
|
||||
func clusterHistogramsDistance(in []histogramDistance, in_size uint, max_histograms uint, out []histogramDistance, out_size *uint, histogram_symbols []uint32) {
|
||||
var cluster_size []uint32 = make([]uint32, in_size)
|
||||
var clusters []uint32 = make([]uint32, in_size)
|
||||
var num_clusters uint = 0
|
||||
var max_input_histograms uint = 64
|
||||
var pairs_capacity uint = max_input_histograms * max_input_histograms / 2
|
||||
var pairs []histogramPair = make([]histogramPair, (pairs_capacity + 1))
|
||||
var i uint
|
||||
|
||||
/* For the first pass of clustering, we allow all pairs. */
|
||||
for i = 0; i < in_size; i++ {
|
||||
cluster_size[i] = 1
|
||||
}
|
||||
|
||||
for i = 0; i < in_size; i++ {
|
||||
out[i] = in[i]
|
||||
out[i].bit_cost_ = populationCostDistance(&in[i])
|
||||
histogram_symbols[i] = uint32(i)
|
||||
}
|
||||
|
||||
for i = 0; i < in_size; i += max_input_histograms {
|
||||
var num_to_combine uint = brotli_min_size_t(in_size-i, max_input_histograms)
|
||||
var num_new_clusters uint
|
||||
var j uint
|
||||
for j = 0; j < num_to_combine; j++ {
|
||||
clusters[num_clusters+j] = uint32(i + j)
|
||||
}
|
||||
|
||||
num_new_clusters = histogramCombineDistance(out, cluster_size, histogram_symbols[i:], clusters[num_clusters:], pairs, num_to_combine, num_to_combine, max_histograms, pairs_capacity)
|
||||
num_clusters += num_new_clusters
|
||||
}
|
||||
{
|
||||
/* For the second pass, we limit the total number of histogram pairs.
|
||||
After this limit is reached, we only keep searching for the best pair. */
|
||||
var max_num_pairs uint = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters)
|
||||
if pairs_capacity < (max_num_pairs + 1) {
|
||||
var _new_size uint
|
||||
if pairs_capacity == 0 {
|
||||
_new_size = max_num_pairs + 1
|
||||
} else {
|
||||
_new_size = pairs_capacity
|
||||
}
|
||||
var new_array []histogramPair
|
||||
for _new_size < (max_num_pairs + 1) {
|
||||
_new_size *= 2
|
||||
}
|
||||
new_array = make([]histogramPair, _new_size)
|
||||
if pairs_capacity != 0 {
|
||||
copy(new_array, pairs[:pairs_capacity])
|
||||
}
|
||||
|
||||
pairs = new_array
|
||||
pairs_capacity = _new_size
|
||||
}
|
||||
|
||||
/* Collapse similar histograms. */
|
||||
num_clusters = histogramCombineDistance(out, cluster_size, histogram_symbols, clusters, pairs, num_clusters, in_size, max_histograms, max_num_pairs)
|
||||
}
|
||||
|
||||
pairs = nil
|
||||
cluster_size = nil
|
||||
|
||||
/* Find the optimal map from original histograms to the final ones. */
|
||||
histogramRemapDistance(in, in_size, clusters, num_clusters, out, histogram_symbols)
|
||||
|
||||
clusters = nil
|
||||
|
||||
/* Convert the context map to a canonical form. */
|
||||
*out_size = histogramReindexDistance(out, histogram_symbols, in_size)
|
||||
}
|
||||
326
vendor/github.com/andybalholm/brotli/cluster_literal.go
generated
vendored
Normal file
326
vendor/github.com/andybalholm/brotli/cluster_literal.go
generated
vendored
Normal file
@ -0,0 +1,326 @@
|
||||
package brotli
|
||||
|
||||
import "math"
|
||||
|
||||
/* Copyright 2013 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if
|
||||
it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. */
|
||||
func compareAndPushToQueueLiteral(out []histogramLiteral, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) {
|
||||
var is_good_pair bool = false
|
||||
var p histogramPair
|
||||
p.idx2 = 0
|
||||
p.idx1 = p.idx2
|
||||
p.cost_combo = 0
|
||||
p.cost_diff = p.cost_combo
|
||||
if idx1 == idx2 {
|
||||
return
|
||||
}
|
||||
|
||||
if idx2 < idx1 {
|
||||
var t uint32 = idx2
|
||||
idx2 = idx1
|
||||
idx1 = t
|
||||
}
|
||||
|
||||
p.idx1 = idx1
|
||||
p.idx2 = idx2
|
||||
p.cost_diff = 0.5 * clusterCostDiff(uint(cluster_size[idx1]), uint(cluster_size[idx2]))
|
||||
p.cost_diff -= out[idx1].bit_cost_
|
||||
p.cost_diff -= out[idx2].bit_cost_
|
||||
|
||||
if out[idx1].total_count_ == 0 {
|
||||
p.cost_combo = out[idx2].bit_cost_
|
||||
is_good_pair = true
|
||||
} else if out[idx2].total_count_ == 0 {
|
||||
p.cost_combo = out[idx1].bit_cost_
|
||||
is_good_pair = true
|
||||
} else {
|
||||
var threshold float64
|
||||
if *num_pairs == 0 {
|
||||
threshold = 1e99
|
||||
} else {
|
||||
threshold = brotli_max_double(0.0, pairs[0].cost_diff)
|
||||
}
|
||||
var combo histogramLiteral = out[idx1]
|
||||
var cost_combo float64
|
||||
histogramAddHistogramLiteral(&combo, &out[idx2])
|
||||
cost_combo = populationCostLiteral(&combo)
|
||||
if cost_combo < threshold-p.cost_diff {
|
||||
p.cost_combo = cost_combo
|
||||
is_good_pair = true
|
||||
}
|
||||
}
|
||||
|
||||
if is_good_pair {
|
||||
p.cost_diff += p.cost_combo
|
||||
if *num_pairs > 0 && histogramPairIsLess(&pairs[0], &p) {
|
||||
/* Replace the top of the queue if needed. */
|
||||
if *num_pairs < max_num_pairs {
|
||||
pairs[*num_pairs] = pairs[0]
|
||||
(*num_pairs)++
|
||||
}
|
||||
|
||||
pairs[0] = p
|
||||
} else if *num_pairs < max_num_pairs {
|
||||
pairs[*num_pairs] = p
|
||||
(*num_pairs)++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* histogramCombineLiteral greedily merges the num_clusters clusters listed in
   clusters[] (indexes into out[]) until no merge reduces the total bit cost,
   or until only max_clusters remain.  symbols[0..symbols_size) is updated so
   each entry points at the surviving cluster; cluster_size[] tracks merged
   sizes.  Returns the remaining number of clusters. */
func histogramCombineLiteral(out []histogramLiteral, cluster_size []uint32, symbols []uint32, clusters []uint32, pairs []histogramPair, num_clusters uint, symbols_size uint, max_clusters uint, max_num_pairs uint) uint {
	var cost_diff_threshold float64 = 0.0
	var min_cluster_size uint = 1
	var num_pairs uint = 0
	{
		/* We maintain a vector of histogram pairs, with the property that the pair
		   with the maximum bit cost reduction is the first. */
		var idx1 uint
		for idx1 = 0; idx1 < num_clusters; idx1++ {
			var idx2 uint
			for idx2 = idx1 + 1; idx2 < num_clusters; idx2++ {
				compareAndPushToQueueLiteral(out, cluster_size, clusters[idx1], clusters[idx2], max_num_pairs, pairs[0:], &num_pairs)
			}
		}
	}

	for num_clusters > min_cluster_size {
		var best_idx1 uint32
		var best_idx2 uint32
		var i uint
		if pairs[0].cost_diff >= cost_diff_threshold {
			/* No profitable merge left; from here on force merges down to
			   max_clusters regardless of cost. */
			cost_diff_threshold = 1e99
			min_cluster_size = max_clusters
			continue
		}

		/* Take the best pair from the top of heap. */
		best_idx1 = pairs[0].idx1

		best_idx2 = pairs[0].idx2
		histogramAddHistogramLiteral(&out[best_idx1], &out[best_idx2])
		out[best_idx1].bit_cost_ = pairs[0].cost_combo
		cluster_size[best_idx1] += cluster_size[best_idx2]
		/* Repoint every symbol of the absorbed cluster at the survivor. */
		for i = 0; i < symbols_size; i++ {
			if symbols[i] == best_idx2 {
				symbols[i] = best_idx1
			}
		}

		/* Drop best_idx2 from the active cluster list. */
		for i = 0; i < num_clusters; i++ {
			if clusters[i] == best_idx2 {
				copy(clusters[i:], clusters[i+1:][:num_clusters-i-1])
				break
			}
		}

		num_clusters--
		{
			/* Remove pairs intersecting the just combined best pair. */
			var copy_to_idx uint = 0
			for i = 0; i < num_pairs; i++ {
				var p *histogramPair = &pairs[i]
				if p.idx1 == best_idx1 || p.idx2 == best_idx1 || p.idx1 == best_idx2 || p.idx2 == best_idx2 {
					/* Remove invalid pair from the queue. */
					continue
				}

				if histogramPairIsLess(&pairs[0], p) {
					/* Replace the top of the queue if needed. */
					var front histogramPair = pairs[0]
					pairs[0] = *p
					pairs[copy_to_idx] = front
				} else {
					pairs[copy_to_idx] = *p
				}

				copy_to_idx++
			}

			num_pairs = copy_to_idx
		}

		/* Push new pairs formed with the combined histogram to the heap. */
		for i = 0; i < num_clusters; i++ {
			compareAndPushToQueueLiteral(out, cluster_size, best_idx1, clusters[i], max_num_pairs, pairs[0:], &num_pairs)
		}
	}

	return num_clusters
}
|
||||
|
||||
/* What is the bit cost of moving histogram from cur_symbol to candidate. */
func histogramBitCostDistanceLiteral(histogram *histogramLiteral, candidate *histogramLiteral) float64 {
	if histogram.total_count_ == 0 {
		/* An empty histogram merges anywhere for free. */
		return 0.0
	} else {
		/* Cost of the merged histogram minus what the candidate already pays. */
		var tmp histogramLiteral = *histogram
		histogramAddHistogramLiteral(&tmp, candidate)
		return populationCostLiteral(&tmp) - candidate.bit_cost_
	}
}
|
||||
|
||||
/* Find the best 'out' histogram for each of the 'in' histograms.
   When called, clusters[0..num_clusters) contains the unique values from
   symbols[0..in_size), but this property is not preserved in this function.
   Note: we assume that out[]->bit_cost_ is already up-to-date. */
func histogramRemapLiteral(in []histogramLiteral, in_size uint, clusters []uint32, num_clusters uint, out []histogramLiteral, symbols []uint32) {
	var i uint
	for i = 0; i < in_size; i++ {
		/* Seed the search with the previous element's assignment (or the
		   first symbol for i == 0); any valid cluster works as a start. */
		var best_out uint32
		if i == 0 {
			best_out = symbols[0]
		} else {
			best_out = symbols[i-1]
		}
		var best_bits float64 = histogramBitCostDistanceLiteral(&in[i], &out[best_out])
		var j uint
		for j = 0; j < num_clusters; j++ {
			var cur_bits float64 = histogramBitCostDistanceLiteral(&in[i], &out[clusters[j]])
			if cur_bits < best_bits {
				best_bits = cur_bits
				best_out = clusters[j]
			}
		}

		symbols[i] = best_out
	}

	/* Recompute each out based on raw and symbols. */
	for i = 0; i < num_clusters; i++ {
		histogramClearLiteral(&out[clusters[i]])
	}

	for i = 0; i < in_size; i++ {
		histogramAddHistogramLiteral(&out[symbols[i]], &in[i])
	}
}
|
||||
|
||||
/* Reorders elements of the out[0..length) array and changes values in
   symbols[0..length) array in the following way:
     * when called, symbols[] contains indexes into out[], and has N unique
       values (possibly N < length)
     * on return, symbols'[i] = f(symbols[i]) and
       out'[symbols'[i]] = out[symbols[i]], for each 0 <= i < length,
       where f is a bijection between the range of symbols[] and [0..N), and
       the first occurrences of values in symbols'[i] come in consecutive
       increasing order.
   Returns N, the number of unique values in symbols[]. */

var histogramReindexLiteral_kInvalidIndex uint32 = math.MaxUint32

func histogramReindexLiteral(out []histogramLiteral, symbols []uint32, length uint) uint {
	var new_index []uint32 = make([]uint32, length)
	var next_index uint32
	var tmp []histogramLiteral
	var i uint
	/* Mark every slot as not yet assigned a canonical index. */
	for i = 0; i < length; i++ {
		new_index[i] = histogramReindexLiteral_kInvalidIndex
	}

	/* First pass: hand out canonical indexes 0,1,2,... in order of first use. */
	next_index = 0
	for i = 0; i < length; i++ {
		if new_index[symbols[i]] == histogramReindexLiteral_kInvalidIndex {
			new_index[symbols[i]] = next_index
			next_index++
		}
	}

	/* TODO: by using idea of "cycle-sort" we can avoid allocation of
	   tmp and reduce the number of copying by the factor of 2. */
	tmp = make([]histogramLiteral, next_index)

	/* Second pass: copy each first-seen histogram into its canonical slot and
	   rewrite symbols[] through the mapping. */
	next_index = 0
	for i = 0; i < length; i++ {
		if new_index[symbols[i]] == next_index {
			tmp[next_index] = out[symbols[i]]
			next_index++
		}

		symbols[i] = new_index[symbols[i]]
	}

	new_index = nil
	for i = 0; uint32(i) < next_index; i++ {
		out[i] = tmp[i]
	}

	tmp = nil
	return uint(next_index)
}
|
||||
|
||||
/* clusterHistogramsLiteral clusters the in[0..in_size) histograms into at
   most max_histograms output histograms.  The clustered histograms are
   written to out[] (count in *out_size) and histogram_symbols[i] receives
   the canonical cluster index chosen for in[i]. */
func clusterHistogramsLiteral(in []histogramLiteral, in_size uint, max_histograms uint, out []histogramLiteral, out_size *uint, histogram_symbols []uint32) {
	var cluster_size []uint32 = make([]uint32, in_size)
	var clusters []uint32 = make([]uint32, in_size)
	var num_clusters uint = 0
	var max_input_histograms uint = 64
	var pairs_capacity uint = max_input_histograms * max_input_histograms / 2
	var pairs []histogramPair = make([]histogramPair, (pairs_capacity + 1))
	var i uint

	/* For the first pass of clustering, we allow all pairs. */
	for i = 0; i < in_size; i++ {
		cluster_size[i] = 1
	}

	/* Every input starts as its own cluster with an up-to-date bit cost. */
	for i = 0; i < in_size; i++ {
		out[i] = in[i]
		out[i].bit_cost_ = populationCostLiteral(&in[i])
		histogram_symbols[i] = uint32(i)
	}

	/* First pass: cluster each consecutive batch of up to 64 inputs
	   independently. */
	for i = 0; i < in_size; i += max_input_histograms {
		var num_to_combine uint = brotli_min_size_t(in_size-i, max_input_histograms)
		var num_new_clusters uint
		var j uint
		for j = 0; j < num_to_combine; j++ {
			clusters[num_clusters+j] = uint32(i + j)
		}

		num_new_clusters = histogramCombineLiteral(out, cluster_size, histogram_symbols[i:], clusters[num_clusters:], pairs, num_to_combine, num_to_combine, max_histograms, pairs_capacity)
		num_clusters += num_new_clusters
	}
	{
		/* For the second pass, we limit the total number of histogram pairs.
		   After this limit is reached, we only keep searching for the best pair. */
		var max_num_pairs uint = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters)
		/* Grow pairs[] (by doubling) until it holds max_num_pairs + 1 entries. */
		if pairs_capacity < (max_num_pairs + 1) {
			var _new_size uint
			if pairs_capacity == 0 {
				_new_size = max_num_pairs + 1
			} else {
				_new_size = pairs_capacity
			}
			var new_array []histogramPair
			for _new_size < (max_num_pairs + 1) {
				_new_size *= 2
			}
			new_array = make([]histogramPair, _new_size)
			if pairs_capacity != 0 {
				copy(new_array, pairs[:pairs_capacity])
			}

			pairs = new_array
			pairs_capacity = _new_size
		}

		/* Collapse similar histograms. */
		num_clusters = histogramCombineLiteral(out, cluster_size, histogram_symbols, clusters, pairs, num_clusters, in_size, max_histograms, max_num_pairs)
	}

	pairs = nil
	cluster_size = nil

	/* Find the optimal map from original histograms to the final ones. */
	histogramRemapLiteral(in, in_size, clusters, num_clusters, out, histogram_symbols)

	clusters = nil

	/* Convert the context map to a canonical form. */
	*out_size = histogramReindexLiteral(out, histogram_symbols, in_size)
}
|
||||
254
vendor/github.com/andybalholm/brotli/command.go
generated
vendored
Normal file
254
vendor/github.com/andybalholm/brotli/command.go
generated
vendored
Normal file
@ -0,0 +1,254 @@
|
||||
package brotli
|
||||
|
||||
/* Base values and extra-bit counts for the 24 insert-length and 24
   copy-length prefix codes (brotli format spec, RFC 7932, section 5):
   insert length = kInsBase[code] + <kInsExtra[code] extra bits>,
   copy length   = kCopyBase[code] + <kCopyExtra[code] extra bits>. */
var kInsBase = []uint32{
	0, 1, 2, 3, 4, 5, 6, 8,
	10, 14, 18, 26, 34, 50, 66, 98,
	130, 194, 322, 578, 1090, 2114, 6210, 22594,
}

var kInsExtra = []uint32{
	0, 0, 0, 0, 0, 0, 1, 1,
	2, 2, 3, 3, 4, 4, 5, 5,
	6, 7, 8, 9, 10, 12, 14, 24,
}

var kCopyBase = []uint32{
	2, 3, 4, 5, 6, 7, 8, 9,
	10, 12, 14, 18, 22, 30, 38, 54,
	70, 102, 134, 198, 326, 582, 1094, 2118,
}

var kCopyExtra = []uint32{
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 2, 2, 3, 3, 4, 4,
	5, 5, 6, 7, 8, 9, 10, 24,
}
|
||||
|
||||
/* getInsertLengthCode maps an insert length to its prefix code 0..23
   (the inverse of the kInsBase/kInsExtra tables). */
func getInsertLengthCode(insertlen uint) uint16 {
	if insertlen < 6 {
		return uint16(insertlen)
	} else if insertlen < 130 {
		/* Codes 6..13: two codes per power-of-two range above base 2. */
		var nbits uint32 = log2FloorNonZero(insertlen-2) - 1
		return uint16((nbits << 1) + uint32((insertlen-2)>>nbits) + 2)
	} else if insertlen < 2114 {
		/* Codes 14..20: one code per power-of-two range above base 66. */
		return uint16(log2FloorNonZero(insertlen-66) + 10)
	} else if insertlen < 6210 {
		return 21
	} else if insertlen < 22594 {
		return 22
	} else {
		return 23
	}
}
|
||||
|
||||
/* getCopyLengthCode maps a copy length (>= 2) to its prefix code 0..23
   (the inverse of the kCopyBase/kCopyExtra tables). */
func getCopyLengthCode(copylen uint) uint16 {
	if copylen < 10 {
		return uint16(copylen - 2)
	} else if copylen < 134 {
		/* Codes 8..15: two codes per power-of-two range above base 6. */
		var nbits uint32 = log2FloorNonZero(copylen-6) - 1
		return uint16((nbits << 1) + uint32((copylen-6)>>nbits) + 4)
	} else if copylen < 2118 {
		/* Codes 16..22: one code per power-of-two range above base 70. */
		return uint16(log2FloorNonZero(copylen-70) + 12)
	} else {
		return 23
	}
}
|
||||
|
||||
// combineLengthCodes merges an insert-length code and a copy-length code into
// a single insert-and-copy command prefix code.  use_last_distance selects
// the command-code cells that imply "reuse the last distance".
func combineLengthCodes(inscode uint16, copycode uint16, use_last_distance bool) uint16 {
	low := (inscode&0x7)<<3 | copycode&0x7
	if use_last_distance && inscode < 8 && copycode < 16 {
		// Cells 0 and 1 of the command-code table (implicit last distance).
		if copycode >= 8 {
			return low | 64
		}
		return low
	}
	/* Specification: 5 Encoding of ... (last table) */
	/* offset = 2 * index, where index is in range [0..8] */
	cell := 2 * ((uint32(copycode) >> 3) + 3*(uint32(inscode)>>3))

	/* All values in specification are K * 64,
	   where K = [2, 3, 6, 4, 5, 8, 7, 9, 10],
	   i + 1 = [1, 2, 3, 4, 5, 6, 7, 8, 9],
	   K - i - 1 = [1, 1, 3, 0, 0, 2, 0, 1, 2] = D.
	   All values in D require only 2 bits to encode.
	   Magic constant is shifted 6 bits left, to avoid final multiplication. */
	cell = (cell << 5) + 0x40 + ((0x520D40 >> cell) & 0xC0)

	return uint16(cell | uint32(low))
}
|
||||
|
||||
/* getLengthCode stores in *code the combined command prefix code for the
   given insert length, copy length and implicit-last-distance flag. */
func getLengthCode(insertlen uint, copylen uint, use_last_distance bool, code *uint16) {
	var inscode uint16 = getInsertLengthCode(insertlen)
	var copycode uint16 = getCopyLengthCode(copylen)
	*code = combineLengthCodes(inscode, copycode, use_last_distance)
}
|
||||
|
||||
/* Accessors for the insert/copy length code tables. */
func getInsertBase(inscode uint16) uint32 {
	return kInsBase[inscode]
}

func getInsertExtra(inscode uint16) uint32 {
	return kInsExtra[inscode]
}

func getCopyBase(copycode uint16) uint32 {
	return kCopyBase[copycode]
}

func getCopyExtra(copycode uint16) uint32 {
	return kCopyExtra[copycode]
}
|
||||
|
||||
/* command describes one insert-and-copy command of the compressed stream. */
type command struct {
	insert_len_  uint32 // number of literals inserted before the copy
	copy_len_    uint32 // low 25 bits: copy length; top 7 bits: copy-length-code delta
	dist_extra_  uint32 // extra bits of the distance code
	cmd_prefix_  uint16 // combined insert/copy command prefix code
	dist_prefix_ uint16 // low 10 bits: distance code; high 6 bits: extra-bit count
}
|
||||
|
||||
/* makeCommand builds a command for the given insert/copy lengths and
   distance code.
   distance_code is e.g. 0 for same-as-last short code, or 16 for offset 1. */
func makeCommand(dist *distanceParams, insertlen uint, copylen uint, copylen_code_delta int, distance_code uint) (cmd command) {
	/* Don't rely on signed int representation, use honest casts. */
	var delta uint32 = uint32(byte(int8(copylen_code_delta)))
	cmd.insert_len_ = uint32(insertlen)
	cmd.copy_len_ = uint32(uint32(copylen) | delta<<25)

	/* The distance prefix and extra bits are stored in this Command as if
	   npostfix and ndirect were 0, they are only recomputed later after the
	   clustering if needed. */
	prefixEncodeCopyDistance(distance_code, uint(dist.num_direct_distance_codes), uint(dist.distance_postfix_bits), &cmd.dist_prefix_, &cmd.dist_extra_)
	getLengthCode(insertlen, uint(int(copylen)+copylen_code_delta), (cmd.dist_prefix_&0x3FF == 0), &cmd.cmd_prefix_)

	return cmd
}
|
||||
|
||||
/* makeInsertCommand builds a literals-only command (no copy part) that
   inserts insertlen literals. */
func makeInsertCommand(insertlen uint) (cmd command) {
	cmd.insert_len_ = uint32(insertlen)
	cmd.copy_len_ = 4 << 25 /* copy length 0 with copy-length-code delta 4 */
	cmd.dist_extra_ = 0
	cmd.dist_prefix_ = numDistanceShortCodes
	getLengthCode(insertlen, 4, false, &cmd.cmd_prefix_)
	return cmd
}
|
||||
|
||||
/* commandRestoreDistanceCode recovers the distance code from the packed
   dist_prefix_/dist_extra_ fields — presumably the inverse of
   prefixEncodeCopyDistance for the given dist parameters (verify against
   that function). */
func commandRestoreDistanceCode(self *command, dist *distanceParams) uint32 {
	if uint32(self.dist_prefix_&0x3FF) < numDistanceShortCodes+dist.num_direct_distance_codes {
		/* Short codes and direct distance codes are stored verbatim. */
		return uint32(self.dist_prefix_) & 0x3FF
	} else {
		var dcode uint32 = uint32(self.dist_prefix_) & 0x3FF
		var nbits uint32 = uint32(self.dist_prefix_) >> 10
		var extra uint32 = self.dist_extra_
		var postfix_mask uint32 = (1 << dist.distance_postfix_bits) - 1
		var hcode uint32 = (dcode - dist.num_direct_distance_codes - numDistanceShortCodes) >> dist.distance_postfix_bits
		var lcode uint32 = (dcode - dist.num_direct_distance_codes - numDistanceShortCodes) & postfix_mask
		var offset uint32 = ((2 + (hcode & 1)) << nbits) - 4
		return ((offset + extra) << dist.distance_postfix_bits) + lcode + dist.num_direct_distance_codes + numDistanceShortCodes
	}
}
|
||||
|
||||
func commandDistanceContext(self *command) uint32 {
|
||||
var r uint32 = uint32(self.cmd_prefix_) >> 6
|
||||
var c uint32 = uint32(self.cmd_prefix_) & 7
|
||||
if (r == 0 || r == 2 || r == 4 || r == 7) && (c <= 2) {
|
||||
return c
|
||||
}
|
||||
|
||||
return 3
|
||||
}
|
||||
|
||||
/* commandCopyLen returns the copy length (low 25 bits of copy_len_). */
func commandCopyLen(self *command) uint32 {
	return self.copy_len_ & 0x1FFFFFF
}
|
||||
|
||||
/* commandCopyLenCode returns the copy length used for code computation:
   the stored copy length plus the 7-bit delta kept in the top bits of
   copy_len_, sign-extended. */
func commandCopyLenCode(self *command) uint32 {
	var modifier uint32 = self.copy_len_ >> 25
	/* Replicate bit 6 into bit 7 so the int8 cast sign-extends the 7-bit value. */
	var delta int32 = int32(int8(byte(modifier | (modifier&0x40)<<1)))
	return uint32(int32(self.copy_len_&0x1FFFFFF) + delta)
}
|
||||
834
vendor/github.com/andybalholm/brotli/compress_fragment.go
generated
vendored
Normal file
834
vendor/github.com/andybalholm/brotli/compress_fragment.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
773
vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go
generated
vendored
Normal file
773
vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
77
vendor/github.com/andybalholm/brotli/constants.go
generated
vendored
Normal file
77
vendor/github.com/andybalholm/brotli/constants.go
generated
vendored
Normal file
@ -0,0 +1,77 @@
|
||||
package brotli
|
||||
|
||||
/* Copyright 2016 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* Section numbers below refer to the brotli format spec (RFC 7932). */

/* Specification: 7.3. Encoding of the context map */
const contextMapMaxRle = 16

/* Specification: 2. Compressed representation overview */
const maxNumberOfBlockTypes = 256

/* Specification: 3.3. Alphabet sizes: insert-and-copy length */
const numLiteralSymbols = 256

const numCommandSymbols = 704

const numBlockLenSymbols = 26

const maxContextMapSymbols = (maxNumberOfBlockTypes + contextMapMaxRle)

const maxBlockTypeSymbols = (maxNumberOfBlockTypes + 2)

/* Specification: 3.5. Complex prefix codes */
const repeatPreviousCodeLength = 16

const repeatZeroCodeLength = 17

const codeLengthCodes = (repeatZeroCodeLength + 1)

/* "code length of 8 is repeated" */
const initialRepeatedCodeLength = 8

/* "Large Window Brotli" */
const largeMaxDistanceBits = 62

const largeMinWbits = 10

const largeMaxWbits = 30
|
||||
|
||||
/* Specification: 4. Encoding of distances */
const numDistanceShortCodes = 16

const maxNpostfix = 3

const maxNdirect = 120

const maxDistanceBits = 24

// distanceAlphabetSize returns the number of distance symbols for the given
// postfix-bit count (NPOSTFIX), direct distance code count (NDIRECT) and
// maximum distance bit count (MAXNBITS).
func distanceAlphabetSize(NPOSTFIX uint, NDIRECT uint, MAXNBITS uint) uint {
	coded := MAXNBITS << (NPOSTFIX + 1)
	return numDistanceShortCodes + NDIRECT + coded
}
|
||||
|
||||
/* numDistanceSymbols == 1128 == distanceAlphabetSize(maxNpostfix, maxNdirect, largeMaxDistanceBits) */
const numDistanceSymbols = 1128

const maxDistance = 0x3FFFFFC

const maxAllowedDistance = 0x7FFFFFFC

/* 7.1. Context modes and context ID lookup for literals */
/* "context IDs for literals are in the range of 0..63" */
const literalContextBits = 6

/* 7.2. Context ID for distances */
const distanceContextBits = 2
|
||||
|
||||
/* 9.1. Format of the Stream Header */
/* Number of slack bytes for window size. Don't confuse
   with BROTLI_NUM_DISTANCE_SHORT_CODES. */
const windowGap = 16

// maxBackwardLimit returns the largest usable backward distance for a
// window of W bits: the window size minus the slack bytes.
func maxBackwardLimit(W uint) uint {
	windowSize := uint(1) << W
	return windowSize - windowGap
}
|
||||
2176
vendor/github.com/andybalholm/brotli/context.go
generated
vendored
Normal file
2176
vendor/github.com/andybalholm/brotli/context.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
2581
vendor/github.com/andybalholm/brotli/decode.go
generated
vendored
Normal file
2581
vendor/github.com/andybalholm/brotli/decode.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
122890
vendor/github.com/andybalholm/brotli/dictionary.go
generated
vendored
Normal file
122890
vendor/github.com/andybalholm/brotli/dictionary.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
32779
vendor/github.com/andybalholm/brotli/dictionary_hash.go
generated
vendored
Normal file
32779
vendor/github.com/andybalholm/brotli/dictionary_hash.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1220
vendor/github.com/andybalholm/brotli/encode.go
generated
vendored
Normal file
1220
vendor/github.com/andybalholm/brotli/encode.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
168
vendor/github.com/andybalholm/brotli/encoder.go
generated
vendored
Normal file
168
vendor/github.com/andybalholm/brotli/encoder.go
generated
vendored
Normal file
@ -0,0 +1,168 @@
|
||||
package brotli
|
||||
|
||||
import "github.com/andybalholm/brotli/matchfinder"
|
||||
|
||||
// An Encoder implements the matchfinder.Encoder interface, writing in Brotli format.
type Encoder struct {
	wroteHeader bool           // whether the stream header has been written yet
	bw          bitWriter      // destination bit stream
	distCache   []distanceCode // per-match distance codes, reused across calls
}
|
||||
|
||||
func (e *Encoder) Reset() {
|
||||
e.wroteHeader = false
|
||||
e.bw = bitWriter{}
|
||||
}
|
||||
|
||||
// Encode appends the Brotli encoding of src, segmented by matches, to dst and
// returns the extended buffer.  A 4-bit stream header is emitted once per
// stream; each call produces one meta-block, and when lastBlock is true a
// final empty "islast" meta-block is appended and the output is padded to a
// byte boundary.
func (e *Encoder) Encode(dst []byte, src []byte, matches []matchfinder.Match, lastBlock bool) []byte {
	e.bw.dst = dst
	if !e.wroteHeader {
		e.bw.writeBits(4, 15)
		e.wroteHeader = true
	}

	var literalHisto [256]uint32
	var commandHisto [704]uint32
	var distanceHisto [64]uint32
	literalCount := 0
	commandCount := 0
	distanceCount := 0

	if len(e.distCache) < len(matches) {
		e.distCache = make([]distanceCode, len(matches))
	}

	// first pass: build the histograms
	pos := 0

	// d is the ring buffer of the last 4 distances.
	d := [4]int{-10, -10, -10, -10}
	for i, m := range matches {
		if m.Unmatched > 0 {
			for _, c := range src[pos : pos+m.Unmatched] {
				literalHisto[c]++
			}
			literalCount += m.Unmatched
		}

		insertCode := getInsertLengthCode(uint(m.Unmatched))
		copyCode := getCopyLengthCode(uint(m.Length))
		if m.Length == 0 {
			// If the stream ends with unmatched bytes, we need a dummy copy length.
			copyCode = 2
		}
		command := combineLengthCodes(insertCode, copyCode, false)
		commandHisto[command]++
		commandCount++

		// Commands below 128 imply the last distance, so only commands >= 128
		// carry an explicit distance code.
		if command >= 128 && m.Length != 0 {
			var distCode distanceCode
			switch m.Distance {
			case d[3]:
				distCode.code = 0
			case d[2]:
				distCode.code = 1
			case d[1]:
				distCode.code = 2
			case d[0]:
				distCode.code = 3
			case d[3] - 1:
				distCode.code = 4
			case d[3] + 1:
				distCode.code = 5
			case d[3] - 2:
				distCode.code = 6
			case d[3] + 2:
				distCode.code = 7
			case d[3] - 3:
				distCode.code = 8
			case d[3] + 3:
				distCode.code = 9

			// In my testing, codes 10–15 actually reduced the compression ratio.

			default:
				distCode = getDistanceCode(m.Distance)
			}
			e.distCache[i] = distCode
			distanceHisto[distCode.code]++
			distanceCount++
			if distCode.code != 0 {
				d[0], d[1], d[2], d[3] = d[1], d[2], d[3], m.Distance
			}
		}

		pos += m.Unmatched + m.Length
	}

	storeMetaBlockHeaderBW(uint(len(src)), false, &e.bw)
	e.bw.writeBits(13, 0)

	// Emit the three Huffman code tables built from the histograms.
	var literalDepths [256]byte
	var literalBits [256]uint16
	buildAndStoreHuffmanTreeFastBW(literalHisto[:], uint(literalCount), 8, literalDepths[:], literalBits[:], &e.bw)

	var commandDepths [704]byte
	var commandBits [704]uint16
	buildAndStoreHuffmanTreeFastBW(commandHisto[:], uint(commandCount), 10, commandDepths[:], commandBits[:], &e.bw)

	var distanceDepths [64]byte
	var distanceBits [64]uint16
	buildAndStoreHuffmanTreeFastBW(distanceHisto[:], uint(distanceCount), 6, distanceDepths[:], distanceBits[:], &e.bw)

	// second pass: emit commands, extra bits, literals and distances using
	// the codes chosen above.
	pos = 0
	for i, m := range matches {
		insertCode := getInsertLengthCode(uint(m.Unmatched))
		copyCode := getCopyLengthCode(uint(m.Length))
		if m.Length == 0 {
			// If the stream ends with unmatched bytes, we need a dummy copy length.
			copyCode = 2
		}
		command := combineLengthCodes(insertCode, copyCode, false)
		e.bw.writeBits(uint(commandDepths[command]), uint64(commandBits[command]))
		if kInsExtra[insertCode] > 0 {
			e.bw.writeBits(uint(kInsExtra[insertCode]), uint64(m.Unmatched)-uint64(kInsBase[insertCode]))
		}
		if kCopyExtra[copyCode] > 0 {
			e.bw.writeBits(uint(kCopyExtra[copyCode]), uint64(m.Length)-uint64(kCopyBase[copyCode]))
		}

		if m.Unmatched > 0 {
			for _, c := range src[pos : pos+m.Unmatched] {
				e.bw.writeBits(uint(literalDepths[c]), uint64(literalBits[c]))
			}
		}

		if command >= 128 && m.Length != 0 {
			distCode := e.distCache[i]
			e.bw.writeBits(uint(distanceDepths[distCode.code]), uint64(distanceBits[distCode.code]))
			if distCode.nExtra > 0 {
				e.bw.writeBits(distCode.nExtra, distCode.extraBits)
			}
		}

		pos += m.Unmatched + m.Length
	}

	if lastBlock {
		e.bw.writeBits(2, 3) // islast + isempty
		e.bw.jumpToByteBoundary()
	}
	return e.bw.dst
}
|
||||
|
||||
// distanceCode is a backward-reference distance in encoded form.
type distanceCode struct {
	code      int    // distance prefix code (index into the distance histogram)
	nExtra    uint   // number of extra bits
	extraBits uint64 // extra-bit payload
}
|
||||
|
||||
// getDistanceCode converts an absolute match distance to its distance code
// and extra bits, as if npostfix and ndirect were 0 (codes 16 and up).
func getDistanceCode(distance int) distanceCode {
	d := distance + 3
	nbits := log2FloorNonZero(uint(d)) - 1
	prefix := (d >> nbits) & 1
	offset := (2 + prefix) << nbits
	distcode := int(2*(nbits-1)) + prefix + 16
	extra := d - offset
	return distanceCode{distcode, uint(nbits), uint64(extra)}
}
|
||||
22
vendor/github.com/andybalholm/brotli/encoder_dict.go
generated
vendored
Normal file
22
vendor/github.com/andybalholm/brotli/encoder_dict.go
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
package brotli
|
||||
|
||||
/* Dictionary data (words and transforms) for 1 possible context */
type encoderDictionary struct {
	words                 *dictionary // shared static dictionary words
	cutoffTransformsCount uint32
	cutoffTransforms      uint64
	hash_table            []uint16   // precomputed lookup structures over the dictionary
	buckets               []uint16
	dict_words            []dictWord
}
||||
|
||||
/* initEncoderDictionary points dict at the shared static dictionary and its
   precomputed hash/bucket/word tables. */
func initEncoderDictionary(dict *encoderDictionary) {
	dict.words = getDictionary()

	dict.hash_table = kStaticDictionaryHash[:]
	dict.buckets = kStaticDictionaryBuckets[:]
	dict.dict_words = kStaticDictionaryWords[:]

	dict.cutoffTransformsCount = kCutoffTransformsCount
	dict.cutoffTransforms = kCutoffTransforms
}
|
||||
592
vendor/github.com/andybalholm/brotli/entropy_encode.go
generated
vendored
Normal file
592
vendor/github.com/andybalholm/brotli/entropy_encode.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
4399
vendor/github.com/andybalholm/brotli/entropy_encode_static.go
generated
vendored
Normal file
4399
vendor/github.com/andybalholm/brotli/entropy_encode_static.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
290
vendor/github.com/andybalholm/brotli/fast_log.go
generated
vendored
Normal file
290
vendor/github.com/andybalholm/brotli/fast_log.go
generated
vendored
Normal file
@ -0,0 +1,290 @@
|
||||
package brotli
|
||||
|
||||
import (
|
||||
"math"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
/* Copyright 2013 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* Utilities for fast computation of logarithms. */
|
||||
|
||||
// log2FloorNonZero returns floor(log2(n)).  n must be nonzero.
func log2FloorNonZero(n uint) uint32 {
	width := bits.Len(n)
	return uint32(width - 1)
}
|
||||
|
||||
/* A lookup table for small values of log2(int) to be used in entropy
   computation.

   ", ".join(["%.16ff" % x for x in [0.0]+[log2(x) for x in range(1, 256)]]) */
var kLog2Table = []float32{
	0.0000000000000000, 0.0000000000000000, 1.0000000000000000, 1.5849625007211563,
	2.0000000000000000, 2.3219280948873622, 2.5849625007211561, 2.8073549220576042,
	3.0000000000000000, 3.1699250014423126, 3.3219280948873626, 3.4594316186372978,
	3.5849625007211565, 3.7004397181410922, 3.8073549220576037, 3.9068905956085187,
	4.0000000000000000, 4.0874628412503400, 4.1699250014423122, 4.2479275134435852,
	4.3219280948873626, 4.3923174227787607, 4.4594316186372973, 4.5235619560570131,
	4.5849625007211570, 4.6438561897747244, 4.7004397181410926, 4.7548875021634691,
	4.8073549220576037, 4.8579809951275728, 4.9068905956085187, 4.9541963103868758,
	5.0000000000000000, 5.0443941193584534, 5.0874628412503400, 5.1292830169449664,
	5.1699250014423122, 5.2094533656289501, 5.2479275134435852, 5.2854022188622487,
	5.3219280948873626, 5.3575520046180838, 5.3923174227787607, 5.4262647547020979,
	5.4594316186372973, 5.4918530963296748, 5.5235619560570131, 5.5545888516776376,
	5.5849625007211570, 5.6147098441152083, 5.6438561897747244, 5.6724253419714961,
	5.7004397181410926, 5.7279204545631996, 5.7548875021634691, 5.7813597135246599,
	5.8073549220576046, 5.8328900141647422, 5.8579809951275719, 5.8826430493618416,
	5.9068905956085187, 5.9307373375628867, 5.9541963103868758, 5.9772799234999168,
	6.0000000000000000, 6.0223678130284544, 6.0443941193584534, 6.0660891904577721,
	6.0874628412503400, 6.1085244567781700, 6.1292830169449672, 6.1497471195046822,
	6.1699250014423122, 6.1898245588800176, 6.2094533656289510, 6.2288186904958804,
	6.2479275134435861, 6.2667865406949019, 6.2854022188622487, 6.3037807481771031,
	6.3219280948873617, 6.3398500028846252, 6.3575520046180847, 6.3750394313469254,
	6.3923174227787598, 6.4093909361377026, 6.4262647547020979, 6.4429434958487288,
	6.4594316186372982, 6.4757334309663976, 6.4918530963296748, 6.5077946401986964,
	6.5235619560570131, 6.5391588111080319, 6.5545888516776376, 6.5698556083309478,
	6.5849625007211561, 6.5999128421871278, 6.6147098441152092, 6.6293566200796095,
	6.6438561897747253, 6.6582114827517955, 6.6724253419714952, 6.6865005271832185,
	6.7004397181410917, 6.7142455176661224, 6.7279204545631988, 6.7414669864011465,
	6.7548875021634691, 6.7681843247769260, 6.7813597135246599, 6.7944158663501062,
	6.8073549220576037, 6.8201789624151887, 6.8328900141647422, 6.8454900509443757,
	6.8579809951275719, 6.8703647195834048, 6.8826430493618416, 6.8948177633079437,
	6.9068905956085187, 6.9188632372745955, 6.9307373375628867, 6.9425145053392399,
	6.9541963103868758, 6.9657842846620879, 6.9772799234999168, 6.9886846867721664,
	7.0000000000000000, 7.0112272554232540, 7.0223678130284544, 7.0334230015374501,
	7.0443941193584534, 7.0552824355011898, 7.0660891904577721, 7.0768155970508317,
	7.0874628412503400, 7.0980320829605272, 7.1085244567781700, 7.1189410727235076,
	7.1292830169449664, 7.1395513523987937, 7.1497471195046822, 7.1598713367783891,
	7.1699250014423130, 7.1799090900149345, 7.1898245588800176, 7.1996723448363644,
	7.2094533656289492, 7.2191685204621621, 7.2288186904958804, 7.2384047393250794,
	7.2479275134435861, 7.2573878426926521, 7.2667865406949019, 7.2761244052742384,
	7.2854022188622487, 7.2946207488916270, 7.3037807481771031, 7.3128829552843557,
	7.3219280948873617, 7.3309168781146177, 7.3398500028846243, 7.3487281542310781,
	7.3575520046180847, 7.3663222142458151, 7.3750394313469254, 7.3837042924740528,
	7.3923174227787607, 7.4008794362821844, 7.4093909361377026, 7.4178525148858991,
	7.4262647547020979, 7.4346282276367255, 7.4429434958487288, 7.4512111118323299,
	7.4594316186372973, 7.4676055500829976, 7.4757334309663976, 7.4838157772642564,
	7.4918530963296748, 7.4998458870832057, 7.5077946401986964, 7.5156998382840436,
	7.5235619560570131, 7.5313814605163119, 7.5391588111080319, 7.5468944598876373,
	7.5545888516776376, 7.5622424242210728, 7.5698556083309478, 7.5774288280357487,
	7.5849625007211561, 7.5924570372680806, 7.5999128421871278, 7.6073303137496113,
	7.6147098441152075, 7.6220518194563764, 7.6293566200796095, 7.6366246205436488,
	7.6438561897747244, 7.6510516911789290, 7.6582114827517955, 7.6653359171851765,
	7.6724253419714952, 7.6794800995054464, 7.6865005271832185, 7.6934869574993252,
	7.7004397181410926, 7.7073591320808825, 7.7142455176661224, 7.7210991887071856,
	7.7279204545631996, 7.7347096202258392, 7.7414669864011465, 7.7481928495894596,
	7.7548875021634691, 7.7615512324444795, 7.7681843247769260, 7.7747870596011737,
	7.7813597135246608, 7.7879025593914317, 7.7944158663501062, 7.8008998999203047,
	7.8073549220576037, 7.8137811912170374, 7.8201789624151887, 7.8265484872909159,
	7.8328900141647422, 7.8392037880969445, 7.8454900509443757, 7.8517490414160571,
	7.8579809951275719, 7.8641861446542798, 7.8703647195834048, 7.8765169465650002,
	7.8826430493618425, 7.8887432488982601, 7.8948177633079446, 7.9008668079807496,
	7.9068905956085187, 7.9128893362299619, 7.9188632372745955, 7.9248125036057813,
	7.9307373375628867, 7.9366379390025719, 7.9425145053392399, 7.9483672315846778,
	7.9541963103868758, 7.9600019320680806, 7.9657842846620870, 7.9715435539507720,
	7.9772799234999168, 7.9829935746943104, 7.9886846867721664, 7.9943534368588578,
}
|
||||
|
||||
/* Faster logarithm for small integers, with the property of log2(0) == 0. */
func fastLog2(v uint) float64 {
	if v < uint(len(kLog2Table)) {
		/* Values below 256 come from the precomputed table. */
		return float64(kLog2Table[v])
	}

	return math.Log2(float64(v))
}
|
||||
45
vendor/github.com/andybalholm/brotli/find_match_length.go
generated
vendored
Normal file
45
vendor/github.com/andybalholm/brotli/find_match_length.go
generated
vendored
Normal file
@ -0,0 +1,45 @@
|
||||
package brotli
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math/bits"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
/* Copyright 2010 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* Function to find maximal matching prefixes of strings. */
//
// findMatchLengthWithLimit returns the length of the longest common prefix
// of s1 and s2, comparing at most limit bytes. Both slices must contain at
// least limit readable bytes. A limit of 0 returns 0 (the original code
// underflowed limit-1 and panicked with an index out of range).
func findMatchLengthWithLimit(s1 []byte, s2 []byte, limit uint) uint {
	if limit == 0 {
		return 0
	}
	var matched uint = 0
	// Hoist the bounds check so the loops below stay in range.
	_, _ = s1[limit-1], s2[limit-1]
	switch runtime.GOARCH {
	case "amd64":
		// Compare 8 bytes at a time.
		for matched+8 <= limit {
			w1 := binary.LittleEndian.Uint64(s1[matched:])
			w2 := binary.LittleEndian.Uint64(s2[matched:])
			if w1 != w2 {
				// The first differing byte is the lowest differing bit / 8
				// (little-endian, so low bits correspond to early bytes).
				return matched + uint(bits.TrailingZeros64(w1^w2)>>3)
			}
			matched += 8
		}
	case "386":
		// Compare 4 bytes at a time.
		for matched+4 <= limit {
			w1 := binary.LittleEndian.Uint32(s1[matched:])
			w2 := binary.LittleEndian.Uint32(s2[matched:])
			if w1 != w2 {
				return matched + uint(bits.TrailingZeros32(w1^w2)>>3)
			}
			matched += 4
		}
	}
	// Byte-by-byte tail (and the whole comparison on other architectures).
	for matched < limit && s1[matched] == s2[matched] {
		matched++
	}
	return matched
}
|
||||
287
vendor/github.com/andybalholm/brotli/h10.go
generated
vendored
Normal file
287
vendor/github.com/andybalholm/brotli/h10.go
generated
vendored
Normal file
@ -0,0 +1,287 @@
|
||||
package brotli
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
/* Copyright 2016 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
// HashTypeLength returns the number of input bytes hashed per position (4).
func (*h10) HashTypeLength() uint {
	return 4
}

// StoreLookahead returns how many bytes past a stored position the hasher
// may read (128 — the maximum tree comparison length).
func (*h10) StoreLookahead() uint {
	return 128
}

// hashBytesH10 hashes the first 4 bytes of data into a 17-bit bucket index.
func hashBytesH10(data []byte) uint32 {
	var h uint32 = binary.LittleEndian.Uint32(data) * kHashMul32

	/* The higher bits contain more mixture from the multiplication,
	   so we take our results from there. */
	return h >> (32 - 17)
}

/* A (forgetful) hash table where each hash bucket contains a binary tree of
   sequences whose first 4 bytes share the same hash code.
   Each sequence is up to 128 bytes long and is identified by its starting
   position in the input data. The binary tree is sorted by the lexicographic
   order of the sequences, and it is also a max-heap with respect to the
   starting positions. */
type h10 struct {
	hasherCommon
	window_mask_ uint            // sliding-window mask: (1 << lgwin) - 1
	buckets_     [1 << 17]uint32 // tree root position for each hash bucket
	invalid_pos_ uint32          // sentinel marking an empty / out-of-window node
	forest       []uint32        // child links: two slots (left, right) per window position
}

// Initialize sizes the hasher for the window configured in params.
func (h *h10) Initialize(params *encoderParams) {
	h.window_mask_ = (1 << params.lgwin) - 1
	// Deliberate unsigned wrap-around: any real position minus invalid_pos_
	// exceeds the window, so sentinel nodes always fail the backward check.
	h.invalid_pos_ = uint32(0 - h.window_mask_)
	var num_nodes uint = uint(1) << params.lgwin
	h.forest = make([]uint32, 2*num_nodes)
}

// Prepare resets every hash bucket to the invalid sentinel.
func (h *h10) Prepare(one_shot bool, input_size uint, data []byte) {
	var invalid_pos uint32 = h.invalid_pos_
	var i uint32
	for i = 0; i < 1<<17; i++ {
		h.buckets_[i] = invalid_pos
	}
}

// leftChildIndexH10 returns the forest slot holding pos's left child link.
func leftChildIndexH10(self *h10, pos uint) uint {
	return 2 * (pos & self.window_mask_)
}

// rightChildIndexH10 returns the forest slot holding pos's right child link.
func rightChildIndexH10(self *h10, pos uint) uint {
	return 2*(pos&self.window_mask_) + 1
}
|
||||
|
||||
/* Stores the hash of the next 4 bytes and in a single tree-traversal, the
   hash bucket's binary tree is searched for matches and is re-rooted at the
   current position.

   If less than 128 bytes of data are available, the hash bucket of the
   current position is searched for matches, but the state of the hash table
   is not changed, since we can not know the final sorting order of the
   current (incomplete) sequence.

   This function must be called with increasing cur_ix positions. */
func storeAndFindMatchesH10(self *h10, data []byte, cur_ix uint, ring_buffer_mask uint, max_length uint, max_backward uint, best_len *uint, matches []backwardMatch) []backwardMatch {
	var cur_ix_masked uint = cur_ix & ring_buffer_mask
	var max_comp_len uint = brotli_min_size_t(max_length, 128)
	var should_reroot_tree bool = (max_length >= 128)
	var key uint32 = hashBytesH10(data[cur_ix_masked:])
	var forest []uint32 = self.forest
	var prev_ix uint = uint(self.buckets_[key])
	var node_left uint = leftChildIndexH10(self, cur_ix)
	var node_right uint = rightChildIndexH10(self, cur_ix)
	var best_len_left uint = 0
	var best_len_right uint = 0
	var depth_remaining uint
	/* node_left: the forest index of the rightmost node of the left subtree
	   of the new root, updated as we traverse and re-root the tree of the
	   hash bucket. */

	/* node_right: the forest index of the leftmost node of the right subtree
	   of the new root, updated likewise. */

	/* best_len_left: the match length of the rightmost node of the left
	   subtree of the new root. */

	/* best_len_right: the match length of the leftmost node of the right
	   subtree of the new root. */
	if should_reroot_tree {
		self.buckets_[key] = uint32(cur_ix)
	}

	// Walk down the bucket's tree, at most 64 levels deep.
	for depth_remaining = 64; ; depth_remaining-- {
		var backward uint = cur_ix - prev_ix
		var prev_ix_masked uint = prev_ix & ring_buffer_mask
		// Stop on the sentinel (backward wraps past the window), a node
		// outside the window, or depth exhaustion; seal the new root.
		if backward == 0 || backward > max_backward || depth_remaining == 0 {
			if should_reroot_tree {
				forest[node_left] = self.invalid_pos_
				forest[node_right] = self.invalid_pos_
			}

			break
		}
		{
			var cur_len uint = brotli_min_size_t(best_len_left, best_len_right)
			var len uint
			assert(cur_len <= 128)
			// The first cur_len bytes are already known to match on both
			// subtree boundaries, so comparison can start there.
			len = cur_len + findMatchLengthWithLimit(data[cur_ix_masked+cur_len:], data[prev_ix_masked+cur_len:], max_length-cur_len)
			if matches != nil && len > *best_len {
				*best_len = uint(len)
				initBackwardMatch(&matches[0], backward, uint(len))
				matches = matches[1:]
			}

			if len >= max_comp_len {
				if should_reroot_tree {
					// Full-length match: prev_ix is replaced by the new root,
					// so its children are spliced directly into the tree.
					forest[node_left] = forest[leftChildIndexH10(self, prev_ix)]
					forest[node_right] = forest[rightChildIndexH10(self, prev_ix)]
				}

				break
			}

			// Descend toward the side on which the current sequence sorts,
			// re-linking the visited node into the re-rooted tree.
			if data[cur_ix_masked+len] > data[prev_ix_masked+len] {
				best_len_left = uint(len)
				if should_reroot_tree {
					forest[node_left] = uint32(prev_ix)
				}

				node_left = rightChildIndexH10(self, prev_ix)
				prev_ix = uint(forest[node_left])
			} else {
				best_len_right = uint(len)
				if should_reroot_tree {
					forest[node_right] = uint32(prev_ix)
				}

				node_right = leftChildIndexH10(self, prev_ix)
				prev_ix = uint(forest[node_right])
			}
		}
	}

	return matches
}
|
||||
|
||||
/* Finds all backward matches of &data[cur_ix & ring_buffer_mask] up to the
   length of max_length and stores the position cur_ix in the hash table.

   Sets *num_matches to the number of matches found, and stores the found
   matches in matches[0] to matches[*num_matches - 1]. The matches will be
   sorted by strictly increasing length and (non-strictly) increasing
   distance. Returns the number of matches written. */
func findAllMatchesH10(handle *h10, dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, cur_ix uint, max_length uint, max_backward uint, gap uint, params *encoderParams, matches []backwardMatch) uint {
	var orig_matches []backwardMatch = matches
	var cur_ix_masked uint = cur_ix & ring_buffer_mask
	var best_len uint = 1
	var short_match_max_backward uint
	// Zopflification affords a wider linear scan for very short matches.
	if params.quality != hqZopflificationQuality {
		short_match_max_backward = 16
	} else {
		short_match_max_backward = 64
	}
	var stop uint = cur_ix - short_match_max_backward
	var dict_matches [maxStaticDictionaryMatchLen + 1]uint32
	var i uint
	if cur_ix < short_match_max_backward {
		stop = 0
	}
	// Linear scan of the immediately preceding positions for length-2
	// matches, which the hash tree (4-byte hash) cannot find.
	for i = cur_ix - 1; i > stop && best_len <= 2; i-- {
		var prev_ix uint = i
		var backward uint = cur_ix - prev_ix
		if backward > max_backward {
			break
		}

		prev_ix &= ring_buffer_mask
		if data[cur_ix_masked] != data[prev_ix] || data[cur_ix_masked+1] != data[prev_ix+1] {
			continue
		}
		{
			var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
			if len > best_len {
				best_len = uint(len)
				initBackwardMatch(&matches[0], backward, uint(len))
				matches = matches[1:]
			}
		}
	}

	if best_len < max_length {
		matches = storeAndFindMatchesH10(handle, data, cur_ix, ring_buffer_mask, max_length, max_backward, &best_len, matches)
	}

	// Finally, look for longer matches in the static dictionary.
	for i = 0; i <= maxStaticDictionaryMatchLen; i++ {
		dict_matches[i] = kInvalidMatch
	}
	{
		var minlen uint = brotli_max_size_t(4, best_len+1)
		if findAllStaticDictionaryMatches(dictionary, data[cur_ix_masked:], minlen, max_length, dict_matches[0:]) {
			var maxlen uint = brotli_min_size_t(maxStaticDictionaryMatchLen, max_length)
			var l uint
			for l = minlen; l <= maxlen; l++ {
				var dict_id uint32 = dict_matches[l]
				if dict_id < kInvalidMatch {
					// dict_id packs word index (high bits) and transform id
					// (low 5 bits); distances beyond the window address the
					// static dictionary.
					var distance uint = max_backward + gap + uint(dict_id>>5) + 1
					if distance <= params.dist.max_distance {
						initDictionaryBackwardMatch(&matches[0], distance, l, uint(dict_id&31))
						matches = matches[1:]
					}
				}
			}
		}
	}

	// matches was only ever resliced forward, so the capacity difference
	// equals the number of entries written.
	return uint(-cap(matches) + cap(orig_matches))
}
|
||||
|
||||
/* Stores the hash of the next 4 bytes and re-roots the binary tree at the
   current sequence, without returning any matches.
   REQUIRES: ix + 128 <= end-of-current-block */
func (h *h10) Store(data []byte, mask uint, ix uint) {
	var max_backward uint = h.window_mask_ - windowGap + 1
	/* Maximum distance is window size - 16, see section 9.1. of the spec. */
	storeAndFindMatchesH10(h, data, ix, mask, 128, max_backward, nil, nil)
}

// StoreRange stores positions in [ix_start, ix_end). For long ranges only
// every 8th of the oldest positions is stored: those entries are far back
// in the window and unlikely to produce useful matches.
func (h *h10) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) {
	var i uint = ix_start
	var j uint = ix_start
	if ix_start+63 <= ix_end {
		i = ix_end - 63
	}

	if ix_start+512 <= i {
		for ; j < i; j += 8 {
			h.Store(data, mask, j)
		}
	}

	for ; i < ix_end; i++ {
		h.Store(data, mask, i)
	}
}

// StitchToPreviousBlock inserts the positions spanning the previous/current
// block boundary into the hash tree.
func (h *h10) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
	if num_bytes >= h.HashTypeLength()-1 && position >= 128 {
		var i_start uint = position - 128 + 1
		var i_end uint = brotli_min_size_t(position, i_start+num_bytes)
		/* Store the last `128 - 1` positions in the hasher.
		   These could not be calculated before, since they require knowledge
		   of both the previous and the current block. */

		var i uint
		for i = i_start; i < i_end; i++ {
			/* Maximum distance is window size - 16, see section 9.1. of the spec.
			   Furthermore, we have to make sure that we don't look further back
			   from the start of the next block than the window size, otherwise we
			   could access already overwritten areas of the ring-buffer. */
			var max_backward uint = h.window_mask_ - brotli_max_size_t(windowGap-1, position-i)

			/* We know that i + 128 <= position + num_bytes, i.e. the
			   end of the current block and that we have at least
			   128 tail in the ring-buffer. */
			storeAndFindMatchesH10(h, ringbuffer, i, ringbuffer_mask, 128, max_backward, nil, nil)
		}
	}
}

/* MAX_NUM_MATCHES == 64 + MAX_TREE_SEARCH_DEPTH */
const maxNumMatchesH10 = 128

// FindLongestMatch is never called on the tree hasher: the zopfli-quality
// path uses findAllMatchesH10 instead.
func (*h10) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
	panic("unimplemented")
}

// PrepareDistanceCache is likewise unused for the tree hasher.
func (*h10) PrepareDistanceCache(distance_cache []int) {
	panic("unimplemented")
}
|
||||
214
vendor/github.com/andybalholm/brotli/h5.go
generated
vendored
Normal file
214
vendor/github.com/andybalholm/brotli/h5.go
generated
vendored
Normal file
@ -0,0 +1,214 @@
|
||||
package brotli
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
/* Copyright 2010 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* A (forgetful) hash table to the data seen by the compressor, to
   help create backward references to previous data.

   This is a hash map of fixed size (bucket_size_) to a ring buffer of
   fixed size (block_size_). The ring buffer contains the last block_size_
   index positions of the given hash key in the compressed data. */
func (*h5) HashTypeLength() uint {
	return 4
}

// StoreLookahead returns how many bytes past a stored position may be read (4).
func (*h5) StoreLookahead() uint {
	return 4
}

/* HashBytes is the function that chooses the bucket to place the address in. */
func hashBytesH5(data []byte, shift int) uint32 {
	var h uint32 = binary.LittleEndian.Uint32(data) * kHashMul32

	/* The higher bits contain more mixture from the multiplication,
	   so we take our results from there. */
	return uint32(h >> uint(shift))
}

type h5 struct {
	hasherCommon
	bucket_size_ uint     // number of hash buckets: 1 << bucket_bits
	block_size_  uint     // ring-buffer entries per bucket: 1 << block_bits
	hash_shift_  int      // 32 - bucket_bits
	block_mask_  uint32   // block_size_ - 1
	num          []uint16 // per-bucket count of positions ever stored
	buckets      []uint32 // block_size_ ring-indexed positions per bucket
}

// Initialize sizes the hash map for the bucket/block configuration in params.
func (h *h5) Initialize(params *encoderParams) {
	h.hash_shift_ = 32 - h.params.bucket_bits
	h.bucket_size_ = uint(1) << uint(h.params.bucket_bits)
	h.block_size_ = uint(1) << uint(h.params.block_bits)
	h.block_mask_ = uint32(h.block_size_ - 1)
	h.num = make([]uint16, h.bucket_size_)
	h.buckets = make([]uint32, h.block_size_*h.bucket_size_)
}

// Prepare clears the per-bucket counters. For sufficiently small one-shot
// inputs, only the buckets the input can actually touch are cleared.
func (h *h5) Prepare(one_shot bool, input_size uint, data []byte) {
	var num []uint16 = h.num
	var partial_prepare_threshold uint = h.bucket_size_ >> 6
	/* Partial preparation is 100 times slower (per socket). */
	if one_shot && input_size <= partial_prepare_threshold {
		var i uint
		for i = 0; i < input_size; i++ {
			var key uint32 = hashBytesH5(data[i:], h.hash_shift_)
			num[key] = 0
		}
	} else {
		for i := 0; i < int(h.bucket_size_); i++ {
			num[i] = 0
		}
	}
}

/* Look at 4 bytes at &data[ix & mask].
   Compute a hash from these, and store the value of ix at that position. */
func (h *h5) Store(data []byte, mask uint, ix uint) {
	var num []uint16 = h.num
	var key uint32 = hashBytesH5(data[ix&mask:], h.hash_shift_)
	// num[key] grows without bound; block_mask_ wraps it into the bucket's
	// ring buffer, overwriting the oldest entry.
	var minor_ix uint = uint(num[key]) & uint(h.block_mask_)
	var offset uint = minor_ix + uint(key<<uint(h.params.block_bits))
	h.buckets[offset] = uint32(ix)
	num[key]++
}
|
||||
|
||||
// StoreRange stores every position in [ix_start, ix_end).
func (h *h5) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) {
	var i uint
	for i = ix_start; i < ix_end; i++ {
		h.Store(data, mask, i)
	}
}

// StitchToPreviousBlock hashes the positions that straddle the boundary
// between the previous and the current block.
func (h *h5) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
	if num_bytes >= h.HashTypeLength()-1 && position >= 3 {
		/* Prepare the hashes for three last bytes of the last write.
		   These could not be calculated before, since they require knowledge
		   of both the previous and the current block. */
		h.Store(ringbuffer, ringbuffer_mask, position-3)
		h.Store(ringbuffer, ringbuffer_mask, position-2)
		h.Store(ringbuffer, ringbuffer_mask, position-1)
	}
}

// PrepareDistanceCache fills in the ±1..±3 variants of the last distances.
func (h *h5) PrepareDistanceCache(distance_cache []int) {
	prepareDistanceCache(distance_cache, h.params.num_last_distances_to_check)
}
|
||||
|
||||
/* Find a longest backward match of &data[cur_ix] up to the length of
   max_length and stores the position cur_ix in the hash table.

   REQUIRES: PrepareDistanceCacheH5 must be invoked for current distance cache
   values; if this method is invoked repeatedly with the same distance
   cache values, it is enough to invoke PrepareDistanceCacheH5 once.

   Does not look for matches longer than max_length.
   Does not look for matches further away than max_backward.
   Writes the best match into |out|.
   |out|->score is updated only if a better match is found. */
func (h *h5) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
	var num []uint16 = h.num
	var buckets []uint32 = h.buckets
	var cur_ix_masked uint = cur_ix & ring_buffer_mask
	var min_score uint = out.score
	var best_score uint = out.score
	var best_len uint = out.len
	var i uint
	var bucket []uint32
	/* Don't accept a short copy from far away. */
	out.len = 0

	out.len_code_delta = 0

	/* Try last distance first. */
	for i = 0; i < uint(h.params.num_last_distances_to_check); i++ {
		var backward uint = uint(distance_cache[i])
		var prev_ix uint = uint(cur_ix - backward)
		// prev_ix >= cur_ix means the subtraction wrapped: the candidate
		// would lie before the start of the data.
		if prev_ix >= cur_ix {
			continue
		}

		if backward > max_backward {
			continue
		}

		prev_ix &= ring_buffer_mask

		// Quick reject: the byte just past the current best length must
		// match before measuring the full match length is worthwhile.
		if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] {
			continue
		}
		{
			var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
			if len >= 3 || (len == 2 && i < 2) {
				/* Comparing for >= 2 does not change the semantics, but just saves for
				   a few unnecessary binary logarithms in backward reference score,
				   since we are not interested in such short matches. */
				var score uint = backwardReferenceScoreUsingLastDistance(uint(len))
				if best_score < score {
					if i != 0 {
						score -= backwardReferencePenaltyUsingLastDistance(i)
					}
					if best_score < score {
						best_score = score
						best_len = uint(len)
						out.len = best_len
						out.distance = backward
						out.score = best_score
					}
				}
			}
		}
	}
	{
		var key uint32 = hashBytesH5(data[cur_ix_masked:], h.hash_shift_)
		bucket = buckets[key<<uint(h.params.block_bits):]
		var down uint
		// Only the newest block_size_ entries of the bucket are still live.
		if uint(num[key]) > h.block_size_ {
			down = uint(num[key]) - h.block_size_
		} else {
			down = 0
		}
		for i = uint(num[key]); i > down; {
			var prev_ix uint
			i--
			prev_ix = uint(bucket[uint32(i)&h.block_mask_])
			var backward uint = cur_ix - prev_ix
			if backward > max_backward {
				break
			}

			prev_ix &= ring_buffer_mask
			if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] {
				continue
			}
			{
				var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
				if len >= 4 {
					/* Comparing for >= 3 does not change the semantics, but just saves
					   for a few unnecessary binary logarithms in backward reference
					   score, since we are not interested in such short matches. */
					var score uint = backwardReferenceScore(uint(len), backward)
					if best_score < score {
						best_score = score
						best_len = uint(len)
						out.len = best_len
						out.distance = backward
						out.score = best_score
					}
				}
			}
		}

		// Store the current position in its bucket.
		bucket[uint32(num[key])&h.block_mask_] = uint32(cur_ix)
		num[key]++
	}

	// Nothing beat the incoming score; fall back to the static dictionary.
	if min_score == out.score {
		searchInStaticDictionary(dictionary, h, data[cur_ix_masked:], max_length, max_backward+gap, max_distance, out, false)
	}
}
|
||||
216
vendor/github.com/andybalholm/brotli/h6.go
generated
vendored
Normal file
216
vendor/github.com/andybalholm/brotli/h6.go
generated
vendored
Normal file
@ -0,0 +1,216 @@
|
||||
package brotli
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
/* Copyright 2010 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* A (forgetful) hash table to the data seen by the compressor, to
   help create backward references to previous data.

   This is a hash map of fixed size (bucket_size_) to a ring buffer of
   fixed size (block_size_). The ring buffer contains the last block_size_
   index positions of the given hash key in the compressed data. */
func (*h6) HashTypeLength() uint {
	return 8
}

// StoreLookahead returns how many bytes past a stored position may be read (8).
func (*h6) StoreLookahead() uint {
	return 8
}

/* HashBytes is the function that chooses the bucket to place the address in. */
func hashBytesH6(data []byte, mask uint64, shift int) uint32 {
	var h uint64 = (binary.LittleEndian.Uint64(data) & mask) * kHashMul64Long

	/* The higher bits contain more mixture from the multiplication,
	   so we take our results from there. */
	return uint32(h >> uint(shift))
}

type h6 struct {
	hasherCommon
	bucket_size_ uint     // number of hash buckets: 1 << bucket_bits
	block_size_  uint     // ring-buffer entries per bucket: 1 << block_bits
	hash_shift_  int      // 64 - bucket_bits
	hash_mask_   uint64   // keeps the low hash_len bytes of the 8-byte read
	block_mask_  uint32   // block_size_ - 1
	num          []uint16 // per-bucket count of positions ever stored
	buckets      []uint32 // block_size_ ring-indexed positions per bucket
}

// Initialize sizes the hash map for the bucket/block/hash-length
// configuration in params.
func (h *h6) Initialize(params *encoderParams) {
	h.hash_shift_ = 64 - h.params.bucket_bits
	h.hash_mask_ = (^(uint64(0))) >> uint(64-8*h.params.hash_len)
	h.bucket_size_ = uint(1) << uint(h.params.bucket_bits)
	h.block_size_ = uint(1) << uint(h.params.block_bits)
	h.block_mask_ = uint32(h.block_size_ - 1)
	h.num = make([]uint16, h.bucket_size_)
	h.buckets = make([]uint32, h.block_size_*h.bucket_size_)
}

// Prepare clears the per-bucket counters. For sufficiently small one-shot
// inputs, only the buckets the input can actually touch are cleared.
func (h *h6) Prepare(one_shot bool, input_size uint, data []byte) {
	var num []uint16 = h.num
	var partial_prepare_threshold uint = h.bucket_size_ >> 6
	/* Partial preparation is 100 times slower (per socket). */
	if one_shot && input_size <= partial_prepare_threshold {
		var i uint
		for i = 0; i < input_size; i++ {
			var key uint32 = hashBytesH6(data[i:], h.hash_mask_, h.hash_shift_)
			num[key] = 0
		}
	} else {
		for i := 0; i < int(h.bucket_size_); i++ {
			num[i] = 0
		}
	}
}

/* Look at 4 bytes at &data[ix & mask].
   Compute a hash from these, and store the value of ix at that position. */
func (h *h6) Store(data []byte, mask uint, ix uint) {
	var num []uint16 = h.num
	var key uint32 = hashBytesH6(data[ix&mask:], h.hash_mask_, h.hash_shift_)
	// num[key] grows without bound; block_mask_ wraps it into the bucket's
	// ring buffer, overwriting the oldest entry.
	var minor_ix uint = uint(num[key]) & uint(h.block_mask_)
	var offset uint = minor_ix + uint(key<<uint(h.params.block_bits))
	h.buckets[offset] = uint32(ix)
	num[key]++
}
|
||||
|
||||
// StoreRange stores every position in [ix_start, ix_end).
func (h *h6) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) {
	var i uint
	for i = ix_start; i < ix_end; i++ {
		h.Store(data, mask, i)
	}
}

// StitchToPreviousBlock hashes the positions that straddle the boundary
// between the previous and the current block.
func (h *h6) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
	if num_bytes >= h.HashTypeLength()-1 && position >= 3 {
		/* Prepare the hashes for three last bytes of the last write.
		   These could not be calculated before, since they require knowledge
		   of both the previous and the current block. */
		h.Store(ringbuffer, ringbuffer_mask, position-3)
		h.Store(ringbuffer, ringbuffer_mask, position-2)
		h.Store(ringbuffer, ringbuffer_mask, position-1)
	}
}

// PrepareDistanceCache fills in the ±1..±3 variants of the last distances.
func (h *h6) PrepareDistanceCache(distance_cache []int) {
	prepareDistanceCache(distance_cache, h.params.num_last_distances_to_check)
}
|
||||
|
||||
/* Find a longest backward match of &data[cur_ix] up to the length of
   max_length and stores the position cur_ix in the hash table.

   REQUIRES: PrepareDistanceCacheH6 must be invoked for current distance cache
   values; if this method is invoked repeatedly with the same distance
   cache values, it is enough to invoke PrepareDistanceCacheH6 once.

   Does not look for matches longer than max_length.
   Does not look for matches further away than max_backward.
   Writes the best match into |out|.
   |out|->score is updated only if a better match is found. */
func (h *h6) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
	var num []uint16 = h.num
	var buckets []uint32 = h.buckets
	var cur_ix_masked uint = cur_ix & ring_buffer_mask
	var min_score uint = out.score
	var best_score uint = out.score
	var best_len uint = out.len
	var i uint
	var bucket []uint32
	/* Don't accept a short copy from far away. */
	out.len = 0

	out.len_code_delta = 0

	/* Try last distance first. */
	for i = 0; i < uint(h.params.num_last_distances_to_check); i++ {
		var backward uint = uint(distance_cache[i])
		var prev_ix uint = uint(cur_ix - backward)
		// prev_ix >= cur_ix means the subtraction wrapped: the candidate
		// would lie before the start of the data.
		if prev_ix >= cur_ix {
			continue
		}

		if backward > max_backward {
			continue
		}

		prev_ix &= ring_buffer_mask

		// Quick reject: the byte just past the current best length must
		// match before measuring the full match length is worthwhile.
		if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] {
			continue
		}
		{
			var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
			if len >= 3 || (len == 2 && i < 2) {
				/* Comparing for >= 2 does not change the semantics, but just saves for
				   a few unnecessary binary logarithms in backward reference score,
				   since we are not interested in such short matches. */
				var score uint = backwardReferenceScoreUsingLastDistance(uint(len))
				if best_score < score {
					if i != 0 {
						score -= backwardReferencePenaltyUsingLastDistance(i)
					}
					if best_score < score {
						best_score = score
						best_len = uint(len)
						out.len = best_len
						out.distance = backward
						out.score = best_score
					}
				}
			}
		}
	}
	{
		var key uint32 = hashBytesH6(data[cur_ix_masked:], h.hash_mask_, h.hash_shift_)
		bucket = buckets[key<<uint(h.params.block_bits):]
		var down uint
		// Only the newest block_size_ entries of the bucket are still live.
		if uint(num[key]) > h.block_size_ {
			down = uint(num[key]) - h.block_size_
		} else {
			down = 0
		}
		for i = uint(num[key]); i > down; {
			var prev_ix uint
			i--
			prev_ix = uint(bucket[uint32(i)&h.block_mask_])
			var backward uint = cur_ix - prev_ix
			if backward > max_backward {
				break
			}

			prev_ix &= ring_buffer_mask
			if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] {
				continue
			}
			{
				var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
				if len >= 4 {
					/* Comparing for >= 3 does not change the semantics, but just saves
					   for a few unnecessary binary logarithms in backward reference
					   score, since we are not interested in such short matches. */
					var score uint = backwardReferenceScore(uint(len), backward)
					if best_score < score {
						best_score = score
						best_len = uint(len)
						out.len = best_len
						out.distance = backward
						out.score = best_score
					}
				}
			}
		}

		// Store the current position in its bucket.
		bucket[uint32(num[key])&h.block_mask_] = uint32(cur_ix)
		num[key]++
	}

	// Nothing beat the incoming score; fall back to the static dictionary.
	if min_score == out.score {
		searchInStaticDictionary(dictionary, h, data[cur_ix_masked:], max_length, max_backward+gap, max_distance, out, false)
	}
}
|
||||
342
vendor/github.com/andybalholm/brotli/hash.go
generated
vendored
Normal file
342
vendor/github.com/andybalholm/brotli/hash.go
generated
vendored
Normal file
@ -0,0 +1,342 @@
|
||||
package brotli
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// hasherCommon holds state shared by every hasher implementation.
type hasherCommon struct {
	params           hasherParams
	is_prepared_     bool // set once Prepare has run for the current input
	dict_num_lookups uint // static-dictionary probe counter
	dict_num_matches uint // static-dictionary hit counter
}

// Common returns the shared hasher state embedded in each implementation.
func (h *hasherCommon) Common() *hasherCommon {
	return h
}

// hasherHandle is the interface implemented by every hasher variant
// (h5, h6, h10, ...).
type hasherHandle interface {
	Common() *hasherCommon
	Initialize(params *encoderParams)
	Prepare(one_shot bool, input_size uint, data []byte)
	StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint)
	HashTypeLength() uint
	StoreLookahead() uint
	PrepareDistanceCache(distance_cache []int)
	FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult)
	StoreRange(data []byte, mask uint, ix_start uint, ix_end uint)
	Store(data []byte, mask uint, ix uint)
}

const kCutoffTransformsCount uint32 = 10

/* 0, 12, 27, 23, 42, 63, 56, 48, 59, 64 */
/* 0+0, 4+8, 8+19, 12+11, 16+26, 20+43, 24+32, 28+20, 32+27, 36+28 */
const kCutoffTransforms uint64 = 0x071B520ADA2D3200

// hasherSearchResult describes the best backward match found at a position.
type hasherSearchResult struct {
	len            uint
	distance       uint
	score          uint
	len_code_delta int
}

/* kHashMul32 multiplier has these properties:
   * The multiplier must be odd. Otherwise we may lose the highest bit.
   * No long streaks of ones or zeros.
   * There is no effort to ensure that it is a prime, the oddity is enough
     for this use.
   * The number has been tuned heuristically against compression benchmarks. */
const kHashMul32 uint32 = 0x1E35A7BD

const kHashMul64 uint64 = 0x1E35A7BD1E35A7BD

const kHashMul64Long uint64 = 0x1FE35A7BD3579BD3

// hash14 hashes the first 4 bytes of data into a 14-bit value.
func hash14(data []byte) uint32 {
	var h uint32 = binary.LittleEndian.Uint32(data) * kHashMul32

	/* The higher bits contain more mixture from the multiplication,
	   so we take our results from there. */
	return h >> (32 - 14)
}
|
||||
|
||||
// prepareDistanceCache extends the distance cache with heuristic neighbors
// (+/-1, +/-2, +/-3) of the last distance, and — when more than ten slots
// are to be checked — of the next-to-last distance as well. Slots 0..3 are
// the real cached distances and are left untouched.
func prepareDistanceCache(distance_cache []int, num_distances int) {
	if num_distances <= 4 {
		return
	}

	offsets := [6]int{-1, 1, -2, 2, -3, 3}

	last := distance_cache[0]
	for j, d := range offsets {
		distance_cache[4+j] = last + d
	}

	if num_distances > 10 {
		nextLast := distance_cache[1]
		for j, d := range offsets {
			distance_cache[10+j] = nextLast + d
		}
	}
}
|
||||
|
||||
// Score contribution of each literal byte covered by a copy.
const literalByteScore = 135

// Score penalty per significant bit of the backward distance.
const distanceBitPenalty = 30

/* Score must be positive after applying maximal penalty. */
const scoreBase = (distanceBitPenalty * 8 * 8)
|
||||
|
||||
/* Usually, we always choose the longest backward reference. This function
|
||||
allows for the exception of that rule.
|
||||
|
||||
If we choose a backward reference that is further away, it will
|
||||
usually be coded with more bits. We approximate this by assuming
|
||||
log2(distance). If the distance can be expressed in terms of the
|
||||
last four distances, we use some heuristic constants to estimate
|
||||
the bits cost. For the first up to four literals we use the bit
|
||||
cost of the literals from the literal cost model, after that we
|
||||
use the average bit cost of the cost model.
|
||||
|
||||
This function is used to sometimes discard a longer backward reference
|
||||
when it is not much longer and the bit cost for encoding it is more
|
||||
than the saved literals.
|
||||
|
||||
backward_reference_offset MUST be positive. */
|
||||
func backwardReferenceScore(copy_length uint, backward_reference_offset uint) uint {
|
||||
return scoreBase + literalByteScore*uint(copy_length) - distanceBitPenalty*uint(log2FloorNonZero(backward_reference_offset))
|
||||
}
|
||||
|
||||
func backwardReferenceScoreUsingLastDistance(copy_length uint) uint {
|
||||
return literalByteScore*uint(copy_length) + scoreBase + 15
|
||||
}
|
||||
|
||||
// backwardReferencePenaltyUsingLastDistance returns the extra score penalty
// for reusing a non-primary cached distance. The magic constant packs small
// per-code penalties into 4-bit fields; masking with 0xE makes consecutive
// code pairs share a field.
func backwardReferencePenaltyUsingLastDistance(distance_short_code uint) uint {
	shift := distance_short_code & 0xE
	return 39 + uint((0x1CA10>>shift)&0xE)
}
|
||||
|
||||
// testStaticDictionaryItem checks whether one static-dictionary hash-table
// entry yields a match at the current position that beats out.score; on
// success it fills out and returns true. item packs the word length in its
// low 5 bits and the word index in the remaining bits.
func testStaticDictionaryItem(dictionary *encoderDictionary, item uint, data []byte, max_length uint, max_backward uint, max_distance uint, out *hasherSearchResult) bool {
	var len uint
	var word_idx uint
	var offset uint
	var matchlen uint
	var backward uint
	var score uint
	len = item & 0x1F
	word_idx = item >> 5
	offset = uint(dictionary.words.offsets_by_length[len]) + len*word_idx
	if len > max_length {
		return false
	}

	matchlen = findMatchLengthWithLimit(data, dictionary.words.data[offset:], uint(len))
	// Only the first cutoffTransformsCount-1 bytes may be cut from the
	// word; a shorter (or empty) match has no usable transform.
	if matchlen+uint(dictionary.cutoffTransformsCount) <= len || matchlen == 0 {
		return false
	}
	{
		// Encode the cut via a transform id; the synthetic distance
		// addresses the (word, transform) pair beyond the real window.
		var cut uint = len - matchlen
		var transform_id uint = (cut << 2) + uint((dictionary.cutoffTransforms>>(cut*6))&0x3F)
		backward = max_backward + 1 + word_idx + (transform_id << dictionary.words.size_bits_by_length[len])
	}

	if backward > max_distance {
		return false
	}

	score = backwardReferenceScore(matchlen, backward)
	if score < out.score {
		return false
	}

	out.len = matchlen
	// The coded length is the full word length, not the matched prefix.
	out.len_code_delta = int(len) - int(matchlen)
	out.distance = backward
	out.score = score
	return true
}
|
||||
|
||||
func searchInStaticDictionary(dictionary *encoderDictionary, handle hasherHandle, data []byte, max_length uint, max_backward uint, max_distance uint, out *hasherSearchResult, shallow bool) {
|
||||
var key uint
|
||||
var i uint
|
||||
var self *hasherCommon = handle.Common()
|
||||
if self.dict_num_matches < self.dict_num_lookups>>7 {
|
||||
return
|
||||
}
|
||||
|
||||
key = uint(hash14(data) << 1)
|
||||
for i = 0; ; (func() { i++; key++ })() {
|
||||
var tmp uint
|
||||
if shallow {
|
||||
tmp = 1
|
||||
} else {
|
||||
tmp = 2
|
||||
}
|
||||
if i >= tmp {
|
||||
break
|
||||
}
|
||||
var item uint = uint(dictionary.hash_table[key])
|
||||
self.dict_num_lookups++
|
||||
if item != 0 {
|
||||
var item_matches bool = testStaticDictionaryItem(dictionary, item, data, max_length, max_backward, max_distance, out)
|
||||
if item_matches {
|
||||
self.dict_num_matches++
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// backwardMatch is a compact (distance, length, length-code) triple used by
// the match finders; length_and_code stores the length in the high 27 bits
// and an optional dictionary length code in the low 5 bits.
type backwardMatch struct {
	distance        uint32
	length_and_code uint32
}
|
||||
|
||||
// initBackwardMatch records an ordinary match (no dictionary length code:
// the low 5 bits of length_and_code stay zero).
func initBackwardMatch(self *backwardMatch, dist uint, len uint) {
	self.distance = uint32(dist)
	self.length_and_code = uint32(len << 5)
}
|
||||
|
||||
func initDictionaryBackwardMatch(self *backwardMatch, dist uint, len uint, len_code uint) {
|
||||
self.distance = uint32(dist)
|
||||
var tmp uint
|
||||
if len == len_code {
|
||||
tmp = 0
|
||||
} else {
|
||||
tmp = len_code
|
||||
}
|
||||
self.length_and_code = uint32(len<<5 | tmp)
|
||||
}
|
||||
|
||||
// backwardMatchLength extracts the copy length from the packed field.
func backwardMatchLength(self *backwardMatch) uint {
	return uint(self.length_and_code >> 5)
}
|
||||
|
||||
func backwardMatchLengthCode(self *backwardMatch) uint {
|
||||
var code uint = uint(self.length_and_code) & 31
|
||||
if code != 0 {
|
||||
return code
|
||||
} else {
|
||||
return backwardMatchLength(self)
|
||||
}
|
||||
}
|
||||
|
||||
func hasherReset(handle hasherHandle) {
|
||||
if handle == nil {
|
||||
return
|
||||
}
|
||||
handle.Common().is_prepared_ = false
|
||||
}
|
||||
|
||||
// newHasher constructs the hasher implementation selected by chooseHasher.
// typ encodes both the algorithm family and its tuning (bucket sizes,
// sweep widths, chain depths); panics on an unknown id.
func newHasher(typ int) hasherHandle {
	switch typ {
	// 2-4: "quickly" hashers — small direct-mapped tables for low quality.
	case 2:
		return &hashLongestMatchQuickly{
			bucketBits:    16,
			bucketSweep:   1,
			hashLen:       5,
			useDictionary: true,
		}
	case 3:
		return &hashLongestMatchQuickly{
			bucketBits:    16,
			bucketSweep:   2,
			hashLen:       5,
			useDictionary: false,
		}
	case 4:
		return &hashLongestMatchQuickly{
			bucketBits:    17,
			bucketSweep:   4,
			hashLen:       5,
			useDictionary: true,
		}
	// 5, 6, 10: mid/high-quality hashers defined elsewhere in the package.
	case 5:
		return new(h5)
	case 6:
		return new(h6)
	case 10:
		return new(h10)
	// 35, 55, 65: composites pairing a main hasher with a rolling
	// long-distance backup hasher.
	case 35:
		return &hashComposite{
			ha: newHasher(3),
			hb: &hashRolling{jump: 4},
		}
	// 40-42: forgetful-chain hashers with increasing chain depth.
	case 40:
		return &hashForgetfulChain{
			bucketBits:              15,
			numBanks:                1,
			bankBits:                16,
			numLastDistancesToCheck: 4,
		}
	case 41:
		return &hashForgetfulChain{
			bucketBits:              15,
			numBanks:                1,
			bankBits:                16,
			numLastDistancesToCheck: 10,
		}
	case 42:
		return &hashForgetfulChain{
			bucketBits:              15,
			numBanks:                512,
			bankBits:                9,
			numLastDistancesToCheck: 16,
		}
	case 54:
		return &hashLongestMatchQuickly{
			bucketBits:    20,
			bucketSweep:   4,
			hashLen:       7,
			useDictionary: false,
		}
	case 55:
		return &hashComposite{
			ha: newHasher(54),
			hb: &hashRolling{jump: 4},
		}
	case 65:
		return &hashComposite{
			ha: newHasher(6),
			hb: &hashRolling{jump: 1},
		}
	}

	panic(fmt.Sprintf("unknown hasher type: %d", typ))
}
|
||||
|
||||
// hasherSetup lazily allocates *handle on first use (choosing the type from
// params) and ensures it is prepared for the upcoming input. Dictionary
// hit-rate counters are reset only when starting from position 0.
func hasherSetup(handle *hasherHandle, params *encoderParams, data []byte, position uint, input_size uint, is_last bool) {
	var self hasherHandle = nil
	var common *hasherCommon = nil
	// one_shot: the whole stream arrives in this single call, which lets
	// Prepare use a cheaper partial reset for tiny inputs.
	var one_shot bool = (position == 0 && is_last)
	if *handle == nil {
		chooseHasher(params, &params.hasher)
		self = newHasher(params.hasher.type_)

		*handle = self
		common = self.Common()
		common.params = params.hasher
		self.Initialize(params)
	}

	self = *handle
	common = self.Common()
	if !common.is_prepared_ {
		self.Prepare(one_shot, input_size, data)

		if position == 0 {
			common.dict_num_lookups = 0
			common.dict_num_matches = 0
		}

		common.is_prepared_ = true
	}
}
|
||||
|
||||
func initOrStitchToPreviousBlock(handle *hasherHandle, data []byte, mask uint, params *encoderParams, position uint, input_size uint, is_last bool) {
|
||||
var self hasherHandle
|
||||
hasherSetup(handle, params, data, position, input_size, is_last)
|
||||
self = *handle
|
||||
self.StitchToPreviousBlock(input_size, position, data, mask)
|
||||
}
|
||||
93
vendor/github.com/andybalholm/brotli/hash_composite.go
generated
vendored
Normal file
93
vendor/github.com/andybalholm/brotli/hash_composite.go
generated
vendored
Normal file
@ -0,0 +1,93 @@
|
||||
package brotli
|
||||
|
||||
/* Copyright 2018 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
func (h *hashComposite) HashTypeLength() uint {
|
||||
var a uint = h.ha.HashTypeLength()
|
||||
var b uint = h.hb.HashTypeLength()
|
||||
if a > b {
|
||||
return a
|
||||
} else {
|
||||
return b
|
||||
}
|
||||
}
|
||||
|
||||
func (h *hashComposite) StoreLookahead() uint {
|
||||
var a uint = h.ha.StoreLookahead()
|
||||
var b uint = h.hb.StoreLookahead()
|
||||
if a > b {
|
||||
return a
|
||||
} else {
|
||||
return b
|
||||
}
|
||||
}
|
||||
|
||||
/* Composite hasher: This hasher allows to combine two other hashers, HASHER_A
   and HASHER_B. */
type hashComposite struct {
	hasherCommon
	ha hasherHandle
	hb hasherHandle
	// params is remembered by Initialize so sub-hasher setup can be
	// deferred to Prepare (see the TODO there).
	params *encoderParams
}
|
||||
|
||||
// Initialize only records the parameters; actual sub-hasher setup is
// deferred to Prepare (see the TODO comment there).
func (h *hashComposite) Initialize(params *encoderParams) {
	h.params = params
}
|
||||
|
||||
/* TODO: Initialize of the hashers is defered to Prepare (and params
   remembered here) because we don't get the one_shot and input_size params
   here that are needed to know the memory size of them. Instead provide
   those params to all hashers InitializehashComposite */
func (h *hashComposite) Prepare(one_shot bool, input_size uint, data []byte) {
	// NOTE(review): this guard looks inverted. h.ha and h.hb are assigned
	// at construction time in newHasher, so `h.ha == nil` is never true in
	// practice and this init block appears to be dead code; and if it ever
	// were true, the h.ha.Common() call below would panic on a nil handle.
	// Left unchanged: this is vendored generated code and matches upstream —
	// confirm against github.com/andybalholm/brotli before altering.
	if h.ha == nil {
		var common_a *hasherCommon
		var common_b *hasherCommon

		common_a = h.ha.Common()
		common_a.params = h.params.hasher
		common_a.is_prepared_ = false
		common_a.dict_num_lookups = 0
		common_a.dict_num_matches = 0
		h.ha.Initialize(h.params)

		common_b = h.hb.Common()
		common_b.params = h.params.hasher
		common_b.is_prepared_ = false
		common_b.dict_num_lookups = 0
		common_b.dict_num_matches = 0
		h.hb.Initialize(h.params)
	}

	h.ha.Prepare(one_shot, input_size, data)
	h.hb.Prepare(one_shot, input_size, data)
}
|
||||
|
||||
// Store indexes position ix in both sub-hashers.
func (h *hashComposite) Store(data []byte, mask uint, ix uint) {
	h.ha.Store(data, mask, ix)
	h.hb.Store(data, mask, ix)
}
|
||||
|
||||
// StoreRange indexes [ix_start, ix_end) in both sub-hashers.
func (h *hashComposite) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) {
	h.ha.StoreRange(data, mask, ix_start, ix_end)
	h.hb.StoreRange(data, mask, ix_start, ix_end)
}
|
||||
|
||||
// StitchToPreviousBlock forwards the block-boundary bytes to both
// sub-hashers.
func (h *hashComposite) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
	h.ha.StitchToPreviousBlock(num_bytes, position, ringbuffer, ring_buffer_mask)
	h.hb.StitchToPreviousBlock(num_bytes, position, ringbuffer, ring_buffer_mask)
}
|
||||
|
||||
// PrepareDistanceCache lets both sub-hashers extend the distance cache.
func (h *hashComposite) PrepareDistanceCache(distance_cache []int) {
	h.ha.PrepareDistanceCache(distance_cache)
	h.hb.PrepareDistanceCache(distance_cache)
}
|
||||
|
||||
// FindLongestMatch queries both sub-hashers in turn; since each only
// updates out when it beats out.score, the combined result is the better
// of the two.
func (h *hashComposite) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
	h.ha.FindLongestMatch(dictionary, data, ring_buffer_mask, distance_cache, cur_ix, max_length, max_backward, gap, max_distance, out)
	h.hb.FindLongestMatch(dictionary, data, ring_buffer_mask, distance_cache, cur_ix, max_length, max_backward, gap, max_distance, out)
}
|
||||
252
vendor/github.com/andybalholm/brotli/hash_forgetful_chain.go
generated
vendored
Normal file
252
vendor/github.com/andybalholm/brotli/hash_forgetful_chain.go
generated
vendored
Normal file
@ -0,0 +1,252 @@
|
||||
package brotli
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
/* Copyright 2016 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
// HashTypeLength: each hash reads 4 input bytes.
func (*hashForgetfulChain) HashTypeLength() uint {
	return 4
}
|
||||
|
||||
// StoreLookahead: Store reads at most 4 bytes past a stored position.
func (*hashForgetfulChain) StoreLookahead() uint {
	return 4
}
|
||||
|
||||
/* HashBytes is the function that chooses the bucket to place the address in.*/
|
||||
func (h *hashForgetfulChain) HashBytes(data []byte) uint {
|
||||
var hash uint32 = binary.LittleEndian.Uint32(data) * kHashMul32
|
||||
|
||||
/* The higher bits contain more mixture from the multiplication,
|
||||
so we take our results from there. */
|
||||
return uint(hash >> (32 - h.bucketBits))
|
||||
}
|
||||
|
||||
// slot is one chain node in a bank: delta is the (saturated 16-bit)
// distance back to the previous occurrence, next the index of the next
// node in the same bank.
type slot struct {
	delta uint16
	next  uint16
}
|
||||
|
||||
/* A (forgetful) hash table to the data seen by the compressor, to
   help create backward references to previous data.

   Hashes are stored in chains which are bucketed to groups. Group of chains
   share a storage "bank". When more than "bank size" chain nodes are added,
   oldest nodes are replaced; this way several chains may share a tail. */
type hashForgetfulChain struct {
	hasherCommon

	// Configuration (set at construction in newHasher).
	bucketBits              uint
	numBanks                uint
	bankBits                uint
	numLastDistancesToCheck int

	// addr/head: per-bucket most-recent position and its chain head slot.
	addr []uint32
	head []uint16
	// tiny_hash maps the low 16 bits of a position to the low byte of its
	// bucket key, used to cheaply reject distance-cache candidates.
	tiny_hash     [65536]byte
	banks         [][]slot
	free_slot_idx []uint16
	// max_hops bounds the chain walk per FindLongestMatch (quality-scaled).
	max_hops uint
}
|
||||
|
||||
// Initialize sizes the buckets and banks from the configured bit widths and
// derives max_hops from the encoder quality.
func (h *hashForgetfulChain) Initialize(params *encoderParams) {
	// Chain-walk budget scales exponentially with quality; the 7-vs-8 base
	// mirrors the upstream reference implementation.
	var q uint
	if params.quality > 6 {
		q = 7
	} else {
		q = 8
	}
	h.max_hops = q << uint(params.quality-4)

	bankSize := 1 << h.bankBits
	bucketSize := 1 << h.bucketBits

	h.addr = make([]uint32, bucketSize)
	h.head = make([]uint16, bucketSize)
	h.banks = make([][]slot, h.numBanks)
	for i := range h.banks {
		h.banks[i] = make([]slot, bankSize)
	}
	h.free_slot_idx = make([]uint16, h.numBanks)
}
|
||||
|
||||
// Prepare resets the tables for a new stream. For a tiny one-shot input only
// the buckets that will actually be touched are reset; otherwise everything
// is cleared.
func (h *hashForgetfulChain) Prepare(one_shot bool, input_size uint, data []byte) {
	var partial_prepare_threshold uint = (1 << h.bucketBits) >> 6
	/* Partial preparation is 100 times slower (per socket). */
	if one_shot && input_size <= partial_prepare_threshold {
		var i uint
		for i = 0; i < input_size; i++ {
			var bucket uint = h.HashBytes(data[i:])

			/* See InitEmpty comment. */
			h.addr[bucket] = 0xCCCCCCCC

			h.head[bucket] = 0xCCCC
		}
	} else {
		/* Fill |addr| array with 0xCCCCCCCC value. Because of wrapping, position
		   processed by hasher never reaches 3GB + 64M; this makes all new chains
		   to be terminated after the first node. */
		for i := range h.addr {
			h.addr[i] = 0xCCCCCCCC
		}

		for i := range h.head {
			h.head[i] = 0
		}
	}

	// Clear the position fingerprints and bank allocation cursors.
	h.tiny_hash = [65536]byte{}
	for i := range h.free_slot_idx {
		h.free_slot_idx[i] = 0
	}
}
|
||||
|
||||
/* Look at 4 bytes at &data[ix & mask]. Compute a hash from these, and prepend
   node to corresponding chain; also update tiny_hash for current position. */
func (h *hashForgetfulChain) Store(data []byte, mask uint, ix uint) {
	var key uint = h.HashBytes(data[ix&mask:])
	var bank uint = key & (h.numBanks - 1)
	// Claim the next slot in this bank, wrapping (and thus overwriting the
	// oldest node) once the bank is full — this is the "forgetful" part.
	idx := uint(h.free_slot_idx[bank]) & ((1 << h.bankBits) - 1)
	h.free_slot_idx[bank]++
	var delta uint = ix - uint(h.addr[key])
	h.tiny_hash[uint16(ix)] = byte(key)
	// Saturate the back-distance to 16 bits; a capped delta ends the walk
	// early rather than producing a wrong position.
	if delta > 0xFFFF {
		delta = 0xFFFF
	}
	h.banks[bank][idx].delta = uint16(delta)
	h.banks[bank][idx].next = h.head[key]
	h.addr[key] = uint32(ix)
	h.head[key] = uint16(idx)
}
|
||||
|
||||
func (h *hashForgetfulChain) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) {
|
||||
var i uint
|
||||
for i = ix_start; i < ix_end; i++ {
|
||||
h.Store(data, mask, i)
|
||||
}
|
||||
}
|
||||
|
||||
// StitchToPreviousBlock indexes the last three positions of the previous
// block so matches can cross the block boundary.
func (h *hashForgetfulChain) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
	if num_bytes >= h.HashTypeLength()-1 && position >= 3 {
		/* Prepare the hashes for three last bytes of the last write.
		   These could not be calculated before, since they require knowledge
		   of both the previous and the current block. */
		h.Store(ringbuffer, ring_buffer_mask, position-3)
		h.Store(ringbuffer, ring_buffer_mask, position-2)
		h.Store(ringbuffer, ring_buffer_mask, position-1)
	}
}
|
||||
|
||||
// PrepareDistanceCache extends the cache with heuristic neighbors of the
// recent distances, up to this hasher's configured check count.
func (h *hashForgetfulChain) PrepareDistanceCache(distance_cache []int) {
	prepareDistanceCache(distance_cache, h.numLastDistancesToCheck)
}
|
||||
|
||||
/* Find a longest backward match of &data[cur_ix] up to the length of
   max_length and stores the position cur_ix in the hash table.

   REQUIRES: PrepareDistanceCachehashForgetfulChain must be invoked for current distance cache
   values; if this method is invoked repeatedly with the same distance
   cache values, it is enough to invoke PrepareDistanceCachehashForgetfulChain once.

   Does not look for matches longer than max_length.
   Does not look for matches further away than max_backward.
   Writes the best match into |out|.
   |out|->score is updated only if a better match is found. */
func (h *hashForgetfulChain) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
	var cur_ix_masked uint = cur_ix & ring_buffer_mask
	var min_score uint = out.score
	var best_score uint = out.score
	var best_len uint = out.len
	var key uint = h.HashBytes(data[cur_ix_masked:])
	var tiny_hash byte = byte(key)
	/* Don't accept a short copy from far away. */
	out.len = 0

	out.len_code_delta = 0

	/* Try last distance first. */
	for i := 0; i < h.numLastDistancesToCheck; i++ {
		var backward uint = uint(distance_cache[i])
		var prev_ix uint = (cur_ix - backward)

		/* For distance code 0 we want to consider 2-byte matches. */
		// Cheap fingerprint rejection via tiny_hash for the other codes.
		if i > 0 && h.tiny_hash[uint16(prev_ix)] != tiny_hash {
			continue
		}
		// prev_ix >= cur_ix catches unsigned underflow of cur_ix - backward.
		if prev_ix >= cur_ix || backward > max_backward {
			continue
		}

		prev_ix &= ring_buffer_mask
		{
			var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
			if len >= 2 {
				var score uint = backwardReferenceScoreUsingLastDistance(uint(len))
				if best_score < score {
					// Non-primary cache slots pay an extra penalty.
					if i != 0 {
						score -= backwardReferencePenaltyUsingLastDistance(uint(i))
					}
					if best_score < score {
						best_score = score
						best_len = uint(len)
						out.len = best_len
						out.distance = backward
						out.score = best_score
					}
				}
			}
		}
	}
	// Walk this bucket's chain, at most max_hops nodes, accumulating the
	// backward distance from the per-node deltas.
	{
		var bank uint = key & (h.numBanks - 1)
		var backward uint = 0
		var hops uint = h.max_hops
		var delta uint = cur_ix - uint(h.addr[key])
		var slot uint = uint(h.head[key])
		for {
			tmp6 := hops
			hops--
			if tmp6 == 0 {
				break
			}
			var prev_ix uint
			var last uint = slot
			backward += delta
			if backward > max_backward {
				break
			}
			prev_ix = (cur_ix - backward) & ring_buffer_mask
			slot = uint(h.banks[bank][last].next)
			delta = uint(h.banks[bank][last].delta)
			// Quick rejection: the byte just past the current best length
			// must match before paying for a full length comparison.
			if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] {
				continue
			}
			{
				var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
				if len >= 4 {
					/* Comparing for >= 3 does not change the semantics, but just saves
					   for a few unnecessary binary logarithms in backward reference
					   score, since we are not interested in such short matches. */
					var score uint = backwardReferenceScore(uint(len), backward)
					if best_score < score {
						best_score = score
						best_len = uint(len)
						out.len = best_len
						out.distance = backward
						out.score = best_score
					}
				}
			}
		}

		h.Store(data, ring_buffer_mask, cur_ix)
	}

	// Fall back to the static dictionary only when nothing improved.
	if out.score == min_score {
		searchInStaticDictionary(dictionary, h, data[cur_ix_masked:], max_length, max_backward+gap, max_distance, out, false)
	}
}
|
||||
214
vendor/github.com/andybalholm/brotli/hash_longest_match_quickly.go
generated
vendored
Normal file
214
vendor/github.com/andybalholm/brotli/hash_longest_match_quickly.go
generated
vendored
Normal file
@ -0,0 +1,214 @@
|
||||
package brotli
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
/* Copyright 2010 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* For BUCKET_SWEEP == 1, enabling the dictionary lookup makes compression
|
||||
a little faster (0.5% - 1%) and it compresses 0.15% better on small text
|
||||
and HTML inputs. */
|
||||
|
||||
// HashTypeLength: each hash reads (up to) 8 input bytes.
func (*hashLongestMatchQuickly) HashTypeLength() uint {
	return 8
}
|
||||
|
||||
// StoreLookahead: Store reads at most 8 bytes past a stored position.
func (*hashLongestMatchQuickly) StoreLookahead() uint {
	return 8
}
|
||||
|
||||
/* HashBytes is the function that chooses the bucket to place
|
||||
the address in. The HashLongestMatch and hashLongestMatchQuickly
|
||||
classes have separate, different implementations of hashing. */
|
||||
func (h *hashLongestMatchQuickly) HashBytes(data []byte) uint32 {
|
||||
var hash uint64 = ((binary.LittleEndian.Uint64(data) << (64 - 8*h.hashLen)) * kHashMul64)
|
||||
|
||||
/* The higher bits contain more mixture from the multiplication,
|
||||
so we take our results from there. */
|
||||
return uint32(hash >> (64 - h.bucketBits))
|
||||
}
|
||||
|
||||
/* A (forgetful) hash table to the data seen by the compressor, to
   help create backward references to previous data.

   This is a hash map of fixed size (1 << 16). Starting from the
   given index, 1 buckets are used to store values of a key. */
type hashLongestMatchQuickly struct {
	hasherCommon

	// Configuration (set at construction in newHasher): table size,
	// number of adjacent buckets swept per key, bytes hashed per key,
	// and whether the static dictionary is consulted.
	bucketBits    uint
	bucketSweep   int
	hashLen       uint
	useDictionary bool

	// buckets maps a key (plus sweep offset) to the last stored position.
	buckets []uint32
}
|
||||
|
||||
// Initialize allocates the bucket table. The extra bucketSweep entries
// let a sweep starting at the last key index safely read past 1<<bucketBits.
func (h *hashLongestMatchQuickly) Initialize(params *encoderParams) {
	h.buckets = make([]uint32, 1<<h.bucketBits+h.bucketSweep)
}
|
||||
|
||||
// Prepare clears the bucket table; for a tiny one-shot input only the
// buckets that will actually be touched are cleared.
func (h *hashLongestMatchQuickly) Prepare(one_shot bool, input_size uint, data []byte) {
	var partial_prepare_threshold uint = (4 << h.bucketBits) >> 7
	/* Partial preparation is 100 times slower (per socket). */
	if one_shot && input_size <= partial_prepare_threshold {
		var i uint
		for i = 0; i < input_size; i++ {
			var key uint32 = h.HashBytes(data[i:])
			for j := 0; j < h.bucketSweep; j++ {
				h.buckets[key+uint32(j)] = 0
			}
		}
	} else {
		/* It is not strictly necessary to fill this buffer here, but
		   not filling will make the results of the compression stochastic
		   (but correct). This is because random data would cause the
		   system to find accidentally good backward references here and there. */
		for i := range h.buckets {
			h.buckets[i] = 0
		}
	}
}
|
||||
|
||||
/* Look at 5 bytes at &data[ix & mask].
   Compute a hash from these, and store the value somewhere within
   [ix .. ix+3]. */
func (h *hashLongestMatchQuickly) Store(data []byte, mask uint, ix uint) {
	var key uint32 = h.HashBytes(data[ix&mask:])
	// Derive the sweep offset from the position so nearby positions spread
	// across the swept buckets instead of overwriting one entry.
	var off uint32 = uint32(ix>>3) % uint32(h.bucketSweep)
	/* Wiggle the value with the bucket sweep range. */
	h.buckets[key+off] = uint32(ix)
}
|
||||
|
||||
func (h *hashLongestMatchQuickly) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) {
|
||||
var i uint
|
||||
for i = ix_start; i < ix_end; i++ {
|
||||
h.Store(data, mask, i)
|
||||
}
|
||||
}
|
||||
|
||||
// StitchToPreviousBlock indexes the last three positions of the previous
// block so matches can cross the block boundary.
func (h *hashLongestMatchQuickly) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
	if num_bytes >= h.HashTypeLength()-1 && position >= 3 {
		/* Prepare the hashes for three last bytes of the last write.
		   These could not be calculated before, since they require knowledge
		   of both the previous and the current block. */
		h.Store(ringbuffer, ringbuffer_mask, position-3)
		h.Store(ringbuffer, ringbuffer_mask, position-2)
		h.Store(ringbuffer, ringbuffer_mask, position-1)
	}
}
|
||||
|
||||
// PrepareDistanceCache is a no-op: this hasher only ever tries the single
// most recent cached distance (distance_cache[0]) in FindLongestMatch.
func (*hashLongestMatchQuickly) PrepareDistanceCache(distance_cache []int) {
}
|
||||
|
||||
/* Find a longest backward match of &data[cur_ix & ring_buffer_mask]
   up to the length of max_length and stores the position cur_ix in the
   hash table.

   Does not look for matches longer than max_length.
   Does not look for matches further away than max_backward.
   Writes the best match into |out|.
   |out|->score is updated only if a better match is found. */
func (h *hashLongestMatchQuickly) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
	var best_len_in uint = out.len
	var cur_ix_masked uint = cur_ix & ring_buffer_mask
	var key uint32 = h.HashBytes(data[cur_ix_masked:])
	// compare_char is the byte just past the current best length; candidates
	// must match it before a full length comparison is paid for.
	var compare_char int = int(data[cur_ix_masked+best_len_in])
	var min_score uint = out.score
	var best_score uint = out.score
	var best_len uint = best_len_in
	var cached_backward uint = uint(distance_cache[0])
	var prev_ix uint = cur_ix - cached_backward
	var bucket []uint32
	out.len_code_delta = 0
	// First try reusing the most recent distance (prev_ix < cur_ix also
	// rejects unsigned underflow).
	if prev_ix < cur_ix {
		prev_ix &= uint(uint32(ring_buffer_mask))
		if compare_char == int(data[prev_ix+best_len]) {
			var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
			if len >= 4 {
				var score uint = backwardReferenceScoreUsingLastDistance(uint(len))
				if best_score < score {
					best_score = score
					best_len = uint(len)
					out.len = uint(len)
					out.distance = cached_backward
					out.score = best_score
					compare_char = int(data[cur_ix_masked+best_len])
					// With a single bucket we can store and return early.
					if h.bucketSweep == 1 {
						h.buckets[key] = uint32(cur_ix)
						return
					}
				}
			}
		}
	}

	if h.bucketSweep == 1 {
		var backward uint
		var len uint

		/* Only one to look for, don't bother to prepare for a loop. */
		prev_ix = uint(h.buckets[key])

		h.buckets[key] = uint32(cur_ix)
		backward = cur_ix - prev_ix
		prev_ix &= uint(uint32(ring_buffer_mask))
		if compare_char != int(data[prev_ix+best_len_in]) {
			return
		}

		if backward == 0 || backward > max_backward {
			return
		}

		len = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
		if len >= 4 {
			var score uint = backwardReferenceScore(uint(len), backward)
			if best_score < score {
				out.len = uint(len)
				out.distance = backward
				out.score = score
				return
			}
		}
	} else {
		// Sweep bucketSweep consecutive buckets; the closure in the post
		// statement advances the window and loads the next candidate.
		bucket = h.buckets[key:]
		var i int
		prev_ix = uint(bucket[0])
		bucket = bucket[1:]
		for i = 0; i < h.bucketSweep; (func() { i++; tmp3 := bucket; bucket = bucket[1:]; prev_ix = uint(tmp3[0]) })() {
			var backward uint = cur_ix - prev_ix
			var len uint
			prev_ix &= uint(uint32(ring_buffer_mask))
			if compare_char != int(data[prev_ix+best_len]) {
				continue
			}

			if backward == 0 || backward > max_backward {
				continue
			}

			len = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
			if len >= 4 {
				var score uint = backwardReferenceScore(uint(len), backward)
				if best_score < score {
					best_score = score
					best_len = uint(len)
					out.len = best_len
					out.distance = backward
					out.score = score
					compare_char = int(data[cur_ix_masked+best_len])
				}
			}
		}
	}

	// Fall back to the static dictionary only when configured and when
	// nothing above improved the score.
	if h.useDictionary && min_score == out.score {
		searchInStaticDictionary(dictionary, h, data[cur_ix_masked:], max_length, max_backward+gap, max_distance, out, true)
	}

	h.buckets[key+uint32((cur_ix>>3)%uint(h.bucketSweep))] = uint32(cur_ix)
}
|
||||
168
vendor/github.com/andybalholm/brotli/hash_rolling.go
generated
vendored
Normal file
168
vendor/github.com/andybalholm/brotli/hash_rolling.go
generated
vendored
Normal file
@ -0,0 +1,168 @@
|
||||
package brotli
|
||||
|
||||
/* Copyright 2018 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* NOTE: this hasher does not search in the dictionary. It is used as
|
||||
backup-hasher, the main hasher already searches in it. */
|
||||
|
||||
// Multiplier for the rolling hash.
const kRollingHashMul32 uint32 = 69069

// Sentinel for table entries holding no position yet.
const kInvalidPosHashRolling uint32 = 0xffffffff
|
||||
|
||||
/* This hasher uses a longer forward length, but returning a higher value here
   will hurt compression by the main hasher when combined with a composite
   hasher. The hasher tests for forward itself instead. */
func (*hashRolling) HashTypeLength() uint {
	return 4
}
|
||||
|
||||
// StoreLookahead: advertised lookahead of 4 bytes (see the note on
// HashTypeLength above).
func (*hashRolling) StoreLookahead() uint {
	return 4
}
|
||||
|
||||
/* Computes a code from a single byte. A lookup table of 256 values could be
   used, but simply adding 1 works about as good. */
func (*hashRolling) HashByte(b byte) uint32 {
	return uint32(b) + 1
}
|
||||
|
||||
// HashRollingFunctionInitial folds one byte into the rolling state during
// initial accumulation (nothing is removed yet); relies on 32-bit wraparound.
func (h *hashRolling) HashRollingFunctionInitial(state uint32, add byte, factor uint32) uint32 {
	return uint32(factor*state + h.HashByte(add))
}
|
||||
|
||||
// HashRollingFunction advances the rolling state one step: adds the incoming
// byte and removes the oldest one, whose weight is factor_remove.
func (h *hashRolling) HashRollingFunction(state uint32, add byte, rem byte, factor uint32, factor_remove uint32) uint32 {
	return uint32(factor*state + h.HashByte(add) - factor_remove*h.HashByte(rem))
}
|
||||
|
||||
/* Rolling hash for long distance long string matches. Stores one position
   per bucket, bucket key is computed over a long region. */
type hashRolling struct {
	hasherCommon

	// jump is the sampling stride over the 32-byte hash window.
	jump int

	state uint32
	table []uint32
	// next_ix: next position to process when stitching blocks.
	next_ix uint
	// factor / factor_remove: rolling-hash multiplier and the weight of
	// the byte leaving the window (factor**(window samples), mod 2^32).
	factor        uint32
	factor_remove uint32
}
|
||||
|
||||
func (h *hashRolling) Initialize(params *encoderParams) {
|
||||
h.state = 0
|
||||
h.next_ix = 0
|
||||
|
||||
h.factor = kRollingHashMul32
|
||||
|
||||
/* Compute the factor of the oldest byte to remove: factor**steps modulo
|
||||
0xffffffff (the multiplications rely on 32-bit overflow) */
|
||||
h.factor_remove = 1
|
||||
|
||||
for i := 0; i < 32; i += h.jump {
|
||||
h.factor_remove *= h.factor
|
||||
}
|
||||
|
||||
h.table = make([]uint32, 16777216)
|
||||
for i := 0; i < 16777216; i++ {
|
||||
h.table[i] = kInvalidPosHashRolling
|
||||
}
|
||||
}
|
||||
|
||||
func (h *hashRolling) Prepare(one_shot bool, input_size uint, data []byte) {
|
||||
/* Too small size, cannot use this hasher. */
|
||||
if input_size < 32 {
|
||||
return
|
||||
}
|
||||
h.state = 0
|
||||
for i := 0; i < 32; i += h.jump {
|
||||
h.state = h.HashRollingFunctionInitial(h.state, data[i], h.factor)
|
||||
}
|
||||
}
|
||||
|
||||
func (*hashRolling) Store(data []byte, mask uint, ix uint) {
|
||||
}
|
||||
|
||||
func (*hashRolling) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) {
|
||||
}
|
||||
|
||||
func (h *hashRolling) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
|
||||
var position_masked uint
|
||||
/* In this case we must re-initialize the hasher from scratch from the
|
||||
current position. */
|
||||
|
||||
var available uint = num_bytes
|
||||
if position&uint(h.jump-1) != 0 {
|
||||
var diff uint = uint(h.jump) - (position & uint(h.jump-1))
|
||||
if diff > available {
|
||||
available = 0
|
||||
} else {
|
||||
available = available - diff
|
||||
}
|
||||
position += diff
|
||||
}
|
||||
|
||||
position_masked = position & ring_buffer_mask
|
||||
|
||||
/* wrapping around ringbuffer not handled. */
|
||||
if available > ring_buffer_mask-position_masked {
|
||||
available = ring_buffer_mask - position_masked
|
||||
}
|
||||
|
||||
h.Prepare(false, available, ringbuffer[position&ring_buffer_mask:])
|
||||
h.next_ix = position
|
||||
}
|
||||
|
||||
func (*hashRolling) PrepareDistanceCache(distance_cache []int) {
|
||||
}
|
||||
|
||||
func (h *hashRolling) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
|
||||
var cur_ix_masked uint = cur_ix & ring_buffer_mask
|
||||
var pos uint = h.next_ix
|
||||
|
||||
if cur_ix&uint(h.jump-1) != 0 {
|
||||
return
|
||||
}
|
||||
|
||||
/* Not enough lookahead */
|
||||
if max_length < 32 {
|
||||
return
|
||||
}
|
||||
|
||||
for pos = h.next_ix; pos <= cur_ix; pos += uint(h.jump) {
|
||||
var code uint32 = h.state & ((16777216 * 64) - 1)
|
||||
var rem byte = data[pos&ring_buffer_mask]
|
||||
var add byte = data[(pos+32)&ring_buffer_mask]
|
||||
var found_ix uint = uint(kInvalidPosHashRolling)
|
||||
|
||||
h.state = h.HashRollingFunction(h.state, add, rem, h.factor, h.factor_remove)
|
||||
|
||||
if code < 16777216 {
|
||||
found_ix = uint(h.table[code])
|
||||
h.table[code] = uint32(pos)
|
||||
if pos == cur_ix && uint32(found_ix) != kInvalidPosHashRolling {
|
||||
/* The cast to 32-bit makes backward distances up to 4GB work even
|
||||
if cur_ix is above 4GB, despite using 32-bit values in the table. */
|
||||
var backward uint = uint(uint32(cur_ix - found_ix))
|
||||
if backward <= max_backward {
|
||||
var found_ix_masked uint = found_ix & ring_buffer_mask
|
||||
var len uint = findMatchLengthWithLimit(data[found_ix_masked:], data[cur_ix_masked:], max_length)
|
||||
if len >= 4 && len > out.len {
|
||||
var score uint = backwardReferenceScore(uint(len), backward)
|
||||
if score > out.score {
|
||||
out.len = uint(len)
|
||||
out.distance = backward
|
||||
out.score = score
|
||||
out.len_code_delta = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
h.next_ix = cur_ix + uint(h.jump)
|
||||
}
|
||||
226
vendor/github.com/andybalholm/brotli/histogram.go
generated
vendored
Normal file
226
vendor/github.com/andybalholm/brotli/histogram.go
generated
vendored
Normal file
@ -0,0 +1,226 @@
|
||||
package brotli
|
||||
|
||||
import "math"
|
||||
|
||||
/* The distance symbols effectively used by "Large Window Brotli" (32-bit). */
|
||||
const numHistogramDistanceSymbols = 544
|
||||
|
||||
type histogramLiteral struct {
|
||||
data_ [numLiteralSymbols]uint32
|
||||
total_count_ uint
|
||||
bit_cost_ float64
|
||||
}
|
||||
|
||||
func histogramClearLiteral(self *histogramLiteral) {
|
||||
self.data_ = [numLiteralSymbols]uint32{}
|
||||
self.total_count_ = 0
|
||||
self.bit_cost_ = math.MaxFloat64
|
||||
}
|
||||
|
||||
func clearHistogramsLiteral(array []histogramLiteral, length uint) {
|
||||
var i uint
|
||||
for i = 0; i < length; i++ {
|
||||
histogramClearLiteral(&array[i:][0])
|
||||
}
|
||||
}
|
||||
|
||||
func histogramAddLiteral(self *histogramLiteral, val uint) {
|
||||
self.data_[val]++
|
||||
self.total_count_++
|
||||
}
|
||||
|
||||
func histogramAddVectorLiteral(self *histogramLiteral, p []byte, n uint) {
|
||||
self.total_count_ += n
|
||||
n += 1
|
||||
for {
|
||||
n--
|
||||
if n == 0 {
|
||||
break
|
||||
}
|
||||
self.data_[p[0]]++
|
||||
p = p[1:]
|
||||
}
|
||||
}
|
||||
|
||||
func histogramAddHistogramLiteral(self *histogramLiteral, v *histogramLiteral) {
|
||||
var i uint
|
||||
self.total_count_ += v.total_count_
|
||||
for i = 0; i < numLiteralSymbols; i++ {
|
||||
self.data_[i] += v.data_[i]
|
||||
}
|
||||
}
|
||||
|
||||
func histogramDataSizeLiteral() uint {
|
||||
return numLiteralSymbols
|
||||
}
|
||||
|
||||
type histogramCommand struct {
|
||||
data_ [numCommandSymbols]uint32
|
||||
total_count_ uint
|
||||
bit_cost_ float64
|
||||
}
|
||||
|
||||
func histogramClearCommand(self *histogramCommand) {
|
||||
self.data_ = [numCommandSymbols]uint32{}
|
||||
self.total_count_ = 0
|
||||
self.bit_cost_ = math.MaxFloat64
|
||||
}
|
||||
|
||||
func clearHistogramsCommand(array []histogramCommand, length uint) {
|
||||
var i uint
|
||||
for i = 0; i < length; i++ {
|
||||
histogramClearCommand(&array[i:][0])
|
||||
}
|
||||
}
|
||||
|
||||
func histogramAddCommand(self *histogramCommand, val uint) {
|
||||
self.data_[val]++
|
||||
self.total_count_++
|
||||
}
|
||||
|
||||
func histogramAddVectorCommand(self *histogramCommand, p []uint16, n uint) {
|
||||
self.total_count_ += n
|
||||
n += 1
|
||||
for {
|
||||
n--
|
||||
if n == 0 {
|
||||
break
|
||||
}
|
||||
self.data_[p[0]]++
|
||||
p = p[1:]
|
||||
}
|
||||
}
|
||||
|
||||
func histogramAddHistogramCommand(self *histogramCommand, v *histogramCommand) {
|
||||
var i uint
|
||||
self.total_count_ += v.total_count_
|
||||
for i = 0; i < numCommandSymbols; i++ {
|
||||
self.data_[i] += v.data_[i]
|
||||
}
|
||||
}
|
||||
|
||||
func histogramDataSizeCommand() uint {
|
||||
return numCommandSymbols
|
||||
}
|
||||
|
||||
type histogramDistance struct {
|
||||
data_ [numDistanceSymbols]uint32
|
||||
total_count_ uint
|
||||
bit_cost_ float64
|
||||
}
|
||||
|
||||
func histogramClearDistance(self *histogramDistance) {
|
||||
self.data_ = [numDistanceSymbols]uint32{}
|
||||
self.total_count_ = 0
|
||||
self.bit_cost_ = math.MaxFloat64
|
||||
}
|
||||
|
||||
func clearHistogramsDistance(array []histogramDistance, length uint) {
|
||||
var i uint
|
||||
for i = 0; i < length; i++ {
|
||||
histogramClearDistance(&array[i:][0])
|
||||
}
|
||||
}
|
||||
|
||||
func histogramAddDistance(self *histogramDistance, val uint) {
|
||||
self.data_[val]++
|
||||
self.total_count_++
|
||||
}
|
||||
|
||||
func histogramAddVectorDistance(self *histogramDistance, p []uint16, n uint) {
|
||||
self.total_count_ += n
|
||||
n += 1
|
||||
for {
|
||||
n--
|
||||
if n == 0 {
|
||||
break
|
||||
}
|
||||
self.data_[p[0]]++
|
||||
p = p[1:]
|
||||
}
|
||||
}
|
||||
|
||||
func histogramAddHistogramDistance(self *histogramDistance, v *histogramDistance) {
|
||||
var i uint
|
||||
self.total_count_ += v.total_count_
|
||||
for i = 0; i < numDistanceSymbols; i++ {
|
||||
self.data_[i] += v.data_[i]
|
||||
}
|
||||
}
|
||||
|
||||
func histogramDataSizeDistance() uint {
|
||||
return numDistanceSymbols
|
||||
}
|
||||
|
||||
type blockSplitIterator struct {
|
||||
split_ *blockSplit
|
||||
idx_ uint
|
||||
type_ uint
|
||||
length_ uint
|
||||
}
|
||||
|
||||
func initBlockSplitIterator(self *blockSplitIterator, split *blockSplit) {
|
||||
self.split_ = split
|
||||
self.idx_ = 0
|
||||
self.type_ = 0
|
||||
if len(split.lengths) > 0 {
|
||||
self.length_ = uint(split.lengths[0])
|
||||
} else {
|
||||
self.length_ = 0
|
||||
}
|
||||
}
|
||||
|
||||
func blockSplitIteratorNext(self *blockSplitIterator) {
|
||||
if self.length_ == 0 {
|
||||
self.idx_++
|
||||
self.type_ = uint(self.split_.types[self.idx_])
|
||||
self.length_ = uint(self.split_.lengths[self.idx_])
|
||||
}
|
||||
|
||||
self.length_--
|
||||
}
|
||||
|
||||
func buildHistogramsWithContext(cmds []command, literal_split *blockSplit, insert_and_copy_split *blockSplit, dist_split *blockSplit, ringbuffer []byte, start_pos uint, mask uint, prev_byte byte, prev_byte2 byte, context_modes []int, literal_histograms []histogramLiteral, insert_and_copy_histograms []histogramCommand, copy_dist_histograms []histogramDistance) {
|
||||
var pos uint = start_pos
|
||||
var literal_it blockSplitIterator
|
||||
var insert_and_copy_it blockSplitIterator
|
||||
var dist_it blockSplitIterator
|
||||
|
||||
initBlockSplitIterator(&literal_it, literal_split)
|
||||
initBlockSplitIterator(&insert_and_copy_it, insert_and_copy_split)
|
||||
initBlockSplitIterator(&dist_it, dist_split)
|
||||
for i := range cmds {
|
||||
var cmd *command = &cmds[i]
|
||||
var j uint
|
||||
blockSplitIteratorNext(&insert_and_copy_it)
|
||||
histogramAddCommand(&insert_and_copy_histograms[insert_and_copy_it.type_], uint(cmd.cmd_prefix_))
|
||||
|
||||
/* TODO: unwrap iterator blocks. */
|
||||
for j = uint(cmd.insert_len_); j != 0; j-- {
|
||||
var context uint
|
||||
blockSplitIteratorNext(&literal_it)
|
||||
context = literal_it.type_
|
||||
if context_modes != nil {
|
||||
var lut contextLUT = getContextLUT(context_modes[context])
|
||||
context = (context << literalContextBits) + uint(getContext(prev_byte, prev_byte2, lut))
|
||||
}
|
||||
|
||||
histogramAddLiteral(&literal_histograms[context], uint(ringbuffer[pos&mask]))
|
||||
prev_byte2 = prev_byte
|
||||
prev_byte = ringbuffer[pos&mask]
|
||||
pos++
|
||||
}
|
||||
|
||||
pos += uint(commandCopyLen(cmd))
|
||||
if commandCopyLen(cmd) != 0 {
|
||||
prev_byte2 = ringbuffer[(pos-2)&mask]
|
||||
prev_byte = ringbuffer[(pos-1)&mask]
|
||||
if cmd.cmd_prefix_ >= 128 {
|
||||
var context uint
|
||||
blockSplitIteratorNext(&dist_it)
|
||||
context = uint(uint32(dist_it.type_<<distanceContextBits) + commandDistanceContext(cmd))
|
||||
histogramAddDistance(©_dist_histograms[context], uint(cmd.dist_prefix_)&0x3FF)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
184
vendor/github.com/andybalholm/brotli/http.go
generated
vendored
Normal file
184
vendor/github.com/andybalholm/brotli/http.go
generated
vendored
Normal file
@ -0,0 +1,184 @@
|
||||
package brotli
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// HTTPCompressor chooses a compression method (brotli, gzip, or none) based on
|
||||
// the Accept-Encoding header, sets the Content-Encoding header, and returns a
|
||||
// WriteCloser that implements that compression. The Close method must be called
|
||||
// before the current HTTP handler returns.
|
||||
func HTTPCompressor(w http.ResponseWriter, r *http.Request) io.WriteCloser {
|
||||
if w.Header().Get("Vary") == "" {
|
||||
w.Header().Set("Vary", "Accept-Encoding")
|
||||
}
|
||||
|
||||
encoding := negotiateContentEncoding(r, []string{"br", "gzip"})
|
||||
switch encoding {
|
||||
case "br":
|
||||
w.Header().Set("Content-Encoding", "br")
|
||||
return NewWriterV2(w, DefaultCompression)
|
||||
case "gzip":
|
||||
w.Header().Set("Content-Encoding", "gzip")
|
||||
return gzip.NewWriter(w)
|
||||
}
|
||||
return nopCloser{w}
|
||||
}
|
||||
|
||||
// negotiateContentEncoding returns the best offered content encoding for the
|
||||
// request's Accept-Encoding header. If two offers match with equal weight and
|
||||
// then the offer earlier in the list is preferred. If no offers are
|
||||
// acceptable, then "" is returned.
|
||||
func negotiateContentEncoding(r *http.Request, offers []string) string {
|
||||
bestOffer := "identity"
|
||||
bestQ := -1.0
|
||||
specs := parseAccept(r.Header, "Accept-Encoding")
|
||||
for _, offer := range offers {
|
||||
for _, spec := range specs {
|
||||
if spec.Q > bestQ &&
|
||||
(spec.Value == "*" || spec.Value == offer) {
|
||||
bestQ = spec.Q
|
||||
bestOffer = offer
|
||||
}
|
||||
}
|
||||
}
|
||||
if bestQ == 0 {
|
||||
bestOffer = ""
|
||||
}
|
||||
return bestOffer
|
||||
}
|
||||
|
||||
// acceptSpec describes an Accept* header.
|
||||
type acceptSpec struct {
|
||||
Value string
|
||||
Q float64
|
||||
}
|
||||
|
||||
// parseAccept parses Accept* headers.
|
||||
func parseAccept(header http.Header, key string) (specs []acceptSpec) {
|
||||
loop:
|
||||
for _, s := range header[key] {
|
||||
for {
|
||||
var spec acceptSpec
|
||||
spec.Value, s = expectTokenSlash(s)
|
||||
if spec.Value == "" {
|
||||
continue loop
|
||||
}
|
||||
spec.Q = 1.0
|
||||
s = skipSpace(s)
|
||||
if strings.HasPrefix(s, ";") {
|
||||
s = skipSpace(s[1:])
|
||||
if !strings.HasPrefix(s, "q=") {
|
||||
continue loop
|
||||
}
|
||||
spec.Q, s = expectQuality(s[2:])
|
||||
if spec.Q < 0.0 {
|
||||
continue loop
|
||||
}
|
||||
}
|
||||
specs = append(specs, spec)
|
||||
s = skipSpace(s)
|
||||
if !strings.HasPrefix(s, ",") {
|
||||
continue loop
|
||||
}
|
||||
s = skipSpace(s[1:])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func skipSpace(s string) (rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if octetTypes[s[i]]&isSpace == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[i:]
|
||||
}
|
||||
|
||||
func expectTokenSlash(s string) (token, rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
b := s[i]
|
||||
if (octetTypes[b]&isToken == 0) && b != '/' {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[:i], s[i:]
|
||||
}
|
||||
|
||||
func expectQuality(s string) (q float64, rest string) {
|
||||
switch {
|
||||
case len(s) == 0:
|
||||
return -1, ""
|
||||
case s[0] == '0':
|
||||
q = 0
|
||||
case s[0] == '1':
|
||||
q = 1
|
||||
default:
|
||||
return -1, ""
|
||||
}
|
||||
s = s[1:]
|
||||
if !strings.HasPrefix(s, ".") {
|
||||
return q, s
|
||||
}
|
||||
s = s[1:]
|
||||
i := 0
|
||||
n := 0
|
||||
d := 1
|
||||
for ; i < len(s); i++ {
|
||||
b := s[i]
|
||||
if b < '0' || b > '9' {
|
||||
break
|
||||
}
|
||||
n = n*10 + int(b) - '0'
|
||||
d *= 10
|
||||
}
|
||||
return q + float64(n)/float64(d), s[i:]
|
||||
}
|
||||
|
||||
// Octet types from RFC 2616.
|
||||
var octetTypes [256]octetType
|
||||
|
||||
type octetType byte
|
||||
|
||||
const (
|
||||
isToken octetType = 1 << iota
|
||||
isSpace
|
||||
)
|
||||
|
||||
func init() {
|
||||
// OCTET = <any 8-bit sequence of data>
|
||||
// CHAR = <any US-ASCII character (octets 0 - 127)>
|
||||
// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
|
||||
// CR = <US-ASCII CR, carriage return (13)>
|
||||
// LF = <US-ASCII LF, linefeed (10)>
|
||||
// SP = <US-ASCII SP, space (32)>
|
||||
// HT = <US-ASCII HT, horizontal-tab (9)>
|
||||
// <"> = <US-ASCII double-quote mark (34)>
|
||||
// CRLF = CR LF
|
||||
// LWS = [CRLF] 1*( SP | HT )
|
||||
// TEXT = <any OCTET except CTLs, but including LWS>
|
||||
// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
|
||||
// | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
|
||||
// token = 1*<any CHAR except CTLs or separators>
|
||||
// qdtext = <any TEXT except <">>
|
||||
|
||||
for c := 0; c < 256; c++ {
|
||||
var t octetType
|
||||
isCtl := c <= 31 || c == 127
|
||||
isChar := 0 <= c && c <= 127
|
||||
isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
|
||||
if strings.ContainsRune(" \t\r\n", rune(c)) {
|
||||
t |= isSpace
|
||||
}
|
||||
if isChar && !isCtl && !isSeparator {
|
||||
t |= isToken
|
||||
}
|
||||
octetTypes[c] = t
|
||||
}
|
||||
}
|
||||
653
vendor/github.com/andybalholm/brotli/huffman.go
generated
vendored
Normal file
653
vendor/github.com/andybalholm/brotli/huffman.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
182
vendor/github.com/andybalholm/brotli/literal_cost.go
generated
vendored
Normal file
182
vendor/github.com/andybalholm/brotli/literal_cost.go
generated
vendored
Normal file
@ -0,0 +1,182 @@
|
||||
package brotli
|
||||
|
||||
func utf8Position(last uint, c uint, clamp uint) uint {
|
||||
if c < 128 {
|
||||
return 0 /* Next one is the 'Byte 1' again. */
|
||||
} else if c >= 192 { /* Next one is the 'Byte 2' of utf-8 encoding. */
|
||||
return brotli_min_size_t(1, clamp)
|
||||
} else {
|
||||
/* Let's decide over the last byte if this ends the sequence. */
|
||||
if last < 0xE0 {
|
||||
return 0 /* Completed two or three byte coding. */ /* Next one is the 'Byte 3' of utf-8 encoding. */
|
||||
} else {
|
||||
return brotli_min_size_t(2, clamp)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func decideMultiByteStatsLevel(pos uint, len uint, mask uint, data []byte) uint {
|
||||
var counts = [3]uint{0} /* should be 2, but 1 compresses better. */
|
||||
var max_utf8 uint = 1
|
||||
var last_c uint = 0
|
||||
var i uint
|
||||
for i = 0; i < len; i++ {
|
||||
var c uint = uint(data[(pos+i)&mask])
|
||||
counts[utf8Position(last_c, c, 2)]++
|
||||
last_c = c
|
||||
}
|
||||
|
||||
if counts[2] < 500 {
|
||||
max_utf8 = 1
|
||||
}
|
||||
|
||||
if counts[1]+counts[2] < 25 {
|
||||
max_utf8 = 0
|
||||
}
|
||||
|
||||
return max_utf8
|
||||
}
|
||||
|
||||
func estimateBitCostsForLiteralsUTF8(pos uint, len uint, mask uint, data []byte, cost []float32) {
|
||||
var max_utf8 uint = decideMultiByteStatsLevel(pos, uint(len), mask, data)
|
||||
/* Bootstrap histograms. */
|
||||
var histogram = [3][256]uint{[256]uint{0}}
|
||||
var window_half uint = 495
|
||||
var in_window uint = brotli_min_size_t(window_half, uint(len))
|
||||
var in_window_utf8 = [3]uint{0}
|
||||
/* max_utf8 is 0 (normal ASCII single byte modeling),
|
||||
1 (for 2-byte UTF-8 modeling), or 2 (for 3-byte UTF-8 modeling). */
|
||||
|
||||
var i uint
|
||||
{
|
||||
var last_c uint = 0
|
||||
var utf8_pos uint = 0
|
||||
for i = 0; i < in_window; i++ {
|
||||
var c uint = uint(data[(pos+i)&mask])
|
||||
histogram[utf8_pos][c]++
|
||||
in_window_utf8[utf8_pos]++
|
||||
utf8_pos = utf8Position(last_c, c, max_utf8)
|
||||
last_c = c
|
||||
}
|
||||
}
|
||||
|
||||
/* Compute bit costs with sliding window. */
|
||||
for i = 0; i < len; i++ {
|
||||
if i >= window_half {
|
||||
var c uint
|
||||
var last_c uint
|
||||
if i < window_half+1 {
|
||||
c = 0
|
||||
} else {
|
||||
c = uint(data[(pos+i-window_half-1)&mask])
|
||||
}
|
||||
if i < window_half+2 {
|
||||
last_c = 0
|
||||
} else {
|
||||
last_c = uint(data[(pos+i-window_half-2)&mask])
|
||||
}
|
||||
/* Remove a byte in the past. */
|
||||
|
||||
var utf8_pos2 uint = utf8Position(last_c, c, max_utf8)
|
||||
histogram[utf8_pos2][data[(pos+i-window_half)&mask]]--
|
||||
in_window_utf8[utf8_pos2]--
|
||||
}
|
||||
|
||||
if i+window_half < len {
|
||||
var c uint = uint(data[(pos+i+window_half-1)&mask])
|
||||
var last_c uint = uint(data[(pos+i+window_half-2)&mask])
|
||||
/* Add a byte in the future. */
|
||||
|
||||
var utf8_pos2 uint = utf8Position(last_c, c, max_utf8)
|
||||
histogram[utf8_pos2][data[(pos+i+window_half)&mask]]++
|
||||
in_window_utf8[utf8_pos2]++
|
||||
}
|
||||
{
|
||||
var c uint
|
||||
var last_c uint
|
||||
if i < 1 {
|
||||
c = 0
|
||||
} else {
|
||||
c = uint(data[(pos+i-1)&mask])
|
||||
}
|
||||
if i < 2 {
|
||||
last_c = 0
|
||||
} else {
|
||||
last_c = uint(data[(pos+i-2)&mask])
|
||||
}
|
||||
var utf8_pos uint = utf8Position(last_c, c, max_utf8)
|
||||
var masked_pos uint = (pos + i) & mask
|
||||
var histo uint = histogram[utf8_pos][data[masked_pos]]
|
||||
var lit_cost float64
|
||||
if histo == 0 {
|
||||
histo = 1
|
||||
}
|
||||
|
||||
lit_cost = fastLog2(in_window_utf8[utf8_pos]) - fastLog2(histo)
|
||||
lit_cost += 0.02905
|
||||
if lit_cost < 1.0 {
|
||||
lit_cost *= 0.5
|
||||
lit_cost += 0.5
|
||||
}
|
||||
|
||||
/* Make the first bytes more expensive -- seems to help, not sure why.
|
||||
Perhaps because the entropy source is changing its properties
|
||||
rapidly in the beginning of the file, perhaps because the beginning
|
||||
of the data is a statistical "anomaly". */
|
||||
if i < 2000 {
|
||||
lit_cost += 0.7 - (float64(2000-i) / 2000.0 * 0.35)
|
||||
}
|
||||
|
||||
cost[i] = float32(lit_cost)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func estimateBitCostsForLiterals(pos uint, len uint, mask uint, data []byte, cost []float32) {
|
||||
if isMostlyUTF8(data, pos, mask, uint(len), kMinUTF8Ratio) {
|
||||
estimateBitCostsForLiteralsUTF8(pos, uint(len), mask, data, cost)
|
||||
return
|
||||
} else {
|
||||
var histogram = [256]uint{0}
|
||||
var window_half uint = 2000
|
||||
var in_window uint = brotli_min_size_t(window_half, uint(len))
|
||||
var i uint
|
||||
/* Bootstrap histogram. */
|
||||
for i = 0; i < in_window; i++ {
|
||||
histogram[data[(pos+i)&mask]]++
|
||||
}
|
||||
|
||||
/* Compute bit costs with sliding window. */
|
||||
for i = 0; i < len; i++ {
|
||||
var histo uint
|
||||
if i >= window_half {
|
||||
/* Remove a byte in the past. */
|
||||
histogram[data[(pos+i-window_half)&mask]]--
|
||||
|
||||
in_window--
|
||||
}
|
||||
|
||||
if i+window_half < len {
|
||||
/* Add a byte in the future. */
|
||||
histogram[data[(pos+i+window_half)&mask]]++
|
||||
|
||||
in_window++
|
||||
}
|
||||
|
||||
histo = histogram[data[(pos+i)&mask]]
|
||||
if histo == 0 {
|
||||
histo = 1
|
||||
}
|
||||
{
|
||||
var lit_cost float64 = fastLog2(in_window) - fastLog2(histo)
|
||||
lit_cost += 0.029
|
||||
if lit_cost < 1.0 {
|
||||
lit_cost *= 0.5
|
||||
lit_cost += 0.5
|
||||
}
|
||||
|
||||
cost[i] = float32(lit_cost)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
45
vendor/github.com/andybalholm/brotli/matchfinder/emitter.go
generated
vendored
Normal file
45
vendor/github.com/andybalholm/brotli/matchfinder/emitter.go
generated
vendored
Normal file
@ -0,0 +1,45 @@
|
||||
package matchfinder
|
||||
|
||||
// An absoluteMatch is like a Match, but it stores indexes into the byte
|
||||
// stream instead of lengths.
|
||||
type absoluteMatch struct {
|
||||
// Start is the index of the first byte.
|
||||
Start int
|
||||
|
||||
// End is the index of the byte after the last byte
|
||||
// (so that End - Start = Length).
|
||||
End int
|
||||
|
||||
// Match is the index of the previous data that matches
|
||||
// (Start - Match = Distance).
|
||||
Match int
|
||||
}
|
||||
|
||||
// A matchEmitter manages the output of matches for a MatchFinder.
|
||||
type matchEmitter struct {
|
||||
// Dst is the destination slice that Matches are added to.
|
||||
Dst []Match
|
||||
|
||||
// NextEmit is the index of the next byte to emit.
|
||||
NextEmit int
|
||||
}
|
||||
|
||||
func (e *matchEmitter) emit(m absoluteMatch) {
|
||||
e.Dst = append(e.Dst, Match{
|
||||
Unmatched: m.Start - e.NextEmit,
|
||||
Length: m.End - m.Start,
|
||||
Distance: m.Start - m.Match,
|
||||
})
|
||||
e.NextEmit = m.End
|
||||
}
|
||||
|
||||
// trim shortens m if it extends past maxEnd. Then if the length is at least
|
||||
// minLength, the match is emitted.
|
||||
func (e *matchEmitter) trim(m absoluteMatch, maxEnd int, minLength int) {
|
||||
if m.End > maxEnd {
|
||||
m.End = maxEnd
|
||||
}
|
||||
if m.End-m.Start >= minLength {
|
||||
e.emit(m)
|
||||
}
|
||||
}
|
||||
169
vendor/github.com/andybalholm/brotli/matchfinder/m0.go
generated
vendored
Normal file
169
vendor/github.com/andybalholm/brotli/matchfinder/m0.go
generated
vendored
Normal file
@ -0,0 +1,169 @@
|
||||
package matchfinder
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// M0 is an implementation of the MatchFinder interface based
|
||||
// on the algorithm used by snappy, but modified to be more like the algorithm
|
||||
// used by compression level 0 of the brotli reference implementation.
|
||||
//
|
||||
// It has a maximum block size of 65536 bytes.
|
||||
type M0 struct {
|
||||
// Lazy turns on "lazy matching," for higher compression but less speed.
|
||||
Lazy bool
|
||||
|
||||
MaxDistance int
|
||||
MaxLength int
|
||||
}
|
||||
|
||||
func (M0) Reset() {}
|
||||
|
||||
const (
|
||||
m0HashLen = 5
|
||||
|
||||
m0TableBits = 14
|
||||
m0TableSize = 1 << m0TableBits
|
||||
m0Shift = 32 - m0TableBits
|
||||
// m0TableMask is redundant, but helps the compiler eliminate bounds
|
||||
// checks.
|
||||
m0TableMask = m0TableSize - 1
|
||||
)
|
||||
|
||||
func (m M0) hash(data uint64) uint64 {
|
||||
hash := (data << (64 - 8*m0HashLen)) * hashMul64
|
||||
return hash >> (64 - m0TableBits)
|
||||
}
|
||||
|
||||
// FindMatches looks for matches in src, appends them to dst, and returns dst.
|
||||
// src must not be longer than 65536 bytes.
|
||||
func (m M0) FindMatches(dst []Match, src []byte) []Match {
|
||||
const inputMargin = 16 - 1
|
||||
const minNonLiteralBlockSize = 1 + 1 + inputMargin
|
||||
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
dst = append(dst, Match{
|
||||
Unmatched: len(src),
|
||||
})
|
||||
return dst
|
||||
}
|
||||
if len(src) > 65536 {
|
||||
panic("block too long")
|
||||
}
|
||||
|
||||
var table [m0TableSize]uint16
|
||||
|
||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||
// looking for copies.
|
||||
sLimit := len(src) - inputMargin
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
nextEmit := 0
|
||||
|
||||
// The encoded form must start with a literal, as there are no previous
|
||||
// bytes to copy, so we start looking for hash matches at s == 1.
|
||||
s := 1
|
||||
nextHash := m.hash(binary.LittleEndian.Uint64(src[s:]))
|
||||
|
||||
for {
|
||||
// Copied from the C++ snappy implementation:
|
||||
//
|
||||
// Heuristic match skipping: If 32 bytes are scanned with no matches
|
||||
// found, start looking only at every other byte. If 32 more bytes are
|
||||
// scanned (or skipped), look at every third byte, etc.. When a match
|
||||
// is found, immediately go back to looking at every byte. This is a
|
||||
// small loss (~5% performance, ~0.1% density) for compressible data
|
||||
// due to more bookkeeping, but for non-compressible data (such as
|
||||
// JPEG) it's a huge win since the compressor quickly "realizes" the
|
||||
// data is incompressible and doesn't bother looking for matches
|
||||
// everywhere.
|
||||
//
|
||||
// The "skip" variable keeps track of how many bytes there are since
|
||||
// the last match; dividing it by 32 (ie. right-shifting by five) gives
|
||||
// the number of bytes to move ahead for each iteration.
|
||||
skip := 32
|
||||
|
||||
nextS := s
|
||||
candidate := 0
|
||||
for {
|
||||
s = nextS
|
||||
bytesBetweenHashLookups := skip >> 5
|
||||
nextS = s + bytesBetweenHashLookups
|
||||
skip += bytesBetweenHashLookups
|
||||
if nextS > sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
candidate = int(table[nextHash&m0TableMask])
|
||||
table[nextHash&m0TableMask] = uint16(s)
|
||||
nextHash = m.hash(binary.LittleEndian.Uint64(src[nextS:]))
|
||||
if m.MaxDistance != 0 && s-candidate > m.MaxDistance {
|
||||
continue
|
||||
}
|
||||
if binary.LittleEndian.Uint32(src[s:]) == binary.LittleEndian.Uint32(src[candidate:]) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Invariant: we have a 4-byte match at s.
|
||||
base := s
|
||||
s = extendMatch(src, candidate+4, s+4)
|
||||
|
||||
origBase := base
|
||||
if m.Lazy && base+1 < sLimit {
|
||||
newBase := base + 1
|
||||
h := m.hash(binary.LittleEndian.Uint64(src[newBase:]))
|
||||
newCandidate := int(table[h&m0TableMask])
|
||||
table[h&m0TableMask] = uint16(newBase)
|
||||
okDistance := true
|
||||
if m.MaxDistance != 0 && newBase-newCandidate > m.MaxDistance {
|
||||
okDistance = false
|
||||
}
|
||||
if okDistance && binary.LittleEndian.Uint32(src[newBase:]) == binary.LittleEndian.Uint32(src[newCandidate:]) {
|
||||
newS := extendMatch(src, newCandidate+4, newBase+4)
|
||||
if newS-newBase > s-base+1 {
|
||||
s = newS
|
||||
base = newBase
|
||||
candidate = newCandidate
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if m.MaxLength != 0 && s-base > m.MaxLength {
|
||||
s = base + m.MaxLength
|
||||
}
|
||||
dst = append(dst, Match{
|
||||
Unmatched: base - nextEmit,
|
||||
Length: s - base,
|
||||
Distance: base - candidate,
|
||||
})
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
if m.Lazy {
|
||||
// If lazy matching is enabled, we update the hash table for
|
||||
// every byte in the match.
|
||||
for i := origBase + 2; i < s-1; i++ {
|
||||
x := binary.LittleEndian.Uint64(src[i:])
|
||||
table[m.hash(x)&m0TableMask] = uint16(i)
|
||||
}
|
||||
}
|
||||
|
||||
// We could immediately start working at s now, but to improve
|
||||
// compression we first update the hash table at s-1 and at s.
|
||||
x := binary.LittleEndian.Uint64(src[s-1:])
|
||||
prevHash := m.hash(x >> 0)
|
||||
table[prevHash&m0TableMask] = uint16(s - 1)
|
||||
nextHash = m.hash(x >> 8)
|
||||
}
|
||||
|
||||
emitRemainder:
|
||||
if nextEmit < len(src) {
|
||||
dst = append(dst, Match{
|
||||
Unmatched: len(src) - nextEmit,
|
||||
})
|
||||
}
|
||||
return dst
|
||||
}
|
||||
297
vendor/github.com/andybalholm/brotli/matchfinder/m4.go
generated
vendored
Normal file
297
vendor/github.com/andybalholm/brotli/matchfinder/m4.go
generated
vendored
Normal file
@ -0,0 +1,297 @@
|
||||
package matchfinder
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math/bits"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// M4 is an implementation of the MatchFinder
|
||||
// interface that uses a hash table to find matches,
|
||||
// optional match chains,
|
||||
// and the advanced parsing technique from
|
||||
// https://fastcompression.blogspot.com/2011/12/advanced-parsing-strategies.html.
|
||||
type M4 struct {
|
||||
// MaxDistance is the maximum distance (in bytes) to look back for
|
||||
// a match. The default is 65535.
|
||||
MaxDistance int
|
||||
|
||||
// MinLength is the length of the shortest match to return.
|
||||
// The default is 4.
|
||||
MinLength int
|
||||
|
||||
// HashLen is the number of bytes to use to calculate the hashes.
|
||||
// The maximum is 8 and the default is 6.
|
||||
HashLen int
|
||||
|
||||
// TableBits is the number of bits in the hash table indexes.
|
||||
// The default is 17 (128K entries).
|
||||
TableBits int
|
||||
|
||||
// ChainLength is how many entries to search on the "match chain" of older
|
||||
// locations with the same hash as the current location.
|
||||
ChainLength int
|
||||
|
||||
// DistanceBitCost is used when comparing two matches to see
|
||||
// which is better. The comparison is primarily based on the length
|
||||
// of the matches, but it can also take the distance into account,
|
||||
// in terms of the number of bits needed to represent the distance.
|
||||
// One byte of length is given a score of 256, so 32 (256/8) would
|
||||
// be a reasonable first guess for the value of one bit.
|
||||
// (The default is 0, which bases the comparison solely on length.)
|
||||
DistanceBitCost int
|
||||
|
||||
table []uint32
|
||||
chain []uint16
|
||||
|
||||
history []byte
|
||||
}
|
||||
|
||||
func (q *M4) Reset() {
|
||||
for i := range q.table {
|
||||
q.table[i] = 0
|
||||
}
|
||||
q.history = q.history[:0]
|
||||
q.chain = q.chain[:0]
|
||||
}
|
||||
|
||||
func (q *M4) score(m absoluteMatch) int {
|
||||
return (m.End-m.Start)*256 + bits.LeadingZeros32(uint32(m.Start-m.Match))*q.DistanceBitCost
|
||||
}
|
||||
|
||||
func (q *M4) FindMatches(dst []Match, src []byte) []Match {
|
||||
if q.MaxDistance == 0 {
|
||||
q.MaxDistance = 65535
|
||||
}
|
||||
if q.MinLength == 0 {
|
||||
q.MinLength = 4
|
||||
}
|
||||
if q.HashLen == 0 {
|
||||
q.HashLen = 6
|
||||
}
|
||||
if q.TableBits == 0 {
|
||||
q.TableBits = 17
|
||||
}
|
||||
if len(q.table) < 1<<q.TableBits {
|
||||
q.table = make([]uint32, 1<<q.TableBits)
|
||||
}
|
||||
|
||||
e := matchEmitter{Dst: dst}
|
||||
|
||||
if len(q.history) > q.MaxDistance*2 {
|
||||
// Trim down the history buffer.
|
||||
delta := len(q.history) - q.MaxDistance
|
||||
copy(q.history, q.history[delta:])
|
||||
q.history = q.history[:q.MaxDistance]
|
||||
if q.ChainLength > 0 {
|
||||
q.chain = q.chain[:q.MaxDistance]
|
||||
}
|
||||
|
||||
for i, v := range q.table {
|
||||
newV := int(v) - delta
|
||||
if newV < 0 {
|
||||
newV = 0
|
||||
}
|
||||
q.table[i] = uint32(newV)
|
||||
}
|
||||
}
|
||||
|
||||
// Append src to the history buffer.
|
||||
e.NextEmit = len(q.history)
|
||||
q.history = append(q.history, src...)
|
||||
if q.ChainLength > 0 {
|
||||
q.chain = append(q.chain, make([]uint16, len(src))...)
|
||||
}
|
||||
src = q.history
|
||||
|
||||
// matches stores the matches that have been found but not emitted,
|
||||
// in reverse order. (matches[0] is the most recent one.)
|
||||
var matches [3]absoluteMatch
|
||||
for i := e.NextEmit; i < len(src)-7; i++ {
|
||||
if matches[0] != (absoluteMatch{}) && i >= matches[0].End {
|
||||
// We have found some matches, and we're far enough along that we probably
|
||||
// won't find overlapping matches, so we might as well emit them.
|
||||
if matches[1] != (absoluteMatch{}) {
|
||||
e.trim(matches[1], matches[0].Start, q.MinLength)
|
||||
}
|
||||
e.emit(matches[0])
|
||||
matches = [3]absoluteMatch{}
|
||||
}
|
||||
|
||||
// Calculate and store the hash.
|
||||
h := ((binary.LittleEndian.Uint64(src[i:]) & (1<<(8*q.HashLen) - 1)) * hashMul64) >> (64 - q.TableBits)
|
||||
candidate := int(q.table[h])
|
||||
q.table[h] = uint32(i)
|
||||
if q.ChainLength > 0 && candidate != 0 {
|
||||
delta := i - candidate
|
||||
if delta < 1<<16 {
|
||||
q.chain[i] = uint16(delta)
|
||||
}
|
||||
}
|
||||
|
||||
if i < matches[0].End && i != matches[0].End+2-q.HashLen {
|
||||
continue
|
||||
}
|
||||
if candidate == 0 || i-candidate > q.MaxDistance {
|
||||
continue
|
||||
}
|
||||
|
||||
// Look for a match.
|
||||
var currentMatch absoluteMatch
|
||||
|
||||
if i-candidate != matches[0].Start-matches[0].Match {
|
||||
if binary.LittleEndian.Uint32(src[candidate:]) == binary.LittleEndian.Uint32(src[i:]) {
|
||||
m := extendMatch2(src, i, candidate, e.NextEmit)
|
||||
if m.End-m.Start > q.MinLength {
|
||||
currentMatch = m
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for j := 0; j < q.ChainLength; j++ {
|
||||
delta := q.chain[candidate]
|
||||
if delta == 0 {
|
||||
break
|
||||
}
|
||||
candidate -= int(delta)
|
||||
if candidate <= 0 || i-candidate > q.MaxDistance {
|
||||
break
|
||||
}
|
||||
if i-candidate != matches[0].Start-matches[0].Match {
|
||||
if binary.LittleEndian.Uint32(src[candidate:]) == binary.LittleEndian.Uint32(src[i:]) {
|
||||
m := extendMatch2(src, i, candidate, e.NextEmit)
|
||||
if m.End-m.Start > q.MinLength && q.score(m) > q.score(currentMatch) {
|
||||
currentMatch = m
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if currentMatch.End-currentMatch.Start < q.MinLength {
|
||||
continue
|
||||
}
|
||||
|
||||
overlapPenalty := 0
|
||||
if matches[0] != (absoluteMatch{}) {
|
||||
overlapPenalty = 275
|
||||
if currentMatch.Start <= matches[1].End {
|
||||
// This match would completely replace the previous match,
|
||||
// so there is no penalty for overlap.
|
||||
overlapPenalty = 0
|
||||
}
|
||||
}
|
||||
|
||||
if q.score(currentMatch) <= q.score(matches[0])+overlapPenalty {
|
||||
continue
|
||||
}
|
||||
|
||||
matches = [3]absoluteMatch{
|
||||
currentMatch,
|
||||
matches[0],
|
||||
matches[1],
|
||||
}
|
||||
|
||||
if matches[2] == (absoluteMatch{}) {
|
||||
continue
|
||||
}
|
||||
|
||||
// We have three matches, so it's time to emit one and/or eliminate one.
|
||||
switch {
|
||||
case matches[0].Start < matches[2].End:
|
||||
// The first and third matches overlap; discard the one in between.
|
||||
matches = [3]absoluteMatch{
|
||||
matches[0],
|
||||
matches[2],
|
||||
absoluteMatch{},
|
||||
}
|
||||
|
||||
case matches[0].Start < matches[2].End+q.MinLength:
|
||||
// The first and third matches don't overlap, but there's no room for
|
||||
// another match between them. Emit the first match and discard the second.
|
||||
e.emit(matches[2])
|
||||
matches = [3]absoluteMatch{
|
||||
matches[0],
|
||||
absoluteMatch{},
|
||||
absoluteMatch{},
|
||||
}
|
||||
|
||||
default:
|
||||
// Emit the first match, shortening it if necessary to avoid overlap with the second.
|
||||
e.trim(matches[2], matches[1].Start, q.MinLength)
|
||||
matches[2] = absoluteMatch{}
|
||||
}
|
||||
}
|
||||
|
||||
// We've found all the matches now; emit the remaining ones.
|
||||
if matches[1] != (absoluteMatch{}) {
|
||||
e.trim(matches[1], matches[0].Start, q.MinLength)
|
||||
}
|
||||
if matches[0] != (absoluteMatch{}) {
|
||||
e.emit(matches[0])
|
||||
}
|
||||
|
||||
dst = e.Dst
|
||||
if e.NextEmit < len(src) {
|
||||
dst = append(dst, Match{
|
||||
Unmatched: len(src) - e.NextEmit,
|
||||
})
|
||||
}
|
||||
|
||||
return dst
|
||||
}
|
||||
|
||||
const hashMul64 = 0x1E35A7BD1E35A7BD
|
||||
|
||||
// extendMatch returns the largest k such that k <= len(src) and that
|
||||
// src[i:i+k-j] and src[j:k] have the same contents.
|
||||
//
|
||||
// It assumes that:
|
||||
//
|
||||
// 0 <= i && i < j && j <= len(src)
|
||||
func extendMatch(src []byte, i, j int) int {
|
||||
switch runtime.GOARCH {
|
||||
case "amd64":
|
||||
// As long as we are 8 or more bytes before the end of src, we can load and
|
||||
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
|
||||
for j+8 < len(src) {
|
||||
iBytes := binary.LittleEndian.Uint64(src[i:])
|
||||
jBytes := binary.LittleEndian.Uint64(src[j:])
|
||||
if iBytes != jBytes {
|
||||
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
|
||||
// the index of the first byte that differs. The BSF instruction finds the
|
||||
// least significant 1 bit, the amd64 architecture is little-endian, and
|
||||
// the shift by 3 converts a bit index to a byte index.
|
||||
return j + bits.TrailingZeros64(iBytes^jBytes)>>3
|
||||
}
|
||||
i, j = i+8, j+8
|
||||
}
|
||||
case "386":
|
||||
// On a 32-bit CPU, we do it 4 bytes at a time.
|
||||
for j+4 < len(src) {
|
||||
iBytes := binary.LittleEndian.Uint32(src[i:])
|
||||
jBytes := binary.LittleEndian.Uint32(src[j:])
|
||||
if iBytes != jBytes {
|
||||
return j + bits.TrailingZeros32(iBytes^jBytes)>>3
|
||||
}
|
||||
i, j = i+4, j+4
|
||||
}
|
||||
}
|
||||
for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
|
||||
}
|
||||
return j
|
||||
}
|
||||
|
||||
// Given a 4-byte match at src[start] and src[candidate], extendMatch2 extends it
|
||||
// upward as far as possible, and downward no farther than to min.
|
||||
func extendMatch2(src []byte, start, candidate, min int) absoluteMatch {
|
||||
end := extendMatch(src, candidate+4, start+4)
|
||||
for start > min && candidate > 0 && src[start-1] == src[candidate-1] {
|
||||
start--
|
||||
candidate--
|
||||
}
|
||||
return absoluteMatch{
|
||||
Start: start,
|
||||
End: end,
|
||||
Match: candidate,
|
||||
}
|
||||
}
|
||||
103
vendor/github.com/andybalholm/brotli/matchfinder/matchfinder.go
generated
vendored
Normal file
103
vendor/github.com/andybalholm/brotli/matchfinder/matchfinder.go
generated
vendored
Normal file
@ -0,0 +1,103 @@
|
||||
// The matchfinder package defines reusable components for data compression.
|
||||
//
|
||||
// Many compression libraries have two main parts:
|
||||
// - Something that looks for repeated sequences of bytes
|
||||
// - An encoder for the compressed data format (often an entropy coder)
|
||||
//
|
||||
// Although these are logically two separate steps, the implementations are
|
||||
// usually closely tied together. You can't use flate's matcher with snappy's
|
||||
// encoder, for example. This package defines interfaces and an intermediate
|
||||
// representation to allow mixing and matching compression components.
|
||||
package matchfinder
|
||||
|
||||
import "io"
|
||||
|
||||
// A Match is the basic unit of LZ77 compression.
|
||||
type Match struct {
|
||||
Unmatched int // the number of unmatched bytes since the previous match
|
||||
Length int // the number of bytes in the matched string; it may be 0 at the end of the input
|
||||
Distance int // how far back in the stream to copy from
|
||||
}
|
||||
|
||||
// A MatchFinder performs the LZ77 stage of compression, looking for matches.
|
||||
type MatchFinder interface {
|
||||
// FindMatches looks for matches in src, appends them to dst, and returns dst.
|
||||
FindMatches(dst []Match, src []byte) []Match
|
||||
|
||||
// Reset clears any internal state, preparing the MatchFinder to be used with
|
||||
// a new stream.
|
||||
Reset()
|
||||
}
|
||||
|
||||
// An Encoder encodes the data in its final format.
|
||||
type Encoder interface {
|
||||
// Encode appends the encoded format of src to dst, using the match
|
||||
// information from matches.
|
||||
Encode(dst []byte, src []byte, matches []Match, lastBlock bool) []byte
|
||||
|
||||
// Reset clears any internal state, preparing the Encoder to be used with
|
||||
// a new stream.
|
||||
Reset()
|
||||
}
|
||||
|
||||
// A Writer uses MatchFinder and Encoder to write compressed data to Dest.
|
||||
type Writer struct {
|
||||
Dest io.Writer
|
||||
MatchFinder MatchFinder
|
||||
Encoder Encoder
|
||||
|
||||
// BlockSize is the number of bytes to compress at a time. If it is zero,
|
||||
// each Write operation will be treated as one block.
|
||||
BlockSize int
|
||||
|
||||
err error
|
||||
inBuf []byte
|
||||
outBuf []byte
|
||||
matches []Match
|
||||
}
|
||||
|
||||
func (w *Writer) Write(p []byte) (n int, err error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
if w.BlockSize == 0 {
|
||||
return w.writeBlock(p, false)
|
||||
}
|
||||
|
||||
w.inBuf = append(w.inBuf, p...)
|
||||
var pos int
|
||||
for pos = 0; pos+w.BlockSize <= len(w.inBuf) && w.err == nil; pos += w.BlockSize {
|
||||
w.writeBlock(w.inBuf[pos:pos+w.BlockSize], false)
|
||||
}
|
||||
if pos > 0 {
|
||||
n := copy(w.inBuf, w.inBuf[pos:])
|
||||
w.inBuf = w.inBuf[:n]
|
||||
}
|
||||
|
||||
return len(p), w.err
|
||||
}
|
||||
|
||||
func (w *Writer) writeBlock(p []byte, lastBlock bool) (n int, err error) {
|
||||
w.outBuf = w.outBuf[:0]
|
||||
w.matches = w.MatchFinder.FindMatches(w.matches[:0], p)
|
||||
w.outBuf = w.Encoder.Encode(w.outBuf, p, w.matches, lastBlock)
|
||||
_, w.err = w.Dest.Write(w.outBuf)
|
||||
return len(p), w.err
|
||||
}
|
||||
|
||||
func (w *Writer) Close() error {
|
||||
w.writeBlock(w.inBuf, true)
|
||||
w.inBuf = w.inBuf[:0]
|
||||
return w.err
|
||||
}
|
||||
|
||||
func (w *Writer) Reset(newDest io.Writer) {
|
||||
w.MatchFinder.Reset()
|
||||
w.Encoder.Reset()
|
||||
w.err = nil
|
||||
w.inBuf = w.inBuf[:0]
|
||||
w.outBuf = w.outBuf[:0]
|
||||
w.matches = w.matches[:0]
|
||||
w.Dest = newDest
|
||||
}
|
||||
53
vendor/github.com/andybalholm/brotli/matchfinder/textencoder.go
generated
vendored
Normal file
53
vendor/github.com/andybalholm/brotli/matchfinder/textencoder.go
generated
vendored
Normal file
@ -0,0 +1,53 @@
|
||||
package matchfinder
|
||||
|
||||
import "fmt"
|
||||
|
||||
// A TextEncoder is an Encoder that produces a human-readable representation of
|
||||
// the LZ77 compression. Matches are replaced with <Length,Distance> symbols.
|
||||
type TextEncoder struct{}
|
||||
|
||||
func (t TextEncoder) Reset() {}
|
||||
|
||||
func (t TextEncoder) Encode(dst []byte, src []byte, matches []Match, lastBlock bool) []byte {
|
||||
pos := 0
|
||||
for _, m := range matches {
|
||||
if m.Unmatched > 0 {
|
||||
dst = append(dst, src[pos:pos+m.Unmatched]...)
|
||||
pos += m.Unmatched
|
||||
}
|
||||
if m.Length > 0 {
|
||||
dst = append(dst, []byte(fmt.Sprintf("<%d,%d>", m.Length, m.Distance))...)
|
||||
pos += m.Length
|
||||
}
|
||||
}
|
||||
if pos < len(src) {
|
||||
dst = append(dst, src[pos:]...)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// A NoMatchFinder implements MatchFinder, but doesn't find any matches.
|
||||
// It can be used to implement the equivalent of the standard library flate package's
|
||||
// HuffmanOnly setting.
|
||||
type NoMatchFinder struct{}
|
||||
|
||||
func (n NoMatchFinder) Reset() {}
|
||||
|
||||
func (n NoMatchFinder) FindMatches(dst []Match, src []byte) []Match {
|
||||
return append(dst, Match{
|
||||
Unmatched: len(src),
|
||||
})
|
||||
}
|
||||
|
||||
// AutoReset wraps a MatchFinder that can return references to data in previous
|
||||
// blocks, and calls Reset before each block. It is useful for (e.g.) using a
|
||||
// snappy Encoder with a MatchFinder designed for flate. (Snappy doesn't
|
||||
// support references between blocks.)
|
||||
type AutoReset struct {
|
||||
MatchFinder
|
||||
}
|
||||
|
||||
func (a AutoReset) FindMatches(dst []Match, src []byte) []Match {
|
||||
a.Reset()
|
||||
return a.MatchFinder.FindMatches(dst, src)
|
||||
}
|
||||
66
vendor/github.com/andybalholm/brotli/memory.go
generated
vendored
Normal file
66
vendor/github.com/andybalholm/brotli/memory.go
generated
vendored
Normal file
@ -0,0 +1,66 @@
|
||||
package brotli
|
||||
|
||||
/* Copyright 2016 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/*
|
||||
Dynamically grows array capacity to at least the requested size
|
||||
T: data type
|
||||
A: array
|
||||
C: capacity
|
||||
R: requested size
|
||||
*/
|
||||
func brotli_ensure_capacity_uint8_t(a *[]byte, c *uint, r uint) {
|
||||
if *c < r {
|
||||
var new_size uint = *c
|
||||
if new_size == 0 {
|
||||
new_size = r
|
||||
}
|
||||
|
||||
for new_size < r {
|
||||
new_size *= 2
|
||||
}
|
||||
|
||||
if cap(*a) < int(new_size) {
|
||||
var new_array []byte = make([]byte, new_size)
|
||||
if *c != 0 {
|
||||
copy(new_array, (*a)[:*c])
|
||||
}
|
||||
|
||||
*a = new_array
|
||||
} else {
|
||||
*a = (*a)[:new_size]
|
||||
}
|
||||
|
||||
*c = new_size
|
||||
}
|
||||
}
|
||||
|
||||
func brotli_ensure_capacity_uint32_t(a *[]uint32, c *uint, r uint) {
|
||||
var new_array []uint32
|
||||
if *c < r {
|
||||
var new_size uint = *c
|
||||
if new_size == 0 {
|
||||
new_size = r
|
||||
}
|
||||
|
||||
for new_size < r {
|
||||
new_size *= 2
|
||||
}
|
||||
|
||||
if cap(*a) < int(new_size) {
|
||||
new_array = make([]uint32, new_size)
|
||||
if *c != 0 {
|
||||
copy(new_array, (*a)[:*c])
|
||||
}
|
||||
|
||||
*a = new_array
|
||||
} else {
|
||||
*a = (*a)[:new_size]
|
||||
}
|
||||
*c = new_size
|
||||
}
|
||||
}
|
||||
574
vendor/github.com/andybalholm/brotli/metablock.go
generated
vendored
Normal file
574
vendor/github.com/andybalholm/brotli/metablock.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
165
vendor/github.com/andybalholm/brotli/metablock_command.go
generated
vendored
Normal file
165
vendor/github.com/andybalholm/brotli/metablock_command.go
generated
vendored
Normal file
@ -0,0 +1,165 @@
|
||||
package brotli
|
||||
|
||||
/* Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* Greedy block splitter for one block category (literal, command or distance).
|
||||
*/
|
||||
type blockSplitterCommand struct {
|
||||
alphabet_size_ uint
|
||||
min_block_size_ uint
|
||||
split_threshold_ float64
|
||||
num_blocks_ uint
|
||||
split_ *blockSplit
|
||||
histograms_ []histogramCommand
|
||||
histograms_size_ *uint
|
||||
target_block_size_ uint
|
||||
block_size_ uint
|
||||
curr_histogram_ix_ uint
|
||||
last_histogram_ix_ [2]uint
|
||||
last_entropy_ [2]float64
|
||||
merge_last_count_ uint
|
||||
}
|
||||
|
||||
func initBlockSplitterCommand(self *blockSplitterCommand, alphabet_size uint, min_block_size uint, split_threshold float64, num_symbols uint, split *blockSplit, histograms *[]histogramCommand, histograms_size *uint) {
|
||||
var max_num_blocks uint = num_symbols/min_block_size + 1
|
||||
var max_num_types uint = brotli_min_size_t(max_num_blocks, maxNumberOfBlockTypes+1)
|
||||
/* We have to allocate one more histogram than the maximum number of block
|
||||
types for the current histogram when the meta-block is too big. */
|
||||
self.alphabet_size_ = alphabet_size
|
||||
|
||||
self.min_block_size_ = min_block_size
|
||||
self.split_threshold_ = split_threshold
|
||||
self.num_blocks_ = 0
|
||||
self.split_ = split
|
||||
self.histograms_size_ = histograms_size
|
||||
self.target_block_size_ = min_block_size
|
||||
self.block_size_ = 0
|
||||
self.curr_histogram_ix_ = 0
|
||||
self.merge_last_count_ = 0
|
||||
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
|
||||
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
|
||||
self.split_.num_blocks = max_num_blocks
|
||||
*histograms_size = max_num_types
|
||||
if histograms == nil || cap(*histograms) < int(*histograms_size) {
|
||||
*histograms = make([]histogramCommand, (*histograms_size))
|
||||
} else {
|
||||
*histograms = (*histograms)[:*histograms_size]
|
||||
}
|
||||
self.histograms_ = *histograms
|
||||
|
||||
/* Clear only current histogram. */
|
||||
histogramClearCommand(&self.histograms_[0])
|
||||
|
||||
self.last_histogram_ix_[1] = 0
|
||||
self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
|
||||
}
|
||||
|
||||
/* Does either of three things:
|
||||
(1) emits the current block with a new block type;
|
||||
(2) emits the current block with the type of the second last block;
|
||||
(3) merges the current block with the last block. */
|
||||
func blockSplitterFinishBlockCommand(self *blockSplitterCommand, is_final bool) {
|
||||
var split *blockSplit = self.split_
|
||||
var last_entropy []float64 = self.last_entropy_[:]
|
||||
var histograms []histogramCommand = self.histograms_
|
||||
self.block_size_ = brotli_max_size_t(self.block_size_, self.min_block_size_)
|
||||
if self.num_blocks_ == 0 {
|
||||
/* Create first block. */
|
||||
split.lengths[0] = uint32(self.block_size_)
|
||||
|
||||
split.types[0] = 0
|
||||
last_entropy[0] = bitsEntropy(histograms[0].data_[:], self.alphabet_size_)
|
||||
last_entropy[1] = last_entropy[0]
|
||||
self.num_blocks_++
|
||||
split.num_types++
|
||||
self.curr_histogram_ix_++
|
||||
if self.curr_histogram_ix_ < *self.histograms_size_ {
|
||||
histogramClearCommand(&histograms[self.curr_histogram_ix_])
|
||||
}
|
||||
self.block_size_ = 0
|
||||
} else if self.block_size_ > 0 {
|
||||
var entropy float64 = bitsEntropy(histograms[self.curr_histogram_ix_].data_[:], self.alphabet_size_)
|
||||
var combined_histo [2]histogramCommand
|
||||
var combined_entropy [2]float64
|
||||
var diff [2]float64
|
||||
var j uint
|
||||
for j = 0; j < 2; j++ {
|
||||
var last_histogram_ix uint = self.last_histogram_ix_[j]
|
||||
combined_histo[j] = histograms[self.curr_histogram_ix_]
|
||||
histogramAddHistogramCommand(&combined_histo[j], &histograms[last_histogram_ix])
|
||||
combined_entropy[j] = bitsEntropy(combined_histo[j].data_[0:], self.alphabet_size_)
|
||||
diff[j] = combined_entropy[j] - entropy - last_entropy[j]
|
||||
}
|
||||
|
||||
if split.num_types < maxNumberOfBlockTypes && diff[0] > self.split_threshold_ && diff[1] > self.split_threshold_ {
|
||||
/* Create new block. */
|
||||
split.lengths[self.num_blocks_] = uint32(self.block_size_)
|
||||
|
||||
split.types[self.num_blocks_] = byte(split.num_types)
|
||||
self.last_histogram_ix_[1] = self.last_histogram_ix_[0]
|
||||
self.last_histogram_ix_[0] = uint(byte(split.num_types))
|
||||
last_entropy[1] = last_entropy[0]
|
||||
last_entropy[0] = entropy
|
||||
self.num_blocks_++
|
||||
split.num_types++
|
||||
self.curr_histogram_ix_++
|
||||
if self.curr_histogram_ix_ < *self.histograms_size_ {
|
||||
histogramClearCommand(&histograms[self.curr_histogram_ix_])
|
||||
}
|
||||
self.block_size_ = 0
|
||||
self.merge_last_count_ = 0
|
||||
self.target_block_size_ = self.min_block_size_
|
||||
} else if diff[1] < diff[0]-20.0 {
|
||||
split.lengths[self.num_blocks_] = uint32(self.block_size_)
|
||||
split.types[self.num_blocks_] = split.types[self.num_blocks_-2]
|
||||
/* Combine this block with second last block. */
|
||||
|
||||
var tmp uint = self.last_histogram_ix_[0]
|
||||
self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
|
||||
self.last_histogram_ix_[1] = tmp
|
||||
histograms[self.last_histogram_ix_[0]] = combined_histo[1]
|
||||
last_entropy[1] = last_entropy[0]
|
||||
last_entropy[0] = combined_entropy[1]
|
||||
self.num_blocks_++
|
||||
self.block_size_ = 0
|
||||
histogramClearCommand(&histograms[self.curr_histogram_ix_])
|
||||
self.merge_last_count_ = 0
|
||||
self.target_block_size_ = self.min_block_size_
|
||||
} else {
|
||||
/* Combine this block with last block. */
|
||||
split.lengths[self.num_blocks_-1] += uint32(self.block_size_)
|
||||
|
||||
histograms[self.last_histogram_ix_[0]] = combined_histo[0]
|
||||
last_entropy[0] = combined_entropy[0]
|
||||
if split.num_types == 1 {
|
||||
last_entropy[1] = last_entropy[0]
|
||||
}
|
||||
|
||||
self.block_size_ = 0
|
||||
histogramClearCommand(&histograms[self.curr_histogram_ix_])
|
||||
self.merge_last_count_++
|
||||
if self.merge_last_count_ > 1 {
|
||||
self.target_block_size_ += self.min_block_size_
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if is_final {
|
||||
*self.histograms_size_ = split.num_types
|
||||
split.num_blocks = self.num_blocks_
|
||||
}
|
||||
}
|
||||
|
||||
/* Adds the next symbol to the current histogram. When the current histogram
|
||||
reaches the target size, decides on merging the block. */
|
||||
func blockSplitterAddSymbolCommand(self *blockSplitterCommand, symbol uint) {
|
||||
histogramAddCommand(&self.histograms_[self.curr_histogram_ix_], symbol)
|
||||
self.block_size_++
|
||||
if self.block_size_ == self.target_block_size_ {
|
||||
blockSplitterFinishBlockCommand(self, false) /* is_final = */
|
||||
}
|
||||
}
|
||||
165
vendor/github.com/andybalholm/brotli/metablock_distance.go
generated
vendored
Normal file
165
vendor/github.com/andybalholm/brotli/metablock_distance.go
generated
vendored
Normal file
@ -0,0 +1,165 @@
|
||||
package brotli
|
||||
|
||||
/* Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* Greedy block splitter for one block category (literal, command or distance).
|
||||
*/
|
||||
type blockSplitterDistance struct {
|
||||
alphabet_size_ uint
|
||||
min_block_size_ uint
|
||||
split_threshold_ float64
|
||||
num_blocks_ uint
|
||||
split_ *blockSplit
|
||||
histograms_ []histogramDistance
|
||||
histograms_size_ *uint
|
||||
target_block_size_ uint
|
||||
block_size_ uint
|
||||
curr_histogram_ix_ uint
|
||||
last_histogram_ix_ [2]uint
|
||||
last_entropy_ [2]float64
|
||||
merge_last_count_ uint
|
||||
}
|
||||
|
||||
func initBlockSplitterDistance(self *blockSplitterDistance, alphabet_size uint, min_block_size uint, split_threshold float64, num_symbols uint, split *blockSplit, histograms *[]histogramDistance, histograms_size *uint) {
|
||||
var max_num_blocks uint = num_symbols/min_block_size + 1
|
||||
var max_num_types uint = brotli_min_size_t(max_num_blocks, maxNumberOfBlockTypes+1)
|
||||
/* We have to allocate one more histogram than the maximum number of block
|
||||
types for the current histogram when the meta-block is too big. */
|
||||
self.alphabet_size_ = alphabet_size
|
||||
|
||||
self.min_block_size_ = min_block_size
|
||||
self.split_threshold_ = split_threshold
|
||||
self.num_blocks_ = 0
|
||||
self.split_ = split
|
||||
self.histograms_size_ = histograms_size
|
||||
self.target_block_size_ = min_block_size
|
||||
self.block_size_ = 0
|
||||
self.curr_histogram_ix_ = 0
|
||||
self.merge_last_count_ = 0
|
||||
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
|
||||
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
|
||||
self.split_.num_blocks = max_num_blocks
|
||||
*histograms_size = max_num_types
|
||||
if histograms == nil || cap(*histograms) < int(*histograms_size) {
|
||||
*histograms = make([]histogramDistance, *histograms_size)
|
||||
} else {
|
||||
*histograms = (*histograms)[:*histograms_size]
|
||||
}
|
||||
self.histograms_ = *histograms
|
||||
|
||||
/* Clear only current histogram. */
|
||||
histogramClearDistance(&self.histograms_[0])
|
||||
|
||||
self.last_histogram_ix_[1] = 0
|
||||
self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
|
||||
}
|
||||
|
||||
/* Does either of three things:
|
||||
(1) emits the current block with a new block type;
|
||||
(2) emits the current block with the type of the second last block;
|
||||
(3) merges the current block with the last block. */
|
||||
func blockSplitterFinishBlockDistance(self *blockSplitterDistance, is_final bool) {
|
||||
var split *blockSplit = self.split_
|
||||
var last_entropy []float64 = self.last_entropy_[:]
|
||||
var histograms []histogramDistance = self.histograms_
|
||||
self.block_size_ = brotli_max_size_t(self.block_size_, self.min_block_size_)
|
||||
if self.num_blocks_ == 0 {
|
||||
/* Create first block. */
|
||||
split.lengths[0] = uint32(self.block_size_)
|
||||
|
||||
split.types[0] = 0
|
||||
last_entropy[0] = bitsEntropy(histograms[0].data_[:], self.alphabet_size_)
|
||||
last_entropy[1] = last_entropy[0]
|
||||
self.num_blocks_++
|
||||
split.num_types++
|
||||
self.curr_histogram_ix_++
|
||||
if self.curr_histogram_ix_ < *self.histograms_size_ {
|
||||
histogramClearDistance(&histograms[self.curr_histogram_ix_])
|
||||
}
|
||||
self.block_size_ = 0
|
||||
} else if self.block_size_ > 0 {
|
||||
var entropy float64 = bitsEntropy(histograms[self.curr_histogram_ix_].data_[:], self.alphabet_size_)
|
||||
var combined_histo [2]histogramDistance
|
||||
var combined_entropy [2]float64
|
||||
var diff [2]float64
|
||||
var j uint
|
||||
for j = 0; j < 2; j++ {
|
||||
var last_histogram_ix uint = self.last_histogram_ix_[j]
|
||||
combined_histo[j] = histograms[self.curr_histogram_ix_]
|
||||
histogramAddHistogramDistance(&combined_histo[j], &histograms[last_histogram_ix])
|
||||
combined_entropy[j] = bitsEntropy(combined_histo[j].data_[0:], self.alphabet_size_)
|
||||
diff[j] = combined_entropy[j] - entropy - last_entropy[j]
|
||||
}
|
||||
|
||||
if split.num_types < maxNumberOfBlockTypes && diff[0] > self.split_threshold_ && diff[1] > self.split_threshold_ {
|
||||
/* Create new block. */
|
||||
split.lengths[self.num_blocks_] = uint32(self.block_size_)
|
||||
|
||||
split.types[self.num_blocks_] = byte(split.num_types)
|
||||
self.last_histogram_ix_[1] = self.last_histogram_ix_[0]
|
||||
self.last_histogram_ix_[0] = uint(byte(split.num_types))
|
||||
last_entropy[1] = last_entropy[0]
|
||||
last_entropy[0] = entropy
|
||||
self.num_blocks_++
|
||||
split.num_types++
|
||||
self.curr_histogram_ix_++
|
||||
if self.curr_histogram_ix_ < *self.histograms_size_ {
|
||||
histogramClearDistance(&histograms[self.curr_histogram_ix_])
|
||||
}
|
||||
self.block_size_ = 0
|
||||
self.merge_last_count_ = 0
|
||||
self.target_block_size_ = self.min_block_size_
|
||||
} else if diff[1] < diff[0]-20.0 {
|
||||
split.lengths[self.num_blocks_] = uint32(self.block_size_)
|
||||
split.types[self.num_blocks_] = split.types[self.num_blocks_-2]
|
||||
/* Combine this block with second last block. */
|
||||
|
||||
var tmp uint = self.last_histogram_ix_[0]
|
||||
self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
|
||||
self.last_histogram_ix_[1] = tmp
|
||||
histograms[self.last_histogram_ix_[0]] = combined_histo[1]
|
||||
last_entropy[1] = last_entropy[0]
|
||||
last_entropy[0] = combined_entropy[1]
|
||||
self.num_blocks_++
|
||||
self.block_size_ = 0
|
||||
histogramClearDistance(&histograms[self.curr_histogram_ix_])
|
||||
self.merge_last_count_ = 0
|
||||
self.target_block_size_ = self.min_block_size_
|
||||
} else {
|
||||
/* Combine this block with last block. */
|
||||
split.lengths[self.num_blocks_-1] += uint32(self.block_size_)
|
||||
|
||||
histograms[self.last_histogram_ix_[0]] = combined_histo[0]
|
||||
last_entropy[0] = combined_entropy[0]
|
||||
if split.num_types == 1 {
|
||||
last_entropy[1] = last_entropy[0]
|
||||
}
|
||||
|
||||
self.block_size_ = 0
|
||||
histogramClearDistance(&histograms[self.curr_histogram_ix_])
|
||||
self.merge_last_count_++
|
||||
if self.merge_last_count_ > 1 {
|
||||
self.target_block_size_ += self.min_block_size_
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if is_final {
|
||||
*self.histograms_size_ = split.num_types
|
||||
split.num_blocks = self.num_blocks_
|
||||
}
|
||||
}
|
||||
|
||||
/* Adds the next symbol to the current histogram. When the current histogram
|
||||
reaches the target size, decides on merging the block. */
|
||||
func blockSplitterAddSymbolDistance(self *blockSplitterDistance, symbol uint) {
|
||||
histogramAddDistance(&self.histograms_[self.curr_histogram_ix_], symbol)
|
||||
self.block_size_++
|
||||
if self.block_size_ == self.target_block_size_ {
|
||||
blockSplitterFinishBlockDistance(self, false) /* is_final = */
|
||||
}
|
||||
}
|
||||
165
vendor/github.com/andybalholm/brotli/metablock_literal.go
generated
vendored
Normal file
165
vendor/github.com/andybalholm/brotli/metablock_literal.go
generated
vendored
Normal file
@ -0,0 +1,165 @@
|
||||
package brotli
|
||||
|
||||
/* Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* Greedy block splitter for one block category (literal, command or distance).
|
||||
*/
|
||||
type blockSplitterLiteral struct {
|
||||
alphabet_size_ uint
|
||||
min_block_size_ uint
|
||||
split_threshold_ float64
|
||||
num_blocks_ uint
|
||||
split_ *blockSplit
|
||||
histograms_ []histogramLiteral
|
||||
histograms_size_ *uint
|
||||
target_block_size_ uint
|
||||
block_size_ uint
|
||||
curr_histogram_ix_ uint
|
||||
last_histogram_ix_ [2]uint
|
||||
last_entropy_ [2]float64
|
||||
merge_last_count_ uint
|
||||
}
|
||||
|
||||
// initBlockSplitterLiteral prepares self to split num_symbols literals into
// blocks of at least min_block_size, (re)allocating the output split arrays
// and the histogram slice as needed. histograms/histograms_size are in/out:
// existing capacity is reused when large enough.
func initBlockSplitterLiteral(self *blockSplitterLiteral, alphabet_size uint, min_block_size uint, split_threshold float64, num_symbols uint, split *blockSplit, histograms *[]histogramLiteral, histograms_size *uint) {
	var max_num_blocks uint = num_symbols/min_block_size + 1
	var max_num_types uint = brotli_min_size_t(max_num_blocks, maxNumberOfBlockTypes+1)
	/* We have to allocate one more histogram than the maximum number of block
	   types for the current histogram when the meta-block is too big. */
	self.alphabet_size_ = alphabet_size

	self.min_block_size_ = min_block_size
	self.split_threshold_ = split_threshold
	self.num_blocks_ = 0
	self.split_ = split
	self.histograms_size_ = histograms_size
	self.target_block_size_ = min_block_size
	self.block_size_ = 0
	self.curr_histogram_ix_ = 0
	self.merge_last_count_ = 0
	// Grow the output arrays up front so block emission never reallocates.
	brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
	brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
	self.split_.num_blocks = max_num_blocks
	*histograms_size = max_num_types
	if histograms == nil || cap(*histograms) < int(*histograms_size) {
		*histograms = make([]histogramLiteral, *histograms_size)
	} else {
		*histograms = (*histograms)[:*histograms_size]
	}
	self.histograms_ = *histograms

	/* Clear only current histogram. */
	histogramClearLiteral(&self.histograms_[0])

	self.last_histogram_ix_[1] = 0
	self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
}
|
||||
|
||||
/* Does either of three things:
   (1) emits the current block with a new block type;
   (2) emits the current block with the type of the second last block;
   (3) merges the current block with the last block. */
func blockSplitterFinishBlockLiteral(self *blockSplitterLiteral, is_final bool) {
	var split *blockSplit = self.split_
	var last_entropy []float64 = self.last_entropy_[:]
	var histograms []histogramLiteral = self.histograms_
	// Never emit a block shorter than the configured minimum.
	self.block_size_ = brotli_max_size_t(self.block_size_, self.min_block_size_)
	if self.num_blocks_ == 0 {
		/* Create first block. */
		split.lengths[0] = uint32(self.block_size_)

		split.types[0] = 0
		last_entropy[0] = bitsEntropy(histograms[0].data_[:], self.alphabet_size_)
		last_entropy[1] = last_entropy[0]
		self.num_blocks_++
		split.num_types++
		self.curr_histogram_ix_++
		if self.curr_histogram_ix_ < *self.histograms_size_ {
			histogramClearLiteral(&histograms[self.curr_histogram_ix_])
		}
		self.block_size_ = 0
	} else if self.block_size_ > 0 {
		// Entropy of the current block on its own.
		var entropy float64 = bitsEntropy(histograms[self.curr_histogram_ix_].data_[:], self.alphabet_size_)
		var combined_histo [2]histogramLiteral
		var combined_entropy [2]float64
		var diff [2]float64
		var j uint
		// j == 0: merge with the last block; j == 1: merge with the second-last.
		// diff[j] is the entropy cost of merging versus keeping blocks apart.
		for j = 0; j < 2; j++ {
			var last_histogram_ix uint = self.last_histogram_ix_[j]
			combined_histo[j] = histograms[self.curr_histogram_ix_]
			histogramAddHistogramLiteral(&combined_histo[j], &histograms[last_histogram_ix])
			combined_entropy[j] = bitsEntropy(combined_histo[j].data_[0:], self.alphabet_size_)
			diff[j] = combined_entropy[j] - entropy - last_entropy[j]
		}

		if split.num_types < maxNumberOfBlockTypes && diff[0] > self.split_threshold_ && diff[1] > self.split_threshold_ {
			/* Create new block. */
			split.lengths[self.num_blocks_] = uint32(self.block_size_)

			split.types[self.num_blocks_] = byte(split.num_types)
			self.last_histogram_ix_[1] = self.last_histogram_ix_[0]
			self.last_histogram_ix_[0] = uint(byte(split.num_types))
			last_entropy[1] = last_entropy[0]
			last_entropy[0] = entropy
			self.num_blocks_++
			split.num_types++
			self.curr_histogram_ix_++
			if self.curr_histogram_ix_ < *self.histograms_size_ {
				histogramClearLiteral(&histograms[self.curr_histogram_ix_])
			}
			self.block_size_ = 0
			self.merge_last_count_ = 0
			self.target_block_size_ = self.min_block_size_
		} else if diff[1] < diff[0]-20.0 {
			split.lengths[self.num_blocks_] = uint32(self.block_size_)
			split.types[self.num_blocks_] = split.types[self.num_blocks_-2]
			/* Combine this block with second last block. */

			var tmp uint = self.last_histogram_ix_[0]
			self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
			self.last_histogram_ix_[1] = tmp
			histograms[self.last_histogram_ix_[0]] = combined_histo[1]
			last_entropy[1] = last_entropy[0]
			last_entropy[0] = combined_entropy[1]
			self.num_blocks_++
			self.block_size_ = 0
			histogramClearLiteral(&histograms[self.curr_histogram_ix_])
			self.merge_last_count_ = 0
			self.target_block_size_ = self.min_block_size_
		} else {
			/* Combine this block with last block. */
			split.lengths[self.num_blocks_-1] += uint32(self.block_size_)

			histograms[self.last_histogram_ix_[0]] = combined_histo[0]
			last_entropy[0] = combined_entropy[0]
			if split.num_types == 1 {
				last_entropy[1] = last_entropy[0]
			}

			self.block_size_ = 0
			histogramClearLiteral(&histograms[self.curr_histogram_ix_])
			self.merge_last_count_++
			// After repeated merges, grow the target block size so we
			// re-evaluate less often.
			if self.merge_last_count_ > 1 {
				self.target_block_size_ += self.min_block_size_
			}
		}
	}

	if is_final {
		// Shrink the reported histogram count to the types actually used.
		*self.histograms_size_ = split.num_types
		split.num_blocks = self.num_blocks_
	}
}
|
||||
|
||||
/* Adds the next symbol to the current histogram. When the current histogram
   reaches the target size, decides on merging the block. */
func blockSplitterAddSymbolLiteral(self *blockSplitterLiteral, symbol uint) {
	histogramAddLiteral(&self.histograms_[self.curr_histogram_ix_], symbol)
	self.block_size_++
	if self.block_size_ == self.target_block_size_ {
		// Not final: the splitter may still merge this block with a later one.
		blockSplitterFinishBlockLiteral(self, false) /* is_final = */
	}
}
|
||||
37
vendor/github.com/andybalholm/brotli/params.go
generated
vendored
Normal file
37
vendor/github.com/andybalholm/brotli/params.go
generated
vendored
Normal file
@ -0,0 +1,37 @@
|
||||
package brotli
|
||||
|
||||
/* Copyright 2017 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* Parameters for the Brotli encoder with chosen quality levels. */
type hasherParams struct {
	type_                       int // hasher implementation selector (set by chooseHasher)
	bucket_bits                 int
	block_bits                  int
	hash_len                    int
	num_last_distances_to_check int
}

type distanceParams struct {
	distance_postfix_bits     uint32
	num_direct_distance_codes uint32
	alphabet_size             uint32
	max_distance              uint
}

/* Encoding parameters */
type encoderParams struct {
	mode                             int
	quality                          int  // clamped to [minQuality, maxQuality] by sanitizeParams
	lgwin                            uint // log2 window size; clamped by sanitizeParams
	lgblock                          int  // log2 input block size; 0 = auto (see computeLgBlock)
	size_hint                        uint // expected input size; influences hasher choice
	disable_literal_context_modeling bool
	large_window                     bool // forced off for quality <= maxQualityForStaticEntropyCodes
	hasher                           hasherParams
	dist                             distanceParams
	dictionary                       encoderDictionary
}
|
||||
103
vendor/github.com/andybalholm/brotli/platform.go
generated
vendored
Normal file
103
vendor/github.com/andybalholm/brotli/platform.go
generated
vendored
Normal file
@ -0,0 +1,103 @@
|
||||
package brotli
|
||||
|
||||
/* Copyright 2013 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
// brotli_min_double returns the smaller of a and b.
func brotli_min_double(a float64, b float64) float64 {
	if a < b {
		return a
	}
	return b
}
|
||||
|
||||
// brotli_max_double returns the larger of a and b.
func brotli_max_double(a float64, b float64) float64 {
	if a > b {
		return a
	}
	return b
}
|
||||
|
||||
// brotli_min_float returns the smaller of a and b.
func brotli_min_float(a float32, b float32) float32 {
	if a < b {
		return a
	}
	return b
}
|
||||
|
||||
// brotli_max_float returns the larger of a and b.
func brotli_max_float(a float32, b float32) float32 {
	if a > b {
		return a
	}
	return b
}
|
||||
|
||||
// brotli_min_int returns the smaller of a and b.
func brotli_min_int(a int, b int) int {
	if a < b {
		return a
	}
	return b
}
|
||||
|
||||
// brotli_max_int returns the larger of a and b.
func brotli_max_int(a int, b int) int {
	if a > b {
		return a
	}
	return b
}
|
||||
|
||||
// brotli_min_size_t returns the smaller of a and b.
func brotli_min_size_t(a uint, b uint) uint {
	if a < b {
		return a
	}
	return b
}
|
||||
|
||||
// brotli_max_size_t returns the larger of a and b.
func brotli_max_size_t(a uint, b uint) uint {
	if a > b {
		return a
	}
	return b
}
|
||||
|
||||
// brotli_min_uint32_t returns the smaller of a and b.
func brotli_min_uint32_t(a uint32, b uint32) uint32 {
	if a < b {
		return a
	}
	return b
}
|
||||
|
||||
// brotli_max_uint32_t returns the larger of a and b.
func brotli_max_uint32_t(a uint32, b uint32) uint32 {
	if a > b {
		return a
	}
	return b
}
|
||||
|
||||
// brotli_min_uint8_t returns the smaller of a and b.
func brotli_min_uint8_t(a byte, b byte) byte {
	if a < b {
		return a
	}
	return b
}
|
||||
|
||||
// brotli_max_uint8_t returns the larger of a and b.
func brotli_max_uint8_t(a byte, b byte) byte {
	if a > b {
		return a
	}
	return b
}
|
||||
30
vendor/github.com/andybalholm/brotli/prefix.go
generated
vendored
Normal file
30
vendor/github.com/andybalholm/brotli/prefix.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
package brotli
|
||||
|
||||
/* Copyright 2013 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* Functions for encoding of integers into prefix codes the amount of extra
   bits, and the actual values of the extra bits. */

/* Here distance_code is an intermediate code, i.e. one of the special codes or
   the actual distance increased by BROTLI_NUM_DISTANCE_SHORT_CODES - 1. */
func prefixEncodeCopyDistance(distance_code uint, num_direct_codes uint, postfix_bits uint, code *uint16, extra_bits *uint32) {
	if distance_code < numDistanceShortCodes+num_direct_codes {
		// Short or direct code: encoded verbatim, no extra bits.
		*code = uint16(distance_code)
		*extra_bits = 0
		return
	} else {
		// Map the remaining codes onto a bucketed representation: `bucket`
		// selects the magnitude range, `postfix` keeps the low bits verbatim,
		// and the rest of the distance goes into the extra bits.
		var dist uint = (uint(1) << (postfix_bits + 2)) + (distance_code - numDistanceShortCodes - num_direct_codes)
		var bucket uint = uint(log2FloorNonZero(dist) - 1)
		var postfix_mask uint = (1 << postfix_bits) - 1
		var postfix uint = dist & postfix_mask
		var prefix uint = (dist >> bucket) & 1
		var offset uint = (2 + prefix) << bucket
		var nbits uint = bucket - postfix_bits
		// The extra-bit count is packed into the upper bits (<<10) of the code.
		*code = uint16(nbits<<10 | (numDistanceShortCodes + num_direct_codes + ((2*(nbits-1) + prefix) << postfix_bits) + postfix))
		*extra_bits = uint32((dist - offset) >> postfix_bits)
	}
}
|
||||
723
vendor/github.com/andybalholm/brotli/prefix_dec.go
generated
vendored
Normal file
723
vendor/github.com/andybalholm/brotli/prefix_dec.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
196
vendor/github.com/andybalholm/brotli/quality.go
generated
vendored
Normal file
196
vendor/github.com/andybalholm/brotli/quality.go
generated
vendored
Normal file
@ -0,0 +1,196 @@
|
||||
package brotli
|
||||
|
||||
const fastOnePassCompressionQuality = 0
|
||||
|
||||
const fastTwoPassCompressionQuality = 1
|
||||
|
||||
const zopflificationQuality = 10
|
||||
|
||||
const hqZopflificationQuality = 11
|
||||
|
||||
const maxQualityForStaticEntropyCodes = 2
|
||||
|
||||
const minQualityForBlockSplit = 4
|
||||
|
||||
const minQualityForNonzeroDistanceParams = 4
|
||||
|
||||
const minQualityForOptimizeHistograms = 4
|
||||
|
||||
const minQualityForExtensiveReferenceSearch = 5
|
||||
|
||||
const minQualityForContextModeling = 5
|
||||
|
||||
const minQualityForHqContextModeling = 7
|
||||
|
||||
const minQualityForHqBlockSplitting = 10
|
||||
|
||||
/* For quality below MIN_QUALITY_FOR_BLOCK_SPLIT there is no block splitting,
|
||||
so we buffer at most this much literals and commands. */
|
||||
const maxNumDelayedSymbols = 0x2FFF
|
||||
|
||||
/* Returns hash-table size for quality levels 0 and 1. */
|
||||
func maxHashTableSize(quality int) uint {
|
||||
if quality == fastOnePassCompressionQuality {
|
||||
return 1 << 15
|
||||
} else {
|
||||
return 1 << 17
|
||||
}
|
||||
}
|
||||
|
||||
/* The maximum length for which the zopflification uses distinct distances. */
|
||||
const maxZopfliLenQuality10 = 150
|
||||
|
||||
const maxZopfliLenQuality11 = 325
|
||||
|
||||
/* Do not thoroughly search when a long copy is found. */
|
||||
const longCopyQuickStep = 16384
|
||||
|
||||
func maxZopfliLen(params *encoderParams) uint {
|
||||
if params.quality <= 10 {
|
||||
return maxZopfliLenQuality10
|
||||
} else {
|
||||
return maxZopfliLenQuality11
|
||||
}
|
||||
}
|
||||
|
||||
/* Number of best candidates to evaluate to expand Zopfli chain. */
|
||||
func maxZopfliCandidates(params *encoderParams) uint {
|
||||
if params.quality <= 10 {
|
||||
return 1
|
||||
} else {
|
||||
return 5
|
||||
}
|
||||
}
|
||||
|
||||
// sanitizeParams clamps the encoder parameters to their supported ranges:
// quality into [minQuality, maxQuality] and lgwin into
// [minWindowBits, maxWindowBits] (or largeMaxWindowBits in large-window mode).
func sanitizeParams(params *encoderParams) {
	params.quality = brotli_min_int(maxQuality, brotli_max_int(minQuality, params.quality))
	// Low qualities use static entropy codes; large-window mode is disabled there.
	if params.quality <= maxQualityForStaticEntropyCodes {
		params.large_window = false
	}

	if params.lgwin < minWindowBits {
		params.lgwin = minWindowBits
	} else {
		var max_lgwin int
		if params.large_window {
			max_lgwin = largeMaxWindowBits
		} else {
			max_lgwin = maxWindowBits
		}
		if params.lgwin > uint(max_lgwin) {
			params.lgwin = uint(max_lgwin)
		}
	}
}
|
||||
|
||||
/* Returns optimized lg_block value. */
func computeLgBlock(params *encoderParams) int {
	var lgblock int = params.lgblock
	if params.quality == fastOnePassCompressionQuality || params.quality == fastTwoPassCompressionQuality {
		// Fast qualities process whole-window blocks.
		lgblock = int(params.lgwin)
	} else if params.quality < minQualityForBlockSplit {
		lgblock = 14
	} else if lgblock == 0 {
		// Auto-select: default 16, widened up to 18 for high qualities
		// with a larger window.
		lgblock = 16
		if params.quality >= 9 && params.lgwin > uint(lgblock) {
			lgblock = brotli_min_int(18, int(params.lgwin))
		}
	} else {
		// Explicit value: clamp into the supported block-bit range.
		lgblock = brotli_min_int(maxInputBlockBits, brotli_max_int(minInputBlockBits, lgblock))
	}

	return lgblock
}
|
||||
|
||||
/* Returns log2 of the size of main ring buffer area.
   Allocate at least lgwin + 1 bits for the ring buffer so that the newly
   added block fits there completely and we still get lgwin bits and at least
   read_block_size_bits + 1 bits because the copy tail length needs to be
   smaller than ring-buffer size. */
func computeRbBits(params *encoderParams) int {
	// One extra bit over the larger of window size and block size.
	return 1 + brotli_max_int(int(params.lgwin), params.lgblock)
}
|
||||
|
||||
func maxMetablockSize(params *encoderParams) uint {
|
||||
var bits int = brotli_min_int(computeRbBits(params), maxInputBlockBits)
|
||||
return uint(1) << uint(bits)
|
||||
}
|
||||
|
||||
/* When searching for backward references and have not seen matches for a long
|
||||
time, we can skip some match lookups. Unsuccessful match lookups are very
|
||||
expensive and this kind of a heuristic speeds up compression quite a lot.
|
||||
At first 8 byte strides are taken and every second byte is put to hasher.
|
||||
After 4x more literals stride by 16 bytes, every put 4-th byte to hasher.
|
||||
Applied only to qualities 2 to 9. */
|
||||
func literalSpreeLengthForSparseSearch(params *encoderParams) uint {
|
||||
if params.quality < 9 {
|
||||
return 64
|
||||
} else {
|
||||
return 512
|
||||
}
|
||||
}
|
||||
|
||||
// chooseHasher selects the hasher implementation (hparams.type_ is a numeric
// hasher ID, e.g. 5, 6, 10, 40..42, 54) and its tuning parameters based on
// quality, window size, and the input size hint.
func chooseHasher(params *encoderParams, hparams *hasherParams) {
	if params.quality > 9 {
		hparams.type_ = 10
	} else if params.quality == 4 && params.size_hint >= 1<<20 {
		hparams.type_ = 54
	} else if params.quality < 5 {
		// Qualities 0..4 map directly onto hashers H0..H4.
		hparams.type_ = params.quality
	} else if params.lgwin <= 16 {
		if params.quality < 7 {
			hparams.type_ = 40
		} else if params.quality < 9 {
			hparams.type_ = 41
		} else {
			hparams.type_ = 42
		}
	} else if params.size_hint >= 1<<20 && params.lgwin >= 19 {
		hparams.type_ = 6
		hparams.block_bits = params.quality - 1
		hparams.bucket_bits = 15
		hparams.hash_len = 5
		if params.quality < 7 {
			hparams.num_last_distances_to_check = 4
		} else if params.quality < 9 {
			hparams.num_last_distances_to_check = 10
		} else {
			hparams.num_last_distances_to_check = 16
		}
	} else {
		hparams.type_ = 5
		hparams.block_bits = params.quality - 1
		if params.quality < 7 {
			hparams.bucket_bits = 14
		} else {
			hparams.bucket_bits = 15
		}
		if params.quality < 7 {
			hparams.num_last_distances_to_check = 4
		} else if params.quality < 9 {
			hparams.num_last_distances_to_check = 10
		} else {
			hparams.num_last_distances_to_check = 16
		}
	}

	if params.lgwin > 24 {
		/* Different hashers for large window brotli: not for qualities <= 2,
		   these are too fast for large window. Not for qualities >= 10: their
		   hasher already works well with large window. So the changes are:
		   H3 --> H35: for quality 3.
		   H54 --> H55: for quality 4 with size hint > 1MB
		   H6 --> H65: for qualities 5, 6, 7, 8, 9. */
		if hparams.type_ == 3 {
			hparams.type_ = 35
		}

		if hparams.type_ == 54 {
			hparams.type_ = 55
		}

		if hparams.type_ == 6 {
			hparams.type_ = 65
		}
	}
}
|
||||
108
vendor/github.com/andybalholm/brotli/reader.go
generated
vendored
Normal file
108
vendor/github.com/andybalholm/brotli/reader.go
generated
vendored
Normal file
@ -0,0 +1,108 @@
|
||||
package brotli
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
// decodeError wraps a raw decoder error code so it satisfies the error
// interface.
type decodeError int

// Error renders the decoder error code as a "brotli: ..." message.
func (err decodeError) Error() string {
	return "brotli: " + string(decoderErrorString(int(err)))
}

// errExcessiveInput is returned when input bytes remain after the stream has
// fully decoded; errInvalidState when the decoder asks for more input while
// unconsumed input is still pending.
var errExcessiveInput = errors.New("brotli: excessive input")
var errInvalidState = errors.New("brotli: invalid state")
||||
|
||||
// readBufSize is a "good" buffer size that avoids excessive round-trips
|
||||
// between C and Go but doesn't waste too much memory on buffering.
|
||||
// It is arbitrarily chosen to be equal to the constant used in io.Copy.
|
||||
const readBufSize = 32 * 1024
|
||||
|
||||
// NewReader creates a new Reader reading the given reader.
|
||||
func NewReader(src io.Reader) *Reader {
|
||||
r := new(Reader)
|
||||
r.Reset(src)
|
||||
return r
|
||||
}
|
||||
|
||||
// Reset discards the Reader's state and makes it equivalent to the result of
// its original state from NewReader, but reading from src instead.
// This permits reusing a Reader rather than allocating a new one.
// The returned error is always nil.
func (r *Reader) Reset(src io.Reader) error {
	if r.error_code < 0 {
		// There was an unrecoverable error, leaving the Reader's state
		// undefined. Clear out everything but the buffer.
		*r = Reader{buf: r.buf}
	}

	decoderStateInit(r)
	r.src = src
	// Lazily allocate the scratch buffer; it is reused across Resets.
	if r.buf == nil {
		r.buf = make([]byte, readBufSize)
	}
	return nil
}
|
||||
|
||||
// Read implements io.Reader: it decompresses from r.src into p, refilling the
// internal input buffer from r.src as the decoder requests more input.
func (r *Reader) Read(p []byte) (n int, err error) {
	// Prime the input buffer if the decoder has nothing buffered to emit and
	// no pending input.
	if !decoderHasMoreOutput(r) && len(r.in) == 0 {
		m, readErr := r.src.Read(r.buf)
		if m == 0 {
			// If readErr is `nil`, we just proxy underlying stream behavior.
			return 0, readErr
		}
		r.in = r.buf[:m]
	}

	if len(p) == 0 {
		return 0, nil
	}

	for {
		var written uint
		in_len := uint(len(r.in))
		out_len := uint(len(p))
		in_remaining := in_len
		out_remaining := out_len
		// decoderDecompressStream advances r.in and p in place; the
		// *_remaining counters report how much was consumed/produced.
		result := decoderDecompressStream(r, &in_remaining, &r.in, &out_remaining, &p)
		written = out_len - out_remaining
		n = int(written)

		switch result {
		case decoderResultSuccess:
			// Stream complete: trailing bytes are an error.
			if len(r.in) > 0 {
				return n, errExcessiveInput
			}
			return n, nil
		case decoderResultError:
			return n, decodeError(decoderGetErrorCode(r))
		case decoderResultNeedsMoreOutput:
			if n == 0 {
				return 0, io.ErrShortBuffer
			}
			return n, nil
		case decoderNeedsMoreInput:
			// Fall through to refill below.
		}

		// Needing more input while input remains unconsumed is a decoder bug.
		if len(r.in) != 0 {
			return 0, errInvalidState
		}

		// Calling r.src.Read may block. Don't block if we have data to return.
		if n > 0 {
			return n, nil
		}

		// Top off the buffer.
		encN, err := r.src.Read(r.buf)
		if encN == 0 {
			// Not enough data to complete decoding.
			if err == io.EOF {
				return 0, io.ErrUnexpectedEOF
			}
			return 0, err
		}
		r.in = r.buf[:encN]
	}
}
|
||||
134
vendor/github.com/andybalholm/brotli/ringbuffer.go
generated
vendored
Normal file
134
vendor/github.com/andybalholm/brotli/ringbuffer.go
generated
vendored
Normal file
@ -0,0 +1,134 @@
|
||||
package brotli
|
||||
|
||||
/* Copyright 2013 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* A ringBuffer(window_bits, tail_bits) contains `1 << window_bits' bytes of
   data in a circular manner: writing a byte writes it to:
   `position() % (1 << window_bits)'.
   For convenience, the ringBuffer array contains another copy of the
   first `1 << tail_bits' bytes:
   buffer_[i] == buffer_[i + (1 << window_bits)], if i < (1 << tail_bits),
   and another copy of the last two bytes:
   buffer_[-1] == buffer_[(1 << window_bits) - 1] and
   buffer_[-2] == buffer_[(1 << window_bits) - 2]. */
type ringBuffer struct {
	size_       uint32 // 1 << window_bits (set in ringBufferSetup)
	mask_       uint32 // size_ - 1, used to wrap positions
	tail_size_  uint32 // 1 << tail_bits; length of the mirrored tail region
	total_size_ uint32 // size_ + tail_size_
	cur_size_   uint32 // currently allocated logical size (lazy allocation)
	pos_        uint32 // write position; bit 31 marks "not first lap" (wrapped)
	data_       []byte // backing storage: 2-byte prefix + buffer + slack
	buffer_     []byte // data_[2:]; the addressable ring area
}
|
||||
|
||||
// ringBufferInit resets the write position. Storage is allocated lazily by
// ringBufferWrite, so no buffers are touched here.
func ringBufferInit(rb *ringBuffer) {
	rb.pos_ = 0
}
|
||||
|
||||
func ringBufferSetup(params *encoderParams, rb *ringBuffer) {
|
||||
var window_bits int = computeRbBits(params)
|
||||
var tail_bits int = params.lgblock
|
||||
*(*uint32)(&rb.size_) = 1 << uint(window_bits)
|
||||
*(*uint32)(&rb.mask_) = (1 << uint(window_bits)) - 1
|
||||
*(*uint32)(&rb.tail_size_) = 1 << uint(tail_bits)
|
||||
*(*uint32)(&rb.total_size_) = rb.size_ + rb.tail_size_
|
||||
}
|
||||
|
||||
const kSlackForEightByteHashingEverywhere uint = 7
|
||||
|
||||
/* Allocates or re-allocates data_ to the given length + plus some slack
   region before and after. Fills the slack regions with zeros. */
func ringBufferInitBuffer(buflen uint32, rb *ringBuffer) {
	var new_data []byte
	var i uint
	// Layout: 2 prefix bytes (mirrors of the last two ring bytes) + buflen +
	// trailing slack so 8-byte hashing may read past the end safely.
	size := 2 + int(buflen) + int(kSlackForEightByteHashingEverywhere)
	if cap(rb.data_) < size {
		new_data = make([]byte, size)
	} else {
		new_data = rb.data_[:size]
	}
	// Preserve previously written contents on reallocation/growth.
	if rb.data_ != nil {
		copy(new_data, rb.data_[:2+rb.cur_size_+uint32(kSlackForEightByteHashingEverywhere)])
	}

	rb.data_ = new_data
	rb.cur_size_ = buflen
	rb.buffer_ = rb.data_[2:]
	// Zero the prefix and the trailing slack.
	rb.data_[1] = 0
	rb.data_[0] = rb.data_[1]
	for i = 0; i < kSlackForEightByteHashingEverywhere; i++ {
		rb.buffer_[rb.cur_size_+uint32(i)] = 0
	}
}
|
||||
|
||||
// ringBufferWriteTail mirrors the first bytes of a write into the tail copy
// region (at offset size_), maintaining the invariant
// buffer_[i] == buffer_[i + size_] for i < tail_size_.
func ringBufferWriteTail(bytes []byte, n uint, rb *ringBuffer) {
	var masked_pos uint = uint(rb.pos_ & rb.mask_)
	if uint32(masked_pos) < rb.tail_size_ {
		/* Just fill the tail buffer with the beginning data. */
		var p uint = uint(rb.size_ + uint32(masked_pos))
		copy(rb.buffer_[p:], bytes[:brotli_min_size_t(n, uint(rb.tail_size_-uint32(masked_pos)))])
	}
}
|
||||
|
||||
/* Push bytes into the ring buffer. */
func ringBufferWrite(bytes []byte, n uint, rb *ringBuffer) {
	if rb.pos_ == 0 && uint32(n) < rb.tail_size_ {
		/* Special case for the first write: to process the first block, we don't
		   need to allocate the whole ring-buffer and we don't need the tail
		   either. However, we do this memory usage optimization only if the
		   first write is less than the tail size, which is also the input block
		   size, otherwise it is likely that other blocks will follow and we
		   will need to reallocate to the full size anyway. */
		rb.pos_ = uint32(n)

		ringBufferInitBuffer(rb.pos_, rb)
		copy(rb.buffer_, bytes[:n])
		return
	}

	if rb.cur_size_ < rb.total_size_ {
		/* Lazily allocate the full buffer. */
		ringBufferInitBuffer(rb.total_size_, rb)

		/* Initialize the last two bytes to zero, so that we don't have to worry
		   later when we copy the last two bytes to the first two positions. */
		rb.buffer_[rb.size_-2] = 0

		rb.buffer_[rb.size_-1] = 0
	}
	{
		var masked_pos uint = uint(rb.pos_ & rb.mask_)

		/* The length of the writes is limited so that we do not need to worry
		   about a write */
		ringBufferWriteTail(bytes, n, rb)

		if uint32(masked_pos+n) <= rb.size_ {
			/* A single write fits. */
			copy(rb.buffer_[masked_pos:], bytes[:n])
		} else {
			/* Split into two writes.
			   Copy into the end of the buffer, including the tail buffer. */
			copy(rb.buffer_[masked_pos:], bytes[:brotli_min_size_t(n, uint(rb.total_size_-uint32(masked_pos)))])

			/* Copy into the beginning of the buffer */
			copy(rb.buffer_, bytes[rb.size_-uint32(masked_pos):][:uint32(n)-(rb.size_-uint32(masked_pos))])
		}
	}
	{
		var not_first_lap bool = rb.pos_&(1<<31) != 0
		var rb_pos_mask uint32 = (1 << 31) - 1
		// Refresh the 2-byte prefix so the mirrored "last two bytes" stay
		// consistent after this write.
		rb.data_[0] = rb.buffer_[rb.size_-2]
		rb.data_[1] = rb.buffer_[rb.size_-1]
		rb.pos_ = (rb.pos_ & rb_pos_mask) + uint32(uint32(n)&rb_pos_mask)
		if not_first_lap {
			/* Wrap, but preserve not-a-first-lap feature. */
			rb.pos_ |= 1 << 31
		}
	}
}
|
||||
294
vendor/github.com/andybalholm/brotli/state.go
generated
vendored
Normal file
294
vendor/github.com/andybalholm/brotli/state.go
generated
vendored
Normal file
@ -0,0 +1,294 @@
|
||||
package brotli
|
||||
|
||||
import "io"
|
||||
|
||||
/* Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* Brotli state for partial streaming decoding. */
|
||||
const (
|
||||
stateUninited = iota
|
||||
stateLargeWindowBits
|
||||
stateInitialize
|
||||
stateMetablockBegin
|
||||
stateMetablockHeader
|
||||
stateMetablockHeader2
|
||||
stateContextModes
|
||||
stateCommandBegin
|
||||
stateCommandInner
|
||||
stateCommandPostDecodeLiterals
|
||||
stateCommandPostWrapCopy
|
||||
stateUncompressed
|
||||
stateMetadata
|
||||
stateCommandInnerWrite
|
||||
stateMetablockDone
|
||||
stateCommandPostWrite1
|
||||
stateCommandPostWrite2
|
||||
stateHuffmanCode0
|
||||
stateHuffmanCode1
|
||||
stateHuffmanCode2
|
||||
stateHuffmanCode3
|
||||
stateContextMap1
|
||||
stateContextMap2
|
||||
stateTreeGroup
|
||||
stateDone
|
||||
)
|
||||
|
||||
const (
|
||||
stateMetablockHeaderNone = iota
|
||||
stateMetablockHeaderEmpty
|
||||
stateMetablockHeaderNibbles
|
||||
stateMetablockHeaderSize
|
||||
stateMetablockHeaderUncompressed
|
||||
stateMetablockHeaderReserved
|
||||
stateMetablockHeaderBytes
|
||||
stateMetablockHeaderMetadata
|
||||
)
|
||||
|
||||
const (
|
||||
stateUncompressedNone = iota
|
||||
stateUncompressedWrite
|
||||
)
|
||||
|
||||
const (
|
||||
stateTreeGroupNone = iota
|
||||
stateTreeGroupLoop
|
||||
)
|
||||
|
||||
const (
|
||||
stateContextMapNone = iota
|
||||
stateContextMapReadPrefix
|
||||
stateContextMapHuffman
|
||||
stateContextMapDecode
|
||||
stateContextMapTransform
|
||||
)
|
||||
|
||||
const (
|
||||
stateHuffmanNone = iota
|
||||
stateHuffmanSimpleSize
|
||||
stateHuffmanSimpleRead
|
||||
stateHuffmanSimpleBuild
|
||||
stateHuffmanComplex
|
||||
stateHuffmanLengthSymbols
|
||||
)
|
||||
|
||||
const (
|
||||
stateDecodeUint8None = iota
|
||||
stateDecodeUint8Short
|
||||
stateDecodeUint8Long
|
||||
)
|
||||
|
||||
const (
|
||||
stateReadBlockLengthNone = iota
|
||||
stateReadBlockLengthSuffix
|
||||
)
|
||||
|
||||
// Reader holds the complete state of a streaming brotli decoder, allowing
// decoding to be suspended and resumed as input arrives (see Reader.Read).
type Reader struct {
	src io.Reader
	buf []byte // scratch space for reading from src
	in  []byte // current chunk to decode; usually aliases buf

	// Main decoder state machine (see the state* constants above).
	state        int
	loop_counter int
	br           bitReader
	buffer       struct {
		u64 uint64
		u8  [8]byte
	}
	buffer_length         uint32
	pos                   int
	max_backward_distance int
	max_distance          int
	ringbuffer_size       int
	ringbuffer_mask       int
	dist_rb_idx           int
	dist_rb               [4]int // ring of the four most recent distances
	error_code            int    // negative on unrecoverable error (see Reset)
	sub_loop_counter      uint32

	// Output ring buffer and Huffman tree groups for the current meta-block.
	ringbuffer             []byte
	ringbuffer_end         []byte
	htree_command          []huffmanCode
	context_lookup         []byte
	context_map_slice      []byte
	dist_context_map_slice []byte
	literal_hgroup         huffmanTreeGroup
	insert_copy_hgroup     huffmanTreeGroup
	distance_hgroup        huffmanTreeGroup
	block_type_trees       []huffmanCode
	block_len_trees        []huffmanCode
	trivial_literal_context int
	distance_context        int
	meta_block_remaining_len int
	block_length_index       uint32
	block_length             [3]uint32
	num_block_types          [3]uint32
	block_type_rb            [6]uint32
	distance_postfix_bits    uint32
	num_direct_distance_codes uint32
	distance_postfix_mask     int
	num_dist_htrees           uint32
	dist_context_map          []byte
	literal_htree             []huffmanCode
	dist_htree_index          byte
	repeat_code_len           uint32
	prev_code_len             uint32
	copy_length               int
	distance_code             int
	rb_roundtrips             uint
	partial_pos_out           uint

	// Scratch state for Huffman table construction and context maps.
	symbol                   uint32
	repeat                   uint32
	space                    uint32
	table                    [32]huffmanCode
	symbol_lists             symbolList
	symbols_lists_array      [huffmanMaxCodeLength + 1 + numCommandSymbols]uint16
	next_symbol              [32]int
	code_length_code_lengths [codeLengthCodes]byte
	code_length_histo        [16]uint16
	htree_index              int
	next                     []huffmanCode
	context_index            uint32
	max_run_length_prefix    uint32
	code                     uint32
	context_map_table        [huffmanMaxSize272]huffmanCode

	// Sub-state machines (see the substate* constants above).
	substate_metablock_header  int
	substate_tree_group        int
	substate_context_map       int
	substate_uncompressed      int
	substate_huffman           int
	substate_decode_uint8      int
	substate_read_block_length int

	// Per-meta-block flags and window configuration.
	is_last_metablock           uint
	is_uncompressed             uint
	is_metadata                 uint
	should_wrap_ringbuffer      uint
	canny_ringbuffer_allocation uint
	large_window                bool
	size_nibbles                uint
	window_bits                 uint32
	new_ringbuffer_size         int
	num_literal_htrees          uint32
	context_map                 []byte
	context_modes               []byte
	dictionary                  *dictionary
	transforms                  *transforms
	trivial_literal_contexts    [8]uint32
}
|
||||
|
||||
// decoderStateInit resets a Reader to its pristine, pre-stream state: it
// clears the error code, (re)initializes the bit reader, puts every decoder
// sub-state machine back to its "none" value, and zeroes all per-stream
// bookkeeping (ring buffer sizes, context maps, Huffman tree groups,
// distance ring buffer, etc.). It always returns true; the boolean result
// presumably mirrors the C API this code was translated from.
func decoderStateInit(s *Reader) bool {
	s.error_code = 0 /* BROTLI_DECODER_NO_ERROR */

	initBitReader(&s.br)
	s.state = stateUninited
	s.large_window = false

	// Reset every sub-state machine to its initial "none" state.
	s.substate_metablock_header = stateMetablockHeaderNone
	s.substate_tree_group = stateTreeGroupNone
	s.substate_context_map = stateContextMapNone
	s.substate_uncompressed = stateUncompressedNone
	s.substate_huffman = stateHuffmanNone
	s.substate_decode_uint8 = stateDecodeUint8None
	s.substate_read_block_length = stateReadBlockLengthNone

	s.buffer_length = 0
	s.loop_counter = 0
	s.pos = 0
	s.rb_roundtrips = 0
	s.partial_pos_out = 0

	s.block_type_trees = nil
	s.block_len_trees = nil
	// Ring buffer sizing is decided later, once the window bits are known.
	s.ringbuffer_size = 0
	s.new_ringbuffer_size = 0
	s.ringbuffer_mask = 0

	s.context_map = nil
	s.context_modes = nil
	s.dist_context_map = nil
	s.context_map_slice = nil
	s.dist_context_map_slice = nil

	s.sub_loop_counter = 0

	// Clear all three Huffman tree groups (literals, insert-and-copy
	// commands, distances).
	s.literal_hgroup.codes = nil
	s.literal_hgroup.htrees = nil
	s.insert_copy_hgroup.codes = nil
	s.insert_copy_hgroup.htrees = nil
	s.distance_hgroup.codes = nil
	s.distance_hgroup.htrees = nil

	s.is_last_metablock = 0
	s.is_uncompressed = 0
	s.is_metadata = 0
	s.should_wrap_ringbuffer = 0
	s.canny_ringbuffer_allocation = 1

	s.window_bits = 0
	s.max_distance = 0
	// Initial distance ring buffer contents defined by the brotli format
	// (RFC 7932).
	s.dist_rb[0] = 16
	s.dist_rb[1] = 15
	s.dist_rb[2] = 11
	s.dist_rb[3] = 4
	s.dist_rb_idx = 0
	s.block_type_trees = nil
	s.block_len_trees = nil

	// symbol_lists is a fixed-offset view into symbols_lists_array so that
	// logical indices down to -(huffmanMaxCodeLength+1) are addressable.
	s.symbol_lists.storage = s.symbols_lists_array[:]
	s.symbol_lists.offset = huffmanMaxCodeLength + 1

	s.dictionary = getDictionary()
	s.transforms = getTransforms()

	return true
}
|
||||
|
||||
// decoderStateMetablockBegin resets the per-metablock decoder state before a
// new metablock header is parsed: each of the three block categories starts
// with a single block type, length 1<<24, and a {1, 0} block-type ring
// buffer; all per-metablock context maps and Huffman tree groups are cleared.
func decoderStateMetablockBegin(s *Reader) {
	s.meta_block_remaining_len = 0
	s.block_length[0] = 1 << 24
	s.block_length[1] = 1 << 24
	s.block_length[2] = 1 << 24
	s.num_block_types[0] = 1
	s.num_block_types[1] = 1
	s.num_block_types[2] = 1
	// Block-type ring buffers hold the two most recent types per category,
	// initialized to {1, 0} for each.
	s.block_type_rb[0] = 1
	s.block_type_rb[1] = 0
	s.block_type_rb[2] = 1
	s.block_type_rb[3] = 0
	s.block_type_rb[4] = 1
	s.block_type_rb[5] = 0
	s.context_map = nil
	s.context_modes = nil
	s.dist_context_map = nil
	s.context_map_slice = nil
	s.literal_htree = nil
	s.dist_context_map_slice = nil
	s.dist_htree_index = 0
	s.context_lookup = nil
	s.literal_hgroup.codes = nil
	s.literal_hgroup.htrees = nil
	s.insert_copy_hgroup.codes = nil
	s.insert_copy_hgroup.htrees = nil
	s.distance_hgroup.codes = nil
	s.distance_hgroup.htrees = nil
}
|
||||
|
||||
// decoderStateCleanupAfterMetablock drops the per-metablock allocations
// (context maps/modes and the htree tables of the three Huffman tree groups)
// so the garbage collector can reclaim them between metablocks.
func decoderStateCleanupAfterMetablock(s *Reader) {
	s.context_modes = nil
	s.context_map = nil
	s.dist_context_map = nil
	s.literal_hgroup.htrees = nil
	s.insert_copy_hgroup.htrees = nil
	s.distance_hgroup.htrees = nil
}
|
||||
|
||||
func decoderHuffmanTreeGroupInit(s *Reader, group *huffmanTreeGroup, alphabet_size uint32, max_symbol uint32, ntrees uint32) bool {
|
||||
var max_table_size uint = uint(kMaxHuffmanTableSize[(alphabet_size+31)>>5])
|
||||
group.alphabet_size = uint16(alphabet_size)
|
||||
group.max_symbol = uint16(max_symbol)
|
||||
group.num_htrees = uint16(ntrees)
|
||||
group.htrees = make([][]huffmanCode, ntrees)
|
||||
group.codes = make([]huffmanCode, (uint(ntrees) * max_table_size))
|
||||
return !(group.codes == nil)
|
||||
}
|
||||
662
vendor/github.com/andybalholm/brotli/static_dict.go
generated
vendored
Normal file
662
vendor/github.com/andybalholm/brotli/static_dict.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
75094
vendor/github.com/andybalholm/brotli/static_dict_lut.go
generated
vendored
Normal file
75094
vendor/github.com/andybalholm/brotli/static_dict_lut.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
22
vendor/github.com/andybalholm/brotli/symbol_list.go
generated
vendored
Normal file
22
vendor/github.com/andybalholm/brotli/symbol_list.go
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
package brotli
|
||||
|
||||
/* Copyright 2013 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* Utilities for building Huffman decoding tables. */
|
||||
|
||||
// symbolList is a view into a uint16 slice shifted by a fixed offset: every
// logical index is translated by offset before touching the backing storage,
// which lets callers address positions below the slice start once the offset
// is accounted for.
type symbolList struct {
	storage []uint16 // backing array, shared with the owner
	offset  int      // added to each logical index
}

// symbolListGet returns the element at logical index i.
func symbolListGet(sl symbolList, i int) uint16 {
	idx := sl.offset + i
	return sl.storage[idx]
}

// symbolListPut stores val at logical index i.
func symbolListPut(sl symbolList, i int, val uint16) {
	idx := sl.offset + i
	sl.storage[idx] = val
}
|
||||
641
vendor/github.com/andybalholm/brotli/transform.go
generated
vendored
Normal file
641
vendor/github.com/andybalholm/brotli/transform.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
70
vendor/github.com/andybalholm/brotli/utf8_util.go
generated
vendored
Normal file
70
vendor/github.com/andybalholm/brotli/utf8_util.go
generated
vendored
Normal file
@ -0,0 +1,70 @@
|
||||
package brotli
|
||||
|
||||
/* Copyright 2013 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* Heuristics for deciding about the UTF8-ness of strings. */
|
||||
|
||||
const kMinUTF8Ratio float64 = 0.75
|
||||
|
||||
/* parseAsUTF8 decodes one character starting at input[0]; at most size bytes
   are available. On success *symbol receives the decoded code point and the
   number of bytes consumed (1..4) is returned. If the bytes do not form a
   valid, minimally-encoded UTF-8 sequence (overlong forms and code points
   above U+10FFFF are rejected, and a NUL byte is deliberately treated as
   invalid), *symbol is set to a sentinel above the Unicode range
   (0x110000 | input[0]) and 1 is returned. */
func parseAsUTF8(symbol *int, input []byte, size uint) uint {
	/* ASCII (a NUL falls through to the invalid path below) */
	if input[0]&0x80 == 0 {
		*symbol = int(input[0])
		if *symbol > 0 {
			return 1
		}
	}

	/* 2-byte UTF8 */
	if size > 1 && input[0]&0xE0 == 0xC0 && input[1]&0xC0 == 0x80 {
		*symbol = (int(input[0])&0x1F)<<6 | int(input[1])&0x3F
		if *symbol > 0x7F { /* reject overlong 2-byte encodings */
			return 2
		}
	}

	/* 3-byte UTF8 */
	if size > 2 && input[0]&0xF0 == 0xE0 && input[1]&0xC0 == 0x80 && input[2]&0xC0 == 0x80 {
		*symbol = (int(input[0])&0x0F)<<12 | (int(input[1])&0x3F)<<6 | int(input[2])&0x3F
		if *symbol > 0x7FF { /* reject overlong 3-byte encodings */
			return 3
		}
	}

	/* 4-byte UTF8 */
	if size > 3 && input[0]&0xF8 == 0xF0 && input[1]&0xC0 == 0x80 && input[2]&0xC0 == 0x80 && input[3]&0xC0 == 0x80 {
		*symbol = (int(input[0])&0x07)<<18 | (int(input[1])&0x3F)<<12 | (int(input[2])&0x3F)<<6 | int(input[3])&0x3F
		if *symbol > 0xFFFF && *symbol <= 0x10FFFF { /* reject overlong forms and values past U+10FFFF */
			return 4
		}
	}

	/* Not UTF8, emit a special symbol above the UTF8-code space */
	*symbol = 0x110000 | int(input[0])

	return 1
}
|
||||
|
||||
/* Returns 1 if at least min_fraction of the data is UTF8-encoded.*/
|
||||
func isMostlyUTF8(data []byte, pos uint, mask uint, length uint, min_fraction float64) bool {
|
||||
var size_utf8 uint = 0
|
||||
var i uint = 0
|
||||
for i < length {
|
||||
var symbol int
|
||||
current_data := data[(pos+i)&mask:]
|
||||
var bytes_read uint = parseAsUTF8(&symbol, current_data, length-i)
|
||||
i += bytes_read
|
||||
if symbol < 0x110000 {
|
||||
size_utf8 += bytes_read
|
||||
}
|
||||
}
|
||||
|
||||
return float64(size_utf8) > min_fraction*float64(length)
|
||||
}
|
||||
7
vendor/github.com/andybalholm/brotli/util.go
generated
vendored
Normal file
7
vendor/github.com/andybalholm/brotli/util.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
package brotli
|
||||
|
||||
// assert panics when cond is false. It stands in for C's assert macro in
// this translated code and flags programmer errors only.
func assert(cond bool) {
	if cond {
		return
	}
	panic("assertion failure")
}
|
||||
52
vendor/github.com/andybalholm/brotli/write_bits.go
generated
vendored
Normal file
52
vendor/github.com/andybalholm/brotli/write_bits.go
generated
vendored
Normal file
@ -0,0 +1,52 @@
|
||||
package brotli
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
/* Copyright 2010 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
/* Write bits into a byte array. */
|
||||
|
||||
/* writeBits writes the low n_bits of bits into array at bit position *pos,
   least-significant-bit first within each byte, and advances *pos.

   Up to 56 bits can be written per call: the current partial byte may hold
   up to 7 existing bits, and the new bits are ORed in above them before the
   combined value is stored with a single 8-byte little-endian write. The
   array must therefore have at least 8 bytes available starting at the byte
   containing *pos, and the bytes beyond the current position are treated as
   scratch (they are overwritten). */
func writeBits(n_bits uint, bits uint64, pos *uint, array []byte) {
	dst := array[*pos>>3:]
	// Keep the bits already present in the partial first byte, then splice
	// the new bits in just above them.
	merged := uint64(dst[0]) | bits<<(*pos&7)
	binary.LittleEndian.PutUint64(dst, merged)
	*pos += n_bits
}
|
||||
|
||||
func writeSingleBit(bit bool, pos *uint, array []byte) {
|
||||
if bit {
|
||||
writeBits(1, 1, pos, array)
|
||||
} else {
|
||||
writeBits(1, 0, pos, array)
|
||||
}
|
||||
}
|
||||
|
||||
// writeBitsPrepareStorage zeroes the byte at bit position pos so that
// subsequent writeBits calls can OR fresh bits into it. pos must be
// byte-aligned.
func writeBitsPrepareStorage(pos uint, array []byte) {
	if pos&7 != 0 {
		// Inlined equivalent of this package's assert helper.
		panic("assertion failure")
	}
	array[pos>>3] = 0
}
|
||||
162
vendor/github.com/andybalholm/brotli/writer.go
generated
vendored
Normal file
162
vendor/github.com/andybalholm/brotli/writer.go
generated
vendored
Normal file
@ -0,0 +1,162 @@
|
||||
package brotli
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
"github.com/andybalholm/brotli/matchfinder"
|
||||
)
|
||||
|
||||
// Compression levels accepted by NewWriterLevel and WriterOptions.Quality.
// Higher values compress more densely but more slowly.
const (
	BestSpeed          = 0
	BestCompression    = 11
	DefaultCompression = 6
)
|
||||
|
||||
// WriterOptions configures Writer.
// Note that a zero Quality is a valid setting (BestSpeed), not a request for
// the default; use NewWriter for DefaultCompression.
type WriterOptions struct {
	// Quality controls the compression-speed vs compression-density trade-offs.
	// The higher the quality, the slower the compression. Range is 0 to 11.
	Quality int
	// LGWin is the base 2 logarithm of the sliding window size.
	// Range is 10 to 24. 0 indicates automatic configuration based on Quality.
	LGWin int
}
|
||||
|
||||
var (
	// errEncode reports an internal failure of the streaming encoder.
	errEncode = errors.New("brotli: encode error")
	// errWriterClosed is returned by Write/Flush/Close after Close.
	errWriterClosed = errors.New("brotli: Writer is closed")
)
|
||||
|
||||
// NewWriter returns a Writer that compresses data at DefaultCompression
// quality and writes it to dst.
// It is the caller's responsibility to call Close on the Writer when done.
// Writes may be buffered and not flushed until Close.
func NewWriter(dst io.Writer) *Writer {
	return NewWriterLevel(dst, DefaultCompression)
}
|
||||
|
||||
// NewWriterLevel is like NewWriter but specifies the compression level instead
// of assuming DefaultCompression.
// The compression level can be DefaultCompression or any integer value between
// BestSpeed and BestCompression inclusive. The sliding window size is left at
// its automatic setting (WriterOptions.LGWin zero value).
func NewWriterLevel(dst io.Writer, level int) *Writer {
	return NewWriterOptions(dst, WriterOptions{
		Quality: level,
	})
}
|
||||
|
||||
// NewWriterOptions is like NewWriter but specifies WriterOptions
|
||||
func NewWriterOptions(dst io.Writer, options WriterOptions) *Writer {
|
||||
w := new(Writer)
|
||||
w.options = options
|
||||
w.Reset(dst)
|
||||
return w
|
||||
}
|
||||
|
||||
// Reset discards the Writer's state and makes it equivalent to the result of
// its original state from NewWriter or NewWriterLevel, but writing to dst
// instead. This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(dst io.Writer) {
	encoderInitState(w)
	w.params.quality = w.options.Quality
	// LGWin == 0 requests automatic configuration, so only override the
	// window size when the caller set it explicitly.
	if w.options.LGWin > 0 {
		w.params.lgwin = uint(w.options.LGWin)
	}
	w.dst = dst
	w.err = nil
}
|
||||
|
||||
// writeChunk feeds p to the streaming encoder under the given operation
// (process/flush/finish), looping until all of p has been consumed or an
// error occurs. It returns the number of input bytes consumed and the first
// error encountered. A nil w.dst means the Writer was already closed.
func (w *Writer) writeChunk(p []byte, op int) (n int, err error) {
	if w.dst == nil {
		return 0, errWriterClosed
	}
	if w.err != nil {
		return 0, w.err
	}

	for {
		availableIn := uint(len(p))
		nextIn := p
		success := encoderCompressStream(w, op, &availableIn, &nextIn)
		// availableIn was decremented by however much the encoder consumed.
		bytesConsumed := len(p) - int(availableIn)
		p = p[bytesConsumed:]
		n += bytesConsumed
		if !success {
			return n, errEncode
		}

		// Stop when the input is exhausted, or on an error recorded in
		// w.err (presumably set while emitting output to w.dst — note the
		// error, if any, is still returned alongside the consumed count).
		if len(p) == 0 || w.err != nil {
			return n, w.err
		}
	}
}
|
||||
|
||||
// Flush outputs encoded data for all input provided to Write. The resulting
// output can be decoded to match all input before Flush, but the stream is
// not yet complete until after Close.
// Flush has a negative impact on compression.
func (w *Writer) Flush() error {
	// A nil input slice is valid: only buffered encoder state is emitted.
	_, err := w.writeChunk(nil, operationFlush)
	return err
}
|
||||
|
||||
// Close flushes remaining data to the decorated writer and terminates the
// brotli stream. Subsequent writes return errWriterClosed (w.dst is nilled
// out below, which writeChunk reports).
func (w *Writer) Close() error {
	// If stream is already closed, it is reported by `writeChunk`.
	_, err := w.writeChunk(nil, operationFinish)
	w.dst = nil
	return err
}
|
||||
|
||||
// Write implements io.Writer. Flush or Close must be called to ensure that the
// encoded bytes are actually flushed to the underlying Writer.
func (w *Writer) Write(p []byte) (n int, err error) {
	return w.writeChunk(p, operationProcess)
}
|
||||
|
||||
// nopCloser wraps an io.Writer with a no-op Close so it satisfies
// io.WriteCloser.
type nopCloser struct {
	io.Writer
}

// Close implements io.Closer; it does nothing and always returns nil.
func (nopCloser) Close() error { return nil }
|
||||
|
||||
// NewWriterV2 is like NewWriterLevel, but it uses the new implementation
|
||||
// based on the matchfinder package. It currently supports up to level 7;
|
||||
// if a higher level is specified, level 7 will be used.
|
||||
func NewWriterV2(dst io.Writer, level int) *matchfinder.Writer {
|
||||
var mf matchfinder.MatchFinder
|
||||
if level < 2 {
|
||||
mf = matchfinder.M0{Lazy: level == 1}
|
||||
} else {
|
||||
hashLen := 6
|
||||
if level >= 6 {
|
||||
hashLen = 5
|
||||
}
|
||||
chainLen := 64
|
||||
switch level {
|
||||
case 2:
|
||||
chainLen = 0
|
||||
case 3:
|
||||
chainLen = 1
|
||||
case 4:
|
||||
chainLen = 2
|
||||
case 5:
|
||||
chainLen = 4
|
||||
case 6:
|
||||
chainLen = 8
|
||||
}
|
||||
mf = &matchfinder.M4{
|
||||
MaxDistance: 1 << 20,
|
||||
ChainLength: chainLen,
|
||||
HashLen: hashLen,
|
||||
DistanceBitCost: 57,
|
||||
}
|
||||
}
|
||||
|
||||
return &matchfinder.Writer{
|
||||
Dest: dst,
|
||||
MatchFinder: mf,
|
||||
Encoder: &Encoder{},
|
||||
BlockSize: 1 << 16,
|
||||
}
|
||||
}
|
||||
4
vendor/modules.txt
vendored
4
vendor/modules.txt
vendored
@ -39,6 +39,10 @@ github.com/adhocore/gronx
|
||||
# github.com/agnivade/levenshtein v1.1.1
|
||||
## explicit; go 1.13
|
||||
github.com/agnivade/levenshtein
|
||||
# github.com/andybalholm/brotli v1.1.0
|
||||
## explicit; go 1.13
|
||||
github.com/andybalholm/brotli
|
||||
github.com/andybalholm/brotli/matchfinder
|
||||
# github.com/armon/go-metrics v0.4.1
|
||||
## explicit; go 1.12
|
||||
github.com/armon/go-metrics
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user