package lz4

import (
	"io"

	"github.com/pierrec/lz4/v4/internal/lz4block"
	"github.com/pierrec/lz4/v4/internal/lz4errors"
	"github.com/pierrec/lz4/v4/internal/lz4stream"
)
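
// readerStates maps each Reader state to the state entered after the operation
// permitted in that state completes successfully; errors and invalid
// transitions are handled by the shared state helpers (check, next, fail).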
var readerStates = []aState{
	noState:     newState,
	errorState:  newState,
	newState:    readState,
	readState:   closedState,
	closedState: newState,
}

// NewReader returns a new LZ4 frame decoder.
func NewReader(r io.Reader) *Reader {
	return newReader(r, false)
}
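
// Illustrative usage sketch, not part of the upstream library: decompress a
// whole LZ4 frame from src into dst. io.Copy takes the WriteTo fast path
// further below because *Reader implements io.WriterTo.
func exampleDecompress(dst io.Writer, src io.Reader) (int64, error) {
	zr := NewReader(src)
	return io.Copy(dst, zr)
}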

func newReader(r io.Reader, legacy bool) *Reader {
	zr := &Reader{frame: lz4stream.NewFrame()}
	zr.state.init(readerStates)
	_ = zr.Apply(DefaultConcurrency, defaultOnBlockDone)
	zr.Reset(r)
	return zr
}

// Reader allows reading an LZ4 stream.
type Reader struct {
	state   _State
	src     io.Reader        // source reader
	num     int              // concurrency level
	frame   *lz4stream.Frame // frame being read
	data    []byte           // block buffer allocated in non concurrent mode
	reads   chan []byte      // pending data
	idx     int              // index into data of the first byte not yet returned by Read
	handler func(int)        // onBlockDone callback, notified with the number of uncompressed bytes delivered
	cum     uint32           // cumulative size of the uncompressed data read so far
}
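
// Compile-time checks (illustrative addition, not in the upstream file): the
// Reader is meant to be consumed through these standard interfaces.
var (
	_ io.Reader   = (*Reader)(nil)
	_ io.WriterTo = (*Reader)(nil)
)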

func (*Reader) private() {}

// Apply applies the given options to the Reader. It is only valid on a newly
// created (or Reset) Reader, before the first Read.
func (r *Reader) Apply(options ...Option) (err error) {
	defer r.state.check(&err)
	switch r.state.state {
	case newState:
	case errorState:
		return r.state.err
	default:
		return lz4errors.ErrOptionClosedOrError
	}
	for _, o := range options {
		if err = o(r); err != nil {
			return
		}
	}
	return
}
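
// Illustrative sketch, not part of the upstream library: options are applied on
// a freshly created (or Reset) Reader, before the first Read. DefaultConcurrency
// is the package default already used by newReader above.
func exampleApplyOptions(src io.Reader) (*Reader, error) {
	zr := NewReader(src)
	if err := zr.Apply(DefaultConcurrency); err != nil {
		return nil, err
	}
	return zr, nil
}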

// Size returns the size of the underlying uncompressed data, if set in the stream.
func (r *Reader) Size() int {
	switch r.state.state {
	case readState, closedState:
		if r.frame.Descriptor.Flags.Size() {
			return int(r.frame.Descriptor.ContentSize)
		}
	}
	return 0
}
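
// Illustrative sketch, not part of the upstream library: Size only reports a
// value once the frame header has been read, and only if the producer recorded
// the content size in it, so check it after (or while) consuming the stream.
func exampleReportSize(dst io.Writer, src io.Reader) (copied int64, declared int, err error) {
	zr := NewReader(src)
	copied, err = io.Copy(dst, zr)
	declared = zr.Size() // 0 unless the frame header carried the content size
	return copied, declared, err
}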

func (r *Reader) isNotConcurrent() bool {
	return r.num == 1
}
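
// init starts the frame decoder on r.src, retains the channel of decoded
// blocks used in concurrent mode and allocates the block buffer.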
func (r *Reader) init() error {
	data, err := r.frame.InitR(r.src, r.num)
	if err != nil {
		return err
	}
	r.reads = data
	r.idx = 0
	size := r.frame.Descriptor.Flags.BlockSizeIndex()
	r.data = size.Get()
	r.cum = 0
	return nil
}

// Read reads and decompresses data from the underlying source into buf,
// implementing io.Reader.
func (r *Reader) Read(buf []byte) (n int, err error) {
	defer r.state.check(&err)
	switch r.state.state {
	case readState:
	case closedState, errorState:
		return 0, r.state.err
	case newState:
		// First initialization.
		if err = r.init(); r.state.next(err) {
			return
		}
	default:
		return 0, r.state.fail()
	}
	for len(buf) > 0 {
		var bn int
		if r.idx == 0 {
			if r.isNotConcurrent() {
				bn, err = r.read(buf)
			} else {
				lz4block.Put(r.data)
				r.data = <-r.reads
				if len(r.data) == 0 {
					// No uncompressed data: something went wrong or we are done.
					err = r.frame.Blocks.ErrorR()
				}
			}
			switch err {
			case nil:
			case io.EOF:
				if er := r.frame.CloseR(r.src); er != nil {
					err = er
				}
				lz4block.Put(r.data)
				r.data = nil
				return
			default:
				return
			}
		}
		if bn == 0 {
			// Fill buf with buffered data.
			bn = copy(buf, r.data[r.idx:])
			r.idx += bn
			if r.idx == len(r.data) {
				// All data read, get ready for the next Read.
				r.idx = 0
			}
		}
		buf = buf[bn:]
		n += bn
		r.handler(bn)
	}
	return
}
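
// Illustrative sketch, not part of the upstream library: drive Read directly
// with a fixed-size buffer, handing each decoded chunk to a callback. io.EOF
// marks the clean end of the frame.
func exampleReadLoop(src io.Reader, handle func([]byte) error) error {
	zr := NewReader(src)
	buf := make([]byte, 32*1024)
	for {
		n, err := zr.Read(buf)
		if n > 0 {
			if herr := handle(buf[:n]); herr != nil {
				return herr
			}
		}
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
	}
}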

// read uncompresses the next block as follows:
// - if buf has enough room, the block is uncompressed into it directly
//   and the length of the used space is returned
// - else, the uncompressed data is stored in r.data and 0 is returned
func (r *Reader) read(buf []byte) (int, error) {
	block := r.frame.Blocks.Block
	_, err := block.Read(r.frame, r.src, r.cum)
	if err != nil {
		return 0, err
	}
	var direct bool
	dst := r.data[:cap(r.data)]
	if len(buf) >= len(dst) {
		// Uncompress directly into buf.
		direct = true
		dst = buf
	}
	dst, err = block.Uncompress(r.frame, dst, true)
	if err != nil {
		return 0, err
	}
	r.cum += uint32(len(dst))
	if direct {
		return len(dst), nil
	}
	r.data = dst
	return 0, nil
}

// Reset clears the state of the Reader r such that it is equivalent to its
// initial state from NewReader, but instead reading from reader.
// No access to reader is performed.
func (r *Reader) Reset(reader io.Reader) {
	if r.data != nil {
		lz4block.Put(r.data)
		r.data = nil
	}
	r.frame.Reset(r.num)
	r.state.reset()
	r.src = reader
	r.reads = nil
}
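
// Illustrative sketch, not part of the upstream library: reuse one Reader for
// several compressed streams, attaching each source with Reset instead of
// allocating a new Reader per frame.
func exampleResetReuse(dst io.Writer, srcs ...io.Reader) (int64, error) {
	var total int64
	zr := NewReader(nil) // the real source is attached per frame via Reset
	for _, src := range srcs {
		zr.Reset(src)
		n, err := io.Copy(dst, zr)
		total += n
		if err != nil {
			return total, err
		}
	}
	return total, nil
}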

// WriteTo efficiently uncompresses the data from the Reader's underlying source to w.
func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
	switch r.state.state {
	case closedState, errorState:
		return 0, r.state.err
	case newState:
		if err = r.init(); r.state.next(err) {
			return
		}
	default:
		return 0, r.state.fail()
	}
	defer r.state.nextd(&err)
	var data []byte
	if r.isNotConcurrent() {
		size := r.frame.Descriptor.Flags.BlockSizeIndex()
		data = size.Get()
		defer lz4block.Put(data)
	}
	for {
		var bn int
		var dst []byte
		if r.isNotConcurrent() {
			bn, err = r.read(data)
			dst = data[:bn]
		} else {
			lz4block.Put(dst)
			dst = <-r.reads
			bn = len(dst)
			if bn == 0 {
				// No uncompressed data: something went wrong or we are done.
				err = r.frame.Blocks.ErrorR()
			}
		}
		switch err {
		case nil:
		case io.EOF:
			err = r.frame.CloseR(r.src)
			return
		default:
			return
		}
		r.handler(bn)
		bn, err = w.Write(dst)
		n += int64(bn)
		if err != nil {
			return
		}
	}
}
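
// Note (editorial, not in the upstream file): because *Reader implements
// io.WriterTo, io.Copy(dst, r) dispatches to WriteTo above and streams whole
// decoded blocks to dst instead of copying them through a caller-supplied
// buffer as Read does.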