blockdec.go

// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.

package zstd

import (
    "errors"
    "fmt"
    "io"
    "sync"

    "github.com/klauspost/compress/huff0"
    "github.com/klauspost/compress/zstd/internal/xxhash"
)

type blockType uint8

//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex

const (
    blockTypeRaw blockType = iota
    blockTypeRLE
    blockTypeCompressed
    blockTypeReserved
)

type literalsBlockType uint8

const (
    literalsBlockRaw literalsBlockType = iota
    literalsBlockRLE
    literalsBlockCompressed
    literalsBlockTreeless
)

const (
    // maxCompressedBlockSize is the biggest allowed compressed block size (128KB)
    maxCompressedBlockSize = 128 << 10

    // Maximum possible block size (all Raw+Uncompressed).
    maxBlockSize = (1 << 21) - 1

    // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header
    maxCompressedLiteralSize = 1 << 18
    maxRLELiteralSize        = 1 << 20
    maxMatchLen              = 131074
    maxSequences             = 0x7f00 + 0xffff

    // We support slightly less than the reference decoder to be able to
    // use ints on 32 bit archs.
    maxOffsetBits = 30
)

var (
    huffDecoderPool = sync.Pool{New: func() interface{} {
        return &huff0.Scratch{}
    }}

    fseDecoderPool = sync.Pool{New: func() interface{} {
        return &fseDecoder{}
    }}
)
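
// blockDec decodes a single block. Raw block data is read in reset, while
// decoding happens either synchronously via decodeBuf or in the goroutine
// started by newBlockDec (startDecoder).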
type blockDec struct {
    // Raw source data of the block.
    data        []byte
    dataStorage []byte

    // Destination of the decoded data.
    dst []byte

    // Buffer for literals data.
    literalBuf []byte

    // Window size of the block.
    WindowSize uint64
    Type       blockType
    RLESize    uint32

    // Is this the last block of a frame?
    Last bool

    // Use less memory
    lowMem      bool
    history     chan *history
    input       chan struct{}
    result      chan decodeOutput
    sequenceBuf []seq
    tmp         [4]byte
    err         error
    decWG       sync.WaitGroup
}

func (b *blockDec) String() string {
    if b == nil {
        return "<nil>"
    }
    return fmt.Sprintf("Stream Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize)
}

func newBlockDec(lowMem bool) *blockDec {
    b := blockDec{
        lowMem:  lowMem,
        result:  make(chan decodeOutput, 1),
        input:   make(chan struct{}, 1),
        history: make(chan *history, 1),
    }
    b.decWG.Add(1)
    go b.startDecoder()
    return &b
}

// reset will reset the block.
// Input must be at the start of a block and will be at the end of the block when returned.
func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
    b.WindowSize = windowSize
    tmp := br.readSmall(3)
    if tmp == nil {
        if debug {
            println("Reading block header:", io.ErrUnexpectedEOF)
        }
        return io.ErrUnexpectedEOF
    }
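    // The block header is 3 bytes, little endian:
    // bit 0 is Last_Block, bits 1-2 are Block_Type and bits 3-23 are Block_Size.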
    bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
    b.Last = bh&1 != 0
    b.Type = blockType((bh >> 1) & 3)
    // find size.
    cSize := int(bh >> 3)
    switch b.Type {
    case blockTypeReserved:
        return ErrReservedBlockType
    case blockTypeRLE:
        b.RLESize = uint32(cSize)
        cSize = 1
    case blockTypeCompressed:
        if debug {
            println("Data size on stream:", cSize)
        }
        b.RLESize = 0
        if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
            if debug {
                printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b)
            }
            return ErrCompressedSizeTooBig
        }
    default:
        b.RLESize = 0
    }
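    // For RLE blocks, Block_Size is the regenerated size and only a single
    // byte is read below; for raw and compressed blocks it is the number of
    // bytes to read from the stream.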
    // Read block data.
    if cap(b.dataStorage) < cSize {
        if b.lowMem {
            b.dataStorage = make([]byte, 0, cSize)
        } else {
            b.dataStorage = make([]byte, 0, maxBlockSize)
        }
    }
    if cap(b.dst) <= maxBlockSize {
        b.dst = make([]byte, 0, maxBlockSize+1)
    }
    var err error
    b.data, err = br.readBig(cSize, b.dataStorage)
    if err != nil {
        if debug {
            println("Reading block:", err, "(", cSize, ")", len(b.data))
            printf("%T", br)
        }
        return err
    }
    return nil
}

// sendErr will make the decoder report err as the result of this frame.
func (b *blockDec) sendErr(err error) {
    b.Last = true
    b.Type = blockTypeReserved
    b.err = err
    b.input <- struct{}{}
}

// Close will release resources.
// Closed blockDec cannot be reset.
func (b *blockDec) Close() {
    close(b.input)
    close(b.history)
    close(b.result)
    b.decWG.Wait()
}

// startDecoder runs the decode loop and will decode a block when it receives input.
// Output is delivered on the result channel, separate from the history.
func (b *blockDec) startDecoder() {
    defer b.decWG.Done()
    for range b.input {
        //println("blockDec: Got block input")
        switch b.Type {
        case blockTypeRLE:
            if cap(b.dst) < int(b.RLESize) {
                if b.lowMem {
                    b.dst = make([]byte, b.RLESize)
                } else {
                    b.dst = make([]byte, maxBlockSize)
                }
            }
            o := decodeOutput{
                d:   b,
                b:   b.dst[:b.RLESize],
                err: nil,
            }
            v := b.data[0]
            for i := range o.b {
                o.b[i] = v
            }
            hist := <-b.history
            hist.append(o.b)
            b.result <- o
        case blockTypeRaw:
            o := decodeOutput{
                d:   b,
                b:   b.data,
                err: nil,
            }
            hist := <-b.history
            hist.append(o.b)
            b.result <- o
        case blockTypeCompressed:
            b.dst = b.dst[:0]
            err := b.decodeCompressed(nil)
            o := decodeOutput{
                d:   b,
                b:   b.dst,
                err: err,
            }
            if debug {
                println("Decompressed to", len(b.dst), "bytes, error:", err)
            }
            b.result <- o
        case blockTypeReserved:
            // Used for returning errors.
            <-b.history
            b.result <- decodeOutput{
                d:   b,
                b:   nil,
                err: b.err,
            }
        default:
            panic("Invalid block type")
        }
        if debug {
            println("blockDec: Finished block")
        }
    }
}

// decodeBuf will decode the block into the supplied history.
// Since the history is provided, it is not fetched from the channel.
func (b *blockDec) decodeBuf(hist *history) error {
    switch b.Type {
    case blockTypeRLE:
        if cap(b.dst) < int(b.RLESize) {
            if b.lowMem {
                b.dst = make([]byte, b.RLESize)
            } else {
                b.dst = make([]byte, maxBlockSize)
            }
        }
        b.dst = b.dst[:b.RLESize]
        v := b.data[0]
        for i := range b.dst {
            b.dst[i] = v
        }
        hist.appendKeep(b.dst)
        return nil
    case blockTypeRaw:
        hist.appendKeep(b.data)
        return nil
    case blockTypeCompressed:
        saved := b.dst
        b.dst = hist.b
        hist.b = nil
        err := b.decodeCompressed(hist)
        if debug {
            println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err)
        }
        hist.b = b.dst
        b.dst = saved
        return err
    case blockTypeReserved:
        // Used for returning errors.
        return b.err
    default:
        panic("Invalid block type")
    }
}

// decodeCompressed will start decompressing a block.
// If no history is supplied, the decoder will decode as much as possible
// before fetching the history from blockDec.history.
func (b *blockDec) decodeCompressed(hist *history) error {
    in := b.data
    delayedHistory := hist == nil

    if delayedHistory {
        // We must always grab history.
        defer func() {
            if hist == nil {
                <-b.history
            }
        }()
    }
    // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header
    if len(in) < 2 {
        return ErrBlockTooSmall
    }
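    // The first byte of the Literals_Section_Header holds Literals_Block_Type
    // in bits 0-1 and Size_Format in the next 1-2 bits; the remaining bits
    // start the regenerated/compressed size fields decoded below.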
    litType := literalsBlockType(in[0] & 3)
    var litRegenSize int
    var litCompSize int
    sizeFormat := (in[0] >> 2) & 3
    var fourStreams bool
    switch litType {
    case literalsBlockRaw, literalsBlockRLE:
        switch sizeFormat {
        case 0, 2:
            // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte.
            litRegenSize = int(in[0] >> 3)
            in = in[1:]
        case 1:
            // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes.
            litRegenSize = int(in[0]>>4) + (int(in[1]) << 4)
            in = in[2:]
        case 3:
            // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes.
            if len(in) < 3 {
                println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
                return ErrBlockTooSmall
            }
            litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12)
            in = in[3:]
        }
    case literalsBlockCompressed, literalsBlockTreeless:
        switch sizeFormat {
        case 0, 1:
            // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023).
            if len(in) < 3 {
                println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
                return ErrBlockTooSmall
            }
            n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12)
            litRegenSize = int(n & 1023)
            litCompSize = int(n >> 10)
            fourStreams = sizeFormat == 1
            in = in[3:]
        case 2:
            fourStreams = true
            if len(in) < 4 {
                println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
                return ErrBlockTooSmall
            }
            n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20)
            litRegenSize = int(n & 16383)
            litCompSize = int(n >> 14)
            in = in[4:]
        case 3:
            fourStreams = true
            if len(in) < 5 {
                println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
                return ErrBlockTooSmall
            }
            n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28)
            litRegenSize = int(n & 262143)
            litCompSize = int(n >> 18)
            in = in[5:]
        }
    }
    if debug {
        println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams)
    }
    var literals []byte
    var huff *huff0.Scratch
    switch litType {
    case literalsBlockRaw:
        if len(in) < litRegenSize {
            println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize)
            return ErrBlockTooSmall
        }
        literals = in[:litRegenSize]
        in = in[litRegenSize:]
        //printf("Found %d uncompressed literals\n", litRegenSize)
    case literalsBlockRLE:
        if len(in) < 1 {
            println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1)
            return ErrBlockTooSmall
        }
        if cap(b.literalBuf) < litRegenSize {
            if b.lowMem {
                b.literalBuf = make([]byte, litRegenSize)
            } else {
                if litRegenSize > maxCompressedLiteralSize {
                    // Exceptional
                    b.literalBuf = make([]byte, litRegenSize)
                } else {
                    b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize)
                }
            }
        }
        literals = b.literalBuf[:litRegenSize]
        v := in[0]
        for i := range literals {
            literals[i] = v
        }
        in = in[1:]
        if debug {
            printf("Found %d RLE compressed literals\n", litRegenSize)
        }
    case literalsBlockTreeless:
        if len(in) < litCompSize {
            println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
            return ErrBlockTooSmall
        }
        // Store compressed literals, so we defer decoding until we get history.
        literals = in[:litCompSize]
        in = in[litCompSize:]
        if debug {
            printf("Found %d compressed literals\n", litCompSize)
        }
    case literalsBlockCompressed:
        if len(in) < litCompSize {
            println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
            return ErrBlockTooSmall
        }
        literals = in[:litCompSize]
        in = in[litCompSize:]
        huff = huffDecoderPool.Get().(*huff0.Scratch)
        var err error
        // Ensure we have space to store it.
        if cap(b.literalBuf) < litRegenSize {
            if b.lowMem {
                b.literalBuf = make([]byte, 0, litRegenSize)
            } else {
                b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
            }
        }
        if huff == nil {
            huff = &huff0.Scratch{}
        }
        huff.Out = b.literalBuf[:0]
        huff, literals, err = huff0.ReadTable(literals, huff)
        if err != nil {
            println("reading huffman table:", err)
            return err
        }
        // Use our out buffer.
        huff.Out = b.literalBuf[:0]
        huff.MaxDecodedSize = litRegenSize
        if fourStreams {
            literals, err = huff.Decompress4X(literals, litRegenSize)
        } else {
            literals, err = huff.Decompress1X(literals)
        }
        if err != nil {
            println("decoding compressed literals:", err)
            return err
        }
        // Make sure we don't leak our literals buffer
        huff.Out = nil
        if len(literals) != litRegenSize {
            return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
        }
        if debug {
            printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
        }
    }

    // Decode Sequences
    // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section
    if len(in) < 1 {
        return ErrBlockTooSmall
    }
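    // The first byte of the Sequences_Section_Header determines how
    // Number_of_Sequences is encoded: 0 means no sequences, 1-127 is the
    // count itself, 128-254 adds a second byte, and 255 adds two more bytes
    // on top of a base of 0x7f00.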
    seqHeader := in[0]
    nSeqs := 0
    switch {
    case seqHeader == 0:
        in = in[1:]
    case seqHeader < 128:
        nSeqs = int(seqHeader)
        in = in[1:]
    case seqHeader < 255:
        if len(in) < 2 {
            return ErrBlockTooSmall
        }
        nSeqs = int(seqHeader-128)<<8 | int(in[1])
        in = in[2:]
    case seqHeader == 255:
        if len(in) < 3 {
            return ErrBlockTooSmall
        }
        nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8)
        in = in[3:]
    }
    // Allocate sequences
    if cap(b.sequenceBuf) < nSeqs {
        if b.lowMem {
            b.sequenceBuf = make([]seq, nSeqs)
        } else {
            // Allocate max
            b.sequenceBuf = make([]seq, nSeqs, maxSequences)
        }
    } else {
        // Reuse buffer
        b.sequenceBuf = b.sequenceBuf[:nSeqs]
    }
    var seqs = &sequenceDecs{}
    if nSeqs > 0 {
        if len(in) < 1 {
            return ErrBlockTooSmall
        }
        br := byteReader{b: in, off: 0}
        compMode := br.Uint8()
        br.advance(1)
        if debug {
            printf("Compression modes: 0b%b", compMode)
        }
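        // The Symbol_Compression_Modes byte holds three 2-bit fields:
        // literal lengths in bits 6-7, offsets in bits 4-5 and match lengths
        // in bits 2-3. Each selects Predefined, RLE, FSE_Compressed or Repeat.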
        for i := uint(0); i < 3; i++ {
            mode := seqCompMode((compMode >> (6 - i*2)) & 3)
            if debug {
                println("Table", tableIndex(i), "is", mode)
            }
            var seq *sequenceDec
            switch tableIndex(i) {
            case tableLiteralLengths:
                seq = &seqs.litLengths
            case tableOffsets:
                seq = &seqs.offsets
            case tableMatchLengths:
                seq = &seqs.matchLengths
            default:
                panic("unknown table")
            }
            switch mode {
            case compModePredefined:
                seq.fse = &fsePredef[i]
            case compModeRLE:
                if br.remain() < 1 {
                    return ErrBlockTooSmall
                }
                v := br.Uint8()
                br.advance(1)
                dec := fseDecoderPool.Get().(*fseDecoder)
                symb, err := decSymbolValue(v, symbolTableX[i])
                if err != nil {
                    printf("RLE Transform table (%v) error: %v", tableIndex(i), err)
                    return err
                }
                dec.setRLE(symb)
                seq.fse = dec
                if debug {
                    printf("RLE set to %+v, code: %v", symb, v)
                }
            case compModeFSE:
                println("Reading table for", tableIndex(i))
                dec := fseDecoderPool.Get().(*fseDecoder)
                err := dec.readNCount(&br, uint16(maxTableSymbol[i]))
                if err != nil {
                    println("Read table error:", err)
                    return err
                }
                err = dec.transform(symbolTableX[i])
                if err != nil {
                    println("Transform table error:", err)
                    return err
                }
                if debug {
                    println("Read table ok", "symbolLen:", dec.symbolLen)
                }
                seq.fse = dec
            case compModeRepeat:
                seq.repeat = true
            }
            if br.overread() {
                return io.ErrUnexpectedEOF
            }
        }
        in = br.unread()
    }
    // Wait for history.
    // All time spent after this is critical since it is strictly sequential.
    if hist == nil {
        hist = <-b.history
        if hist.error {
            return ErrDecoderClosed
        }
    }
    // Decode treeless literal block.
    if litType == literalsBlockTreeless {
        // TODO: We could send the history early WITHOUT the stream history.
        // This would allow decoding treeless literals before the byte history is available.
        // Silesia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless.
        // So not much obvious gain here.
        if hist.huffTree == nil {
            return errors.New("literal block was treeless, but no history was defined")
        }
        // Ensure we have space to store it.
        if cap(b.literalBuf) < litRegenSize {
            if b.lowMem {
                b.literalBuf = make([]byte, 0, litRegenSize)
            } else {
                b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
            }
        }
        var err error
        // Use our out buffer.
        huff = hist.huffTree
        huff.Out = b.literalBuf[:0]
        huff.MaxDecodedSize = litRegenSize
        if fourStreams {
            literals, err = huff.Decompress4X(literals, litRegenSize)
        } else {
            literals, err = huff.Decompress1X(literals)
        }
        // Make sure we don't leak our literals buffer
        huff.Out = nil
        if err != nil {
            println("decompressing literals:", err)
            return err
        }
        if len(literals) != litRegenSize {
            return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
        }
    } else {
        if hist.huffTree != nil && huff != nil {
            huffDecoderPool.Put(hist.huffTree)
            hist.huffTree = nil
        }
    }
    if huff != nil {
        huff.Out = nil
        hist.huffTree = huff
    }
    if debug {
        println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.")
    }
    if nSeqs == 0 {
        // Decompressed content is defined entirely as Literals Section content.
        b.dst = append(b.dst, literals...)
        if delayedHistory {
            hist.append(literals)
        }
        return nil
    }

    seqs, err := seqs.mergeHistory(&hist.decoders)
    if err != nil {
        return err
    }
    if debug {
        println("History merged ok")
    }
    br := &bitReader{}
    if err := br.init(in); err != nil {
        return err
    }
    // TODO: Investigate if sending history without decoders is faster.
    // This would allow the sequences to be decoded async and only have to construct stream history.
    // If only recent offsets were not transferred, this would be an obvious win.
    // Also, if the first 3 sequences don't reference recent offsets, all sequences can be decoded.
    if err := seqs.initialize(br, hist, literals, b.dst); err != nil {
        println("initializing sequences:", err)
        return err
    }

    err = seqs.decode(nSeqs, br, hist.b)
    if err != nil {
        return err
    }
    if !br.finished() {
        return fmt.Errorf("%d extra bits on block, should be 0", br.remain())
    }

    err = br.close()
    if err != nil {
        printf("Closing sequences: %v, %+v\n", err, *br)
    }
    if len(b.data) > maxCompressedBlockSize {
        return fmt.Errorf("compressed block size too large (%d)", len(b.data))
    }
    // Set output and release references.
    b.dst = seqs.out
    seqs.out, seqs.literals, seqs.hist = nil, nil, nil

    if !delayedHistory {
        // If we don't have delayed history, no need to update.
        hist.recentOffsets = seqs.prevOffset
        return nil
    }
    if b.Last {
        // if last block we don't care about history.
        println("Last block, no history returned")
        hist.b = hist.b[:0]
        return nil
    }
    hist.append(b.dst)
    hist.recentOffsets = seqs.prevOffset
    if debug {
        println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.")
    }
    return nil
}