/*
 * LZ4 - Fast LZ compression algorithm
 * Copyright (C) 2011 - 2016, Yann Collet.
 * BSD 2 - Clause License (http://www.opensource.org/licenses/bsd - license.php)
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *	* Redistributions of source code must retain the above copyright
 *	  notice, this list of conditions and the following disclaimer.
 *	* Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following disclaimer
 *	  in the documentation and/or other materials provided with the
 *	  distribution.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at :
 *	- LZ4 homepage : http://www.lz4.org
 *	- LZ4 source repository : https://github.com/lz4/lz4
 *
 * Changed for kernel usage by:
 *	Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
 */
/*-************************************
 *	Dependencies
 **************************************/
#include <linux/lz4.h>
#include "lz4defs.h"
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/unaligned.h>
41 static const int LZ4_minLength = (MFLIMIT + 1);
42 static const int LZ4_64Klimit = ((64 * KB) + (MFLIMIT - 1));
/*-******************************
 *	Compression functions
 ********************************/
47 static FORCE_INLINE U32 LZ4_hash4(
49 tableType_t const tableType)
51 if (tableType == byU16)
52 return ((sequence * 2654435761U)
53 >> ((MINMATCH * 8) - (LZ4_HASHLOG + 1)));
55 return ((sequence * 2654435761U)
56 >> ((MINMATCH * 8) - LZ4_HASHLOG));
59 static FORCE_INLINE U32 LZ4_hash5(
61 tableType_t const tableType)
63 const U32 hashLog = (tableType == byU16)
68 static const U64 prime5bytes = 889523592379ULL;
70 return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
72 static const U64 prime8bytes = 11400714785074694791ULL;
74 return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
78 static FORCE_INLINE U32 LZ4_hashPosition(
80 tableType_t const tableType)
83 if (tableType == byU32)
84 return LZ4_hash5(LZ4_read_ARCH(p), tableType);
87 return LZ4_hash4(LZ4_read32(p), tableType);
90 static void LZ4_putPositionOnHash(
94 tableType_t const tableType,
100 const BYTE **hashTable = (const BYTE **)tableBase;
107 U32 *hashTable = (U32 *) tableBase;
109 hashTable[h] = (U32)(p - srcBase);
114 U16 *hashTable = (U16 *) tableBase;
116 hashTable[h] = (U16)(p - srcBase);
122 static FORCE_INLINE void LZ4_putPosition(
125 tableType_t tableType,
128 U32 const h = LZ4_hashPosition(p, tableType);
130 LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
133 static const BYTE *LZ4_getPositionOnHash(
136 tableType_t tableType,
139 if (tableType == byPtr) {
140 const BYTE **hashTable = (const BYTE **) tableBase;
145 if (tableType == byU32) {
146 const U32 * const hashTable = (U32 *) tableBase;
148 return hashTable[h] + srcBase;
152 /* default, to ensure a return */
153 const U16 * const hashTable = (U16 *) tableBase;
155 return hashTable[h] + srcBase;
159 static FORCE_INLINE const BYTE *LZ4_getPosition(
162 tableType_t tableType,
165 U32 const h = LZ4_hashPosition(p, tableType);
167 return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
172 * LZ4_compress_generic() :
173 * inlined, to ensure branches are decided at compilation time
175 static FORCE_INLINE int LZ4_compress_generic(
176 LZ4_stream_t_internal * const dictPtr,
177 const char * const source,
180 const int maxOutputSize,
181 const limitedOutput_directive outputLimited,
182 const tableType_t tableType,
183 const dict_directive dict,
184 const dictIssue_directive dictIssue,
185 const U32 acceleration)
187 const BYTE *ip = (const BYTE *) source;
189 const BYTE *lowLimit;
190 const BYTE * const lowRefLimit = ip - dictPtr->dictSize;
191 const BYTE * const dictionary = dictPtr->dictionary;
192 const BYTE * const dictEnd = dictionary + dictPtr->dictSize;
193 const size_t dictDelta = dictEnd - (const BYTE *)source;
194 const BYTE *anchor = (const BYTE *) source;
195 const BYTE * const iend = ip + inputSize;
196 const BYTE * const mflimit = iend - MFLIMIT;
197 const BYTE * const matchlimit = iend - LASTLITERALS;
199 BYTE *op = (BYTE *) dest;
200 BYTE * const olimit = op + maxOutputSize;
205 /* Init conditions */
206 if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) {
207 /* Unsupported inputSize, too large (or negative) */
214 base = (const BYTE *)source;
215 lowLimit = (const BYTE *)source;
218 base = (const BYTE *)source - dictPtr->currentOffset;
219 lowLimit = (const BYTE *)source - dictPtr->dictSize;
222 base = (const BYTE *)source - dictPtr->currentOffset;
223 lowLimit = (const BYTE *)source;
227 if ((tableType == byU16)
228 && (inputSize >= LZ4_64Klimit)) {
229 /* Size too large (not within 64K limit) */
233 if (inputSize < LZ4_minLength) {
234 /* Input too small, no compression (all literals) */
239 LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
241 forwardH = LZ4_hashPosition(ip, tableType);
250 const BYTE *forwardIp = ip;
251 unsigned int step = 1;
252 unsigned int searchMatchNb = acceleration << LZ4_SKIPTRIGGER;
255 U32 const h = forwardH;
259 step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);
261 if (unlikely(forwardIp > mflimit))
264 match = LZ4_getPositionOnHash(h,
268 if (dict == usingExtDict) {
269 if (match < (const BYTE *)source) {
270 refDelta = dictDelta;
271 lowLimit = dictionary;
274 lowLimit = (const BYTE *)source;
277 forwardH = LZ4_hashPosition(forwardIp,
280 LZ4_putPositionOnHash(ip, h, dictPtr->hashTable,
282 } while (((dictIssue == dictSmall)
283 ? (match < lowRefLimit)
285 || ((tableType == byU16)
287 : (match + MAX_DISTANCE < ip))
288 || (LZ4_read32(match + refDelta)
293 while (((ip > anchor) & (match + refDelta > lowLimit))
294 && (unlikely(ip[-1] == match[refDelta - 1]))) {
299 /* Encode Literals */
301 unsigned const int litLength = (unsigned int)(ip - anchor);
305 if ((outputLimited) &&
306 /* Check output buffer overflow */
307 (unlikely(op + litLength +
308 (2 + 1 + LASTLITERALS) +
309 (litLength / 255) > olimit)))
312 if (litLength >= RUN_MASK) {
313 int len = (int)litLength - RUN_MASK;
315 *token = (RUN_MASK << ML_BITS);
317 for (; len >= 255; len -= 255)
321 *token = (BYTE)(litLength << ML_BITS);
324 LZ4_wildCopy(op, anchor, op + litLength);
330 LZ4_writeLE16(op, (U16)(ip - match));
333 /* Encode MatchLength */
335 unsigned int matchCode;
337 if ((dict == usingExtDict)
338 && (lowLimit == dictionary)) {
342 limit = ip + (dictEnd - match);
344 if (limit > matchlimit)
347 matchCode = LZ4_count(ip + MINMATCH,
348 match + MINMATCH, limit);
350 ip += MINMATCH + matchCode;
353 unsigned const int more = LZ4_count(ip,
354 (const BYTE *)source,
361 matchCode = LZ4_count(ip + MINMATCH,
362 match + MINMATCH, matchlimit);
363 ip += MINMATCH + matchCode;
367 /* Check output buffer overflow */
370 (matchCode >> 8) > olimit)))
373 if (matchCode >= ML_MASK) {
375 matchCode -= ML_MASK;
376 LZ4_write32(op, 0xFFFFFFFF);
378 while (matchCode >= 4 * 255) {
380 LZ4_write32(op, 0xFFFFFFFF);
381 matchCode -= 4 * 255;
384 op += matchCode / 255;
385 *op++ = (BYTE)(matchCode % 255);
387 *token += (BYTE)(matchCode);
392 /* Test end of chunk */
397 LZ4_putPosition(ip - 2, dictPtr->hashTable, tableType, base);
399 /* Test next position */
400 match = LZ4_getPosition(ip, dictPtr->hashTable,
403 if (dict == usingExtDict) {
404 if (match < (const BYTE *)source) {
405 refDelta = dictDelta;
406 lowLimit = dictionary;
409 lowLimit = (const BYTE *)source;
413 LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
415 if (((dictIssue == dictSmall) ? (match >= lowRefLimit) : 1)
416 && (match + MAX_DISTANCE >= ip)
417 && (LZ4_read32(match + refDelta) == LZ4_read32(ip))) {
423 /* Prepare next loop */
424 forwardH = LZ4_hashPosition(++ip, tableType);
428 /* Encode Last Literals */
430 size_t const lastRun = (size_t)(iend - anchor);
432 if ((outputLimited) &&
433 /* Check output buffer overflow */
434 ((op - (BYTE *)dest) + lastRun + 1 +
435 ((lastRun + 255 - RUN_MASK) / 255) > (U32)maxOutputSize))
438 if (lastRun >= RUN_MASK) {
439 size_t accumulator = lastRun - RUN_MASK;
440 *op++ = RUN_MASK << ML_BITS;
441 for (; accumulator >= 255; accumulator -= 255)
443 *op++ = (BYTE) accumulator;
445 *op++ = (BYTE)(lastRun << ML_BITS);
448 memcpy(op, anchor, lastRun);
454 return (int) (((char *)op) - dest);
457 static int LZ4_compress_fast_extState(
465 LZ4_stream_t_internal *ctx = &((LZ4_stream_t *)state)->internal_donotuse;
467 const tableType_t tableType = byU32;
469 const tableType_t tableType = byPtr;
472 LZ4_resetStream((LZ4_stream_t *)state);
474 if (acceleration < 1)
475 acceleration = LZ4_ACCELERATION_DEFAULT;
477 if (maxOutputSize >= LZ4_COMPRESSBOUND(inputSize)) {
478 if (inputSize < LZ4_64Klimit)
479 return LZ4_compress_generic(ctx, source,
481 noLimit, byU16, noDict,
482 noDictIssue, acceleration);
484 return LZ4_compress_generic(ctx, source,
486 noLimit, tableType, noDict,
487 noDictIssue, acceleration);
489 if (inputSize < LZ4_64Klimit)
490 return LZ4_compress_generic(ctx, source,
492 maxOutputSize, limitedOutput, byU16, noDict,
493 noDictIssue, acceleration);
495 return LZ4_compress_generic(ctx, source,
497 maxOutputSize, limitedOutput, tableType, noDict,
498 noDictIssue, acceleration);
/*
 * Public fast-compression entry point; wrkmem must be at least
 * LZ4_MEM_COMPRESS bytes. Thin wrapper over the extState variant.
 */
int LZ4_compress_fast(const char *source, char *dest, int inputSize,
	int maxOutputSize, int acceleration, void *wrkmem)
{
	return LZ4_compress_fast_extState(wrkmem, source, dest, inputSize,
		maxOutputSize, acceleration);
}
509 int LZ4_compress_default(const char *source, char *dest, int inputSize,
510 int maxOutputSize, void *wrkmem)
512 return LZ4_compress_fast(source, dest, inputSize,
513 maxOutputSize, LZ4_ACCELERATION_DEFAULT, wrkmem);
/*-******************************
 *	*_destSize() variant
 ********************************/
519 static int LZ4_compress_destSize_generic(
520 LZ4_stream_t_internal * const ctx,
521 const char * const src,
523 int * const srcSizePtr,
524 const int targetDstSize,
525 const tableType_t tableType)
527 const BYTE *ip = (const BYTE *) src;
528 const BYTE *base = (const BYTE *) src;
529 const BYTE *lowLimit = (const BYTE *) src;
530 const BYTE *anchor = ip;
531 const BYTE * const iend = ip + *srcSizePtr;
532 const BYTE * const mflimit = iend - MFLIMIT;
533 const BYTE * const matchlimit = iend - LASTLITERALS;
535 BYTE *op = (BYTE *) dst;
536 BYTE * const oend = op + targetDstSize;
537 BYTE * const oMaxLit = op + targetDstSize - 2 /* offset */
538 - 8 /* because 8 + MINMATCH == MFLIMIT */ - 1 /* token */;
539 BYTE * const oMaxMatch = op + targetDstSize
540 - (LASTLITERALS + 1 /* token */);
541 BYTE * const oMaxSeq = oMaxLit - 1 /* token */;
545 /* Init conditions */
546 /* Impossible to store anything */
547 if (targetDstSize < 1)
549 /* Unsupported input size, too large (or negative) */
550 if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE)
552 /* Size too large (not within 64K limit) */
553 if ((tableType == byU16) && (*srcSizePtr >= LZ4_64Klimit))
555 /* Input too small, no compression (all literals) */
556 if (*srcSizePtr < LZ4_minLength)
561 LZ4_putPosition(ip, ctx->hashTable, tableType, base);
562 ip++; forwardH = LZ4_hashPosition(ip, tableType);
571 const BYTE *forwardIp = ip;
572 unsigned int step = 1;
573 unsigned int searchMatchNb = 1 << LZ4_SKIPTRIGGER;
580 step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);
582 if (unlikely(forwardIp > mflimit))
585 match = LZ4_getPositionOnHash(h, ctx->hashTable,
587 forwardH = LZ4_hashPosition(forwardIp,
589 LZ4_putPositionOnHash(ip, h,
590 ctx->hashTable, tableType,
593 } while (((tableType == byU16)
595 : (match + MAX_DISTANCE < ip))
596 || (LZ4_read32(match) != LZ4_read32(ip)));
601 && (match > lowLimit)
602 && (unlikely(ip[-1] == match[-1]))) {
607 /* Encode Literal length */
609 unsigned int litLength = (unsigned int)(ip - anchor);
612 if (op + ((litLength + 240) / 255)
613 + litLength > oMaxLit) {
614 /* Not enough space for a last match */
618 if (litLength >= RUN_MASK) {
619 unsigned int len = litLength - RUN_MASK;
620 *token = (RUN_MASK<<ML_BITS);
621 for (; len >= 255; len -= 255)
625 *token = (BYTE)(litLength << ML_BITS);
628 LZ4_wildCopy(op, anchor, op + litLength);
634 LZ4_writeLE16(op, (U16)(ip - match)); op += 2;
636 /* Encode MatchLength */
638 size_t matchLength = LZ4_count(ip + MINMATCH,
639 match + MINMATCH, matchlimit);
641 if (op + ((matchLength + 240)/255) > oMaxMatch) {
642 /* Match description too long : reduce it */
643 matchLength = (15 - 1) + (oMaxMatch - op) * 255;
645 ip += MINMATCH + matchLength;
647 if (matchLength >= ML_MASK) {
649 matchLength -= ML_MASK;
650 while (matchLength >= 255) {
654 *op++ = (BYTE)matchLength;
656 *token += (BYTE)(matchLength);
661 /* Test end of block */
668 LZ4_putPosition(ip - 2, ctx->hashTable, tableType, base);
670 /* Test next position */
671 match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
672 LZ4_putPosition(ip, ctx->hashTable, tableType, base);
674 if ((match + MAX_DISTANCE >= ip)
675 && (LZ4_read32(match) == LZ4_read32(ip))) {
676 token = op++; *token = 0;
680 /* Prepare next loop */
681 forwardH = LZ4_hashPosition(++ip, tableType);
685 /* Encode Last Literals */
687 size_t lastRunSize = (size_t)(iend - anchor);
689 if (op + 1 /* token */
690 + ((lastRunSize + 240) / 255) /* litLength */
691 + lastRunSize /* literals */ > oend) {
692 /* adapt lastRunSize to fill 'dst' */
693 lastRunSize = (oend - op) - 1;
694 lastRunSize -= (lastRunSize + 240) / 255;
696 ip = anchor + lastRunSize;
698 if (lastRunSize >= RUN_MASK) {
699 size_t accumulator = lastRunSize - RUN_MASK;
701 *op++ = RUN_MASK << ML_BITS;
702 for (; accumulator >= 255; accumulator -= 255)
704 *op++ = (BYTE) accumulator;
706 *op++ = (BYTE)(lastRunSize<<ML_BITS);
708 memcpy(op, anchor, lastRunSize);
713 *srcSizePtr = (int) (((const char *)ip) - src);
714 return (int) (((char *)op) - dst);
717 static int LZ4_compress_destSize_extState(
725 const tableType_t tableType = byU32;
727 const tableType_t tableType = byPtr;
730 LZ4_resetStream(state);
732 if (targetDstSize >= LZ4_COMPRESSBOUND(*srcSizePtr)) {
733 /* compression success is guaranteed */
734 return LZ4_compress_fast_extState(
735 state, src, dst, *srcSizePtr,
738 if (*srcSizePtr < LZ4_64Klimit)
739 return LZ4_compress_destSize_generic(
740 &state->internal_donotuse,
741 src, dst, srcSizePtr,
742 targetDstSize, byU16);
744 return LZ4_compress_destSize_generic(
745 &state->internal_donotuse,
746 src, dst, srcSizePtr,
747 targetDstSize, tableType);
/*
 * Public destSize entry point; wrkmem must be at least LZ4_MEM_COMPRESS
 * bytes. Updates *srcSizePtr to the number of source bytes consumed.
 */
int LZ4_compress_destSize(
	const char *src,
	char *dst,
	int *srcSizePtr,
	int targetDstSize,
	void *wrkmem)
{
	return LZ4_compress_destSize_extState(wrkmem, src, dst, srcSizePtr,
		targetDstSize);
}
/*-******************************
 *	Streaming functions
 ********************************/
766 void LZ4_resetStream(LZ4_stream_t *LZ4_stream)
768 memset(LZ4_stream, 0, sizeof(LZ4_stream_t));
771 int LZ4_loadDict(LZ4_stream_t *LZ4_dict,
772 const char *dictionary, int dictSize)
774 LZ4_stream_t_internal *dict = &LZ4_dict->internal_donotuse;
775 const BYTE *p = (const BYTE *)dictionary;
776 const BYTE * const dictEnd = p + dictSize;
779 if ((dict->initCheck)
780 || (dict->currentOffset > 1 * GB)) {
781 /* Uninitialized structure, or reuse overflow */
782 LZ4_resetStream(LZ4_dict);
785 if (dictSize < (int)HASH_UNIT) {
786 dict->dictionary = NULL;
791 if ((dictEnd - p) > 64 * KB)
792 p = dictEnd - 64 * KB;
793 dict->currentOffset += 64 * KB;
794 base = p - dict->currentOffset;
795 dict->dictionary = p;
796 dict->dictSize = (U32)(dictEnd - p);
797 dict->currentOffset += dict->dictSize;
799 while (p <= dictEnd - HASH_UNIT) {
800 LZ4_putPosition(p, dict->hashTable, byU32, base);
804 return dict->dictSize;
807 static void LZ4_renormDictT(LZ4_stream_t_internal *LZ4_dict,
810 if ((LZ4_dict->currentOffset > 0x80000000) ||
811 ((uptrval)LZ4_dict->currentOffset > (uptrval)src)) {
812 /* address space overflow */
813 /* rescale hash table */
814 U32 const delta = LZ4_dict->currentOffset - 64 * KB;
815 const BYTE *dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
818 for (i = 0; i < LZ4_HASH_SIZE_U32; i++) {
819 if (LZ4_dict->hashTable[i] < delta)
820 LZ4_dict->hashTable[i] = 0;
822 LZ4_dict->hashTable[i] -= delta;
824 LZ4_dict->currentOffset = 64 * KB;
825 if (LZ4_dict->dictSize > 64 * KB)
826 LZ4_dict->dictSize = 64 * KB;
827 LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
831 int LZ4_saveDict(LZ4_stream_t *LZ4_dict, char *safeBuffer, int dictSize)
833 LZ4_stream_t_internal * const dict = &LZ4_dict->internal_donotuse;
834 const BYTE * const previousDictEnd = dict->dictionary + dict->dictSize;
836 if ((U32)dictSize > 64 * KB) {
837 /* useless to define a dictionary > 64 * KB */
840 if ((U32)dictSize > dict->dictSize)
841 dictSize = dict->dictSize;
843 memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
845 dict->dictionary = (const BYTE *)safeBuffer;
846 dict->dictSize = (U32)dictSize;
851 int LZ4_compress_fast_continue(LZ4_stream_t *LZ4_stream, const char *source,
852 char *dest, int inputSize, int maxOutputSize, int acceleration)
854 LZ4_stream_t_internal *streamPtr = &LZ4_stream->internal_donotuse;
855 const BYTE * const dictEnd = streamPtr->dictionary
856 + streamPtr->dictSize;
858 const BYTE *smallest = (const BYTE *) source;
860 if (streamPtr->initCheck) {
861 /* Uninitialized structure detected */
865 if ((streamPtr->dictSize > 0) && (smallest > dictEnd))
868 LZ4_renormDictT(streamPtr, smallest);
870 if (acceleration < 1)
871 acceleration = LZ4_ACCELERATION_DEFAULT;
873 /* Check overlapping input/dictionary space */
875 const BYTE *sourceEnd = (const BYTE *) source + inputSize;
877 if ((sourceEnd > streamPtr->dictionary)
878 && (sourceEnd < dictEnd)) {
879 streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
880 if (streamPtr->dictSize > 64 * KB)
881 streamPtr->dictSize = 64 * KB;
882 if (streamPtr->dictSize < 4)
883 streamPtr->dictSize = 0;
884 streamPtr->dictionary = dictEnd - streamPtr->dictSize;
888 /* prefix mode : source data follows dictionary */
889 if (dictEnd == (const BYTE *)source) {
892 if ((streamPtr->dictSize < 64 * KB) &&
893 (streamPtr->dictSize < streamPtr->currentOffset)) {
894 result = LZ4_compress_generic(
895 streamPtr, source, dest, inputSize,
896 maxOutputSize, limitedOutput, byU32,
897 withPrefix64k, dictSmall, acceleration);
899 result = LZ4_compress_generic(
900 streamPtr, source, dest, inputSize,
901 maxOutputSize, limitedOutput, byU32,
902 withPrefix64k, noDictIssue, acceleration);
904 streamPtr->dictSize += (U32)inputSize;
905 streamPtr->currentOffset += (U32)inputSize;
909 /* external dictionary mode */
913 if ((streamPtr->dictSize < 64 * KB) &&
914 (streamPtr->dictSize < streamPtr->currentOffset)) {
915 result = LZ4_compress_generic(
916 streamPtr, source, dest, inputSize,
917 maxOutputSize, limitedOutput, byU32,
918 usingExtDict, dictSmall, acceleration);
920 result = LZ4_compress_generic(
921 streamPtr, source, dest, inputSize,
922 maxOutputSize, limitedOutput, byU32,
923 usingExtDict, noDictIssue, acceleration);
925 streamPtr->dictionary = (const BYTE *)source;
926 streamPtr->dictSize = (U32)inputSize;
927 streamPtr->currentOffset += (U32)inputSize;