/*
   LZ4 - Fast LZ compression algorithm
   Copyright (C) 2011-2020, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
    - LZ4 homepage : http://www.lz4.org
    - LZ4 source repository : https://github.com/lz4/lz4
*/
/*-************************************
*  Tuning parameters
**************************************/

/*
 * LZ4_HEAPMODE :
 * Select how default compression functions will allocate memory for their hash table,
 * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
 */
#ifndef LZ4_HEAPMODE
#  define LZ4_HEAPMODE 0
#endif

/*
 * LZ4_ACCELERATION_DEFAULT :
 * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
 */
#define LZ4_ACCELERATION_DEFAULT 1
/*
 * LZ4_ACCELERATION_MAX :
 * Any "acceleration" value higher than this threshold
 * get treated as LZ4_ACCELERATION_MAX instead (fix #876)
 */
#define LZ4_ACCELERATION_MAX 65537
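
/* Illustrative note (not upstream documentation): callers of LZ4_compress_fast()
 * pass acceleration as the last argument; the fast compressor normalizes it
 * roughly as
 *     if (acceleration < 1)                    acceleration = LZ4_ACCELERATION_DEFAULT;
 *     if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
 * so a value of 0 behaves like the default, and oversized values are clamped. */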

/*-************************************
*  CPU Feature Detection
**************************************/
/* LZ4_FORCE_MEMORY_ACCESS
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The below switch allow to select different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method is portable but violate C standard.
 *            It can generate buggy code on targets which assembly generation depends on alignment.
 *            But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
 * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
 * Prefer these methods in priority order (0 > 1 > 2)
 */
#ifndef LZ4_FORCE_MEMORY_ACCESS   /* can be defined externally */
#  if defined(__GNUC__) && \
  ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \
  || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#    define LZ4_FORCE_MEMORY_ACCESS 2
#  elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__)
#    define LZ4_FORCE_MEMORY_ACCESS 1
#  endif
#endif

/*
 * LZ4_FORCE_SW_BITCOUNT
 * Define this parameter if your target system or compiler does not support hardware bit count
 */
#if defined(_MSC_VER) && defined(_WIN32_WCE)   /* Visual Studio for WinCE doesn't support Hardware bit count */
#  undef LZ4_FORCE_SW_BITCOUNT  /* avoid double def */
#  define LZ4_FORCE_SW_BITCOUNT
#endif


/*-************************************
*  Dependency
**************************************/
/*
 * LZ4_SRC_INCLUDED:
 * Amalgamation flag, whether lz4.c is included
 */
#ifndef LZ4_SRC_INCLUDED
#  define LZ4_SRC_INCLUDED 1
#endif

#ifndef LZ4_STATIC_LINKING_ONLY
#  define LZ4_STATIC_LINKING_ONLY
#endif

#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS
#  define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
#endif

#define LZ4_STATIC_LINKING_ONLY  /* LZ4_DISTANCE_MAX */
#include "tracy_lz4.hpp"
/* see also "memory routines" below */

/*-************************************
*  Compiler Options
**************************************/
#if defined(_MSC_VER) && (_MSC_VER >= 1400)  /* Visual Studio 2005+ */
#  include <intrin.h>               /* only present in VS2005+ */
#  pragma warning(disable : 4127)   /* disable: C4127: conditional expression is constant */
#  pragma warning(disable : 6237)   /* disable: C6237: conditional expression is always 0 */
#endif  /* _MSC_VER */

#ifndef LZ4_FORCE_INLINE
#  if defined (_MSC_VER) && !defined (__clang__)    /* MSVC */
#    define LZ4_FORCE_INLINE static __forceinline
#  else
#    if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#      if defined (__GNUC__) || defined (__clang__)
#        define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
#      else
#        define LZ4_FORCE_INLINE static inline
#      endif
#    else
#      define LZ4_FORCE_INLINE static
#    endif /* __STDC_VERSION__ */
#  endif  /* _MSC_VER */
#endif /* LZ4_FORCE_INLINE */

/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE
 * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8,
 * together with a simple 8-byte copy loop as a fall-back path.
 * However, this optimization hurts the decompression speed by >30%,
 * because the execution does not go to the optimized loop
 * for typical compressible data, and all of the preamble checks
 * before going to the fall-back path become useless overhead.
 * This optimization happens only with the -O3 flag, and -O2 generates
 * a simple 8-byte copy loop.
 * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8
 * functions are annotated with __attribute__((optimize("O2"))),
 * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute
 * of LZ4_wildCopy8 does not affect the compression speed.
 */
#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__)
#  define LZ4_FORCE_O2  __attribute__((optimize("O2")))
#  undef LZ4_FORCE_INLINE
#  define LZ4_FORCE_INLINE  static __inline __attribute__((optimize("O2"),always_inline))
#else
#  define LZ4_FORCE_O2
#endif

#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
#  define expect(expr,value)    (__builtin_expect ((expr),(value)) )
#else
#  define expect(expr,value)    (expr)
#endif

#ifndef likely
#define likely(expr)     expect((expr) != 0, 1)
#endif
#ifndef unlikely
#define unlikely(expr)   expect((expr) != 0, 0)
#endif

/* Should the alignment test prove unreliable, for some reason,
 * it can be disabled by setting LZ4_ALIGN_TEST to 0 */
#ifndef LZ4_ALIGN_TEST  /* can be externally provided */
# define LZ4_ALIGN_TEST 1
#endif


/*-************************************
*  Memory routines
**************************************/

/*! LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION :
 *  Disable relatively high-level LZ4/HC functions that use dynamic memory
 *  allocation functions (malloc(), calloc(), free()).
 *
 *  Note that this is a compile-time switch. And since it disables
 *  public/stable LZ4 v1 API functions, we don't recommend using this
 *  symbol to generate a library for distribution.
 *
 *  The following public functions are removed when this symbol is defined.
 *  - lz4   : LZ4_createStream, LZ4_freeStream,
 *            LZ4_createStreamDecode, LZ4_freeStreamDecode, LZ4_create (deprecated)
 *  - lz4hc : LZ4_createStreamHC, LZ4_freeStreamHC,
 *            LZ4_createHC (deprecated), LZ4_freeHC (deprecated)
 *  - lz4frame, lz4file : All LZ4F_* functions
 */
#if defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
#  define ALLOC(s)          lz4_error_memory_allocation_is_disabled
#  define ALLOC_AND_ZERO(s) lz4_error_memory_allocation_is_disabled
#  define FREEMEM(p)        lz4_error_memory_allocation_is_disabled
#elif defined(LZ4_USER_MEMORY_FUNCTIONS)
/* memory management functions can be customized by user project.
 * Below functions must exist somewhere in the Project
 * and be available at link time */
void* LZ4_malloc(size_t s);
void* LZ4_calloc(size_t n, size_t s);
void  LZ4_free(void* p);
# define ALLOC(s)          LZ4_malloc(s)
# define ALLOC_AND_ZERO(s) LZ4_calloc(1,s)
# define FREEMEM(p)        LZ4_free(p)
#else
# include <stdlib.h>   /* malloc, calloc, free */
# define ALLOC(s)          malloc(s)
# define ALLOC_AND_ZERO(s) calloc(1,s)
# define FREEMEM(p)        free(p)
#endif

#if ! LZ4_FREESTANDING
#  include <string.h>   /* memset, memcpy */
#endif
#if !defined(LZ4_memset)
#  define LZ4_memset(p,v,s) memset((p),(v),(s))
#endif
#define MEM_INIT(p,v,s)   LZ4_memset((p),(v),(s))


/*-************************************
*  Common Constants
**************************************/
#define MINMATCH 4

#define WILDCOPYLENGTH 8
#define LASTLITERALS   5   /* see ../doc/lz4_Block_format.md#parsing-restrictions */
#define MFLIMIT       12   /* see ../doc/lz4_Block_format.md#parsing-restrictions */
#define MATCH_SAFEGUARD_DISTANCE  ((2*WILDCOPYLENGTH) - MINMATCH)   /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */
#define FASTLOOP_SAFE_DISTANCE 64
static const int LZ4_minLength = (MFLIMIT+1);

#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)

#define LZ4_DISTANCE_ABSOLUTE_MAX 65535
#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX)   /* max supported by LZ4 format */
#  error "LZ4_DISTANCE_MAX is too big : must be <= 65535"
#endif

#define ML_BITS  4
#define ML_MASK  ((1U<<ML_BITS)-1)
#define RUN_BITS (8-ML_BITS)
#define RUN_MASK ((1U<<RUN_BITS)-1)


/*-************************************
*  Error detection
**************************************/
#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
#  include <assert.h>
#else
#  ifndef assert
#    define assert(condition) ((void)0)
#  endif
#endif

#define LZ4_STATIC_ASSERT(c)   { enum { LZ4_static_assert = 1/(int)(!!(c)) }; }   /* use after variable declarations */

#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
#  include <stdio.h>
   static int g_debuglog_enable = 1;
#  define DEBUGLOG(l, ...) {                          \
        if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) {  \
            fprintf(stderr, __FILE__ ": ");           \
            fprintf(stderr, __VA_ARGS__);             \
            fprintf(stderr, " \n");                   \
    }   }
#else
#  define DEBUGLOG(l, ...) {}    /* disabled */
#endif

static int LZ4_isAligned(const void* ptr, size_t alignment)
{
    return ((size_t)ptr & (alignment -1)) == 0;
}
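
/* Example (illustrative): LZ4_isAligned(ptr, 8) is non-zero only when the low
 * three bits of the address are clear; alignment is expected to be a power of two. */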


/*-************************************
*  Types
**************************************/
#include <limits.h>
#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# include <stdint.h>
  typedef  uint8_t BYTE;
  typedef uint16_t U16;
  typedef uint32_t U32;
  typedef  int32_t S32;
  typedef uint64_t U64;
  typedef uintptr_t uptrval;
#else
# if UINT_MAX != 4294967295UL
#   error "LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4"
# endif
  typedef unsigned char       BYTE;
  typedef unsigned short      U16;
  typedef unsigned int        U32;
  typedef   signed int        S32;
  typedef unsigned long long  U64;
  typedef size_t              uptrval;   /* generally true, except OpenVMS-64 */
#endif

#if defined(__x86_64__)
  typedef U64    reg_t;   /* 64-bits in x32 mode */
#else
  typedef size_t reg_t;   /* 32-bits in x32 mode */
#endif

typedef enum {
    notLimited = 0,
    limitedOutput = 1,
    fillOutput = 2
} limitedOutput_directive;

namespace tracy
{

/*-************************************
*  Reading and writing into memory
**************************************/

/**
 * LZ4 relies on memcpy with a constant size being inlined. In freestanding
 * environments, the compiler can't assume the implementation of memcpy() is
 * standard compliant, so it can't apply its specialized memcpy() inlining
 * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze
 * memcpy() as if it were standard compliant, so it can inline it in freestanding
 * environments. This is needed when decompressing the Linux Kernel, for example.
 */
#if !defined(LZ4_memcpy)
#  if defined(__GNUC__) && (__GNUC__ >= 4)
#    define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
#  else
#    define LZ4_memcpy(dst, src, size) memcpy(dst, src, size)
#  endif
#endif

#if !defined(LZ4_memmove)
#  if defined(__GNUC__) && (__GNUC__ >= 4)
#    define LZ4_memmove __builtin_memmove
#  else
#    define LZ4_memmove memmove
#  endif
#endif

static unsigned LZ4_isLittleEndian(void)
{
    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental */
    return one.c[0];
}

#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
/* lie to the compiler about data alignment; use with caution */

static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }
static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }
static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }

static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }

#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)

/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) LZ4_unalign;

static U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign*)ptr)->u16; }
static U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign*)ptr)->u32; }
static reg_t LZ4_read_ARCH(const void* ptr) { return ((const LZ4_unalign*)ptr)->uArch; }

static void LZ4_write16(void* memPtr, U16 value) { ((LZ4_unalign*)memPtr)->u16 = value; }
static void LZ4_write32(void* memPtr, U32 value) { ((LZ4_unalign*)memPtr)->u32 = value; }

#else  /* safe and portable access using memcpy() */

static U16 LZ4_read16(const void* memPtr)
{
    U16 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
}

static U32 LZ4_read32(const void* memPtr)
{
    U32 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
}

static reg_t LZ4_read_ARCH(const void* memPtr)
{
    reg_t val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
}

static void LZ4_write16(void* memPtr, U16 value)
{
    LZ4_memcpy(memPtr, &value, sizeof(value));
}

static void LZ4_write32(void* memPtr, U32 value)
{
    LZ4_memcpy(memPtr, &value, sizeof(value));
}

#endif /* LZ4_FORCE_MEMORY_ACCESS */


static U16 LZ4_readLE16(const void* memPtr)
{
    if (LZ4_isLittleEndian()) {
        return LZ4_read16(memPtr);
    } else {
        const BYTE* p = (const BYTE*)memPtr;
        return (U16)((U16)p[0] + (p[1]<<8));
    }
}

static void LZ4_writeLE16(void* memPtr, U16 value)
{
    if (LZ4_isLittleEndian()) {
        LZ4_write16(memPtr, value);
    } else {
        BYTE* p = (BYTE*)memPtr;
        p[0] = (BYTE) value;
        p[1] = (BYTE)(value>>8);
    }
}
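
/* Example (illustrative): LZ4_writeLE16(p, 0x0102) always stores the byte
 * sequence { 0x02, 0x01 }, whatever the host endianness, and LZ4_readLE16()
 * reads it back as 0x0102. This matches the little-endian 2-byte offset field
 * of the LZ4 block format. */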

/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
LZ4_FORCE_INLINE
void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)
{
    BYTE* d = (BYTE*)dstPtr;
    const BYTE* s = (const BYTE*)srcPtr;
    BYTE* const e = (BYTE*)dstEnd;

    do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d<e);
}

static const unsigned inc32table[8] = {0, 1, 2,  1,  0,  4, 4, 4};
static const int      dec64table[8] = {0, 0, 0, -1, -4,  1, 2, 3};


#ifndef LZ4_FAST_DEC_LOOP
#  if defined __i386__ || defined _M_IX86 || defined __x86_64__ || defined _M_X64
#    define LZ4_FAST_DEC_LOOP 1
#  elif defined(__aarch64__) && defined(__APPLE__)
#    define LZ4_FAST_DEC_LOOP 1
#  elif defined(__aarch64__) && !defined(__clang__)
     /* On non-Apple aarch64, we disable this optimization for clang because
      * on certain mobile chipsets, performance is reduced with clang. For
      * more information refer to https://github.com/lz4/lz4/pull/707 */
#    define LZ4_FAST_DEC_LOOP 1
#  else
#    define LZ4_FAST_DEC_LOOP 0
#  endif
#endif

#if LZ4_FAST_DEC_LOOP

LZ4_FORCE_INLINE void
LZ4_memcpy_using_offset_base(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
{
    assert(srcPtr + offset == dstPtr);
    if (offset < 8) {
        LZ4_write32(dstPtr, 0);   /* silence an msan warning when offset==0 */
        dstPtr[0] = srcPtr[0];
        dstPtr[1] = srcPtr[1];
        dstPtr[2] = srcPtr[2];
        dstPtr[3] = srcPtr[3];
        srcPtr += inc32table[offset];
        LZ4_memcpy(dstPtr+4, srcPtr, 4);
        srcPtr -= dec64table[offset];
        dstPtr += 8;
    } else {
        LZ4_memcpy(dstPtr, srcPtr, 8);
        dstPtr += 8;
        srcPtr += 8;
    }

    LZ4_wildCopy8(dstPtr, srcPtr, dstEnd);
}

/* customized variant of memcpy, which can overwrite up to 32 bytes beyond dstEnd
 * this version copies two times 16 bytes (instead of one time 32 bytes)
 * because it must be compatible with offsets >= 16. */
LZ4_FORCE_INLINE void
LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
{
    BYTE* d = (BYTE*)dstPtr;
    const BYTE* s = (const BYTE*)srcPtr;
    BYTE* const e = (BYTE*)dstEnd;

    do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e);
}

/* LZ4_memcpy_using_offset()  presumes :
 * - dstEnd >= dstPtr + MINMATCH
 * - there is at least 8 bytes available to write after dstEnd */
LZ4_FORCE_INLINE void
LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
{
    BYTE v[8];

    assert(dstEnd >= dstPtr + MINMATCH);

    switch(offset) {
    case 1:
        MEM_INIT(v, *srcPtr, 8);
        break;
    case 2:
        LZ4_memcpy(v, srcPtr, 2);
        LZ4_memcpy(&v[2], srcPtr, 2);
#if defined(_MSC_VER) && (_MSC_VER <= 1933) /* MSVC 2022 ver 17.3 or earlier */
#  pragma warning(push)
#  pragma warning(disable : 6385) /* warning C6385: Reading invalid data from 'v'. */
#endif
        LZ4_memcpy(&v[4], v, 4);
#if defined(_MSC_VER) && (_MSC_VER <= 1933) /* MSVC 2022 ver 17.3 or earlier */
#  pragma warning(pop)
#endif
        break;
    case 4:
        LZ4_memcpy(v, srcPtr, 4);
        LZ4_memcpy(&v[4], srcPtr, 4);
        break;
    default:
        LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);
        return;
    }

    LZ4_memcpy(dstPtr, v, 8);
    dstPtr += 8;
    while (dstPtr < dstEnd) {
        LZ4_memcpy(dstPtr, v, 8);
        dstPtr += 8;
    }
}
#endif


/*-************************************
*  Common functions
**************************************/
LZ4_FORCE_INLINE unsigned LZ4_NbCommonBytes (reg_t val)
{
    assert(val != 0);
    if (LZ4_isLittleEndian()) {
        if (sizeof(val) == 8) {
#       if defined(_MSC_VER) && (_MSC_VER >= 1800) && (defined(_M_AMD64) && !defined(_M_ARM64EC)) && !defined(LZ4_FORCE_SW_BITCOUNT)
/*-*************************************************************************************************
* ARM64EC is a Microsoft-designed ARM64 ABI compatible with AMD64 applications on ARM64 Windows 11.
* The ARM64EC ABI does not support AVX/AVX2/AVX512 instructions, nor their relevant intrinsics
* including _tzcnt_u64. Therefore, we need to neuter the _tzcnt_u64 code path for ARM64EC.
****************************************************************************************************/
#         if defined(__clang__) && (__clang_major__ < 10)
            /* Avoid undefined clang-cl intrinsics issue.
             * See https://github.com/lz4/lz4/pull/1017 for details. */
            return (unsigned)__builtin_ia32_tzcnt_u64(val) >> 3;
#         else
            /* x64 CPUS without BMI support interpret `TZCNT` as `REP BSF` */
            return (unsigned)_tzcnt_u64(val) >> 3;
#         endif
#       elif defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
            unsigned long r = 0;
            _BitScanForward64(&r, (U64)val);
            return (unsigned)r >> 3;
#       elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
                            ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
                                        !defined(LZ4_FORCE_SW_BITCOUNT)
            return (unsigned)__builtin_ctzll((U64)val) >> 3;
#       else
            const U64 m = 0x0101010101010101ULL;
            val ^= val - 1;
            return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56);
#       endif
        } else /* 32 bits */ {
#       if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
            unsigned long r;
            _BitScanForward(&r, (U32)val);
            return (unsigned)r >> 3;
#       elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
                            ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
                        !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
            return (unsigned)__builtin_ctz((U32)val) >> 3;
#       else
            const U32 m = 0x01010101;
            return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;
#       endif
        }
    } else   /* Big Endian CPU */ {
        if (sizeof(val)==8) {
#       if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
                            ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
                        !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
            return (unsigned)__builtin_clzll((U64)val) >> 3;
#       else
#if 1
            /* this method is probably faster,
             * but adds a 128 bytes lookup table */
            static const unsigned char ctz7_tab[128] = {
                7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
                4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
                5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
                4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
                6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
                4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
                5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
                4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
            };
            U64 const mask = 0x0101010101010101ULL;
            U64 const t = (((val >> 8) - mask) | val) & mask;
            return ctz7_tab[(t * 0x0080402010080402ULL) >> 57];
#else
            /* this method doesn't consume memory space like the previous one,
             * but it contains several branches,
             * that may end up slowing execution */
            static const U32 by32 = sizeof(val)*4;  /* 32 on 64 bits (goal), 16 on 32 bits.
                Just to avoid some static analyzer complaining about shift by 32 on 32-bits target.
                Note that this code path is never triggered in 32-bits mode. */
            unsigned r;
            if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; }
            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
            r += (!val);
            return r;
#endif
#       endif
        } else /* 32 bits */ {
#       if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
                            ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
                                        !defined(LZ4_FORCE_SW_BITCOUNT)
            return (unsigned)__builtin_clz((U32)val) >> 3;
#       else
            val >>= 8;
            val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |
              (val + 0x00FF0000)) >> 24;
            return (unsigned)val ^ 3;
#       endif
        }
    }
}
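
/* Example (illustrative): on a little-endian 64-bit target,
 * LZ4_NbCommonBytes(0x0000000000FF0000ULL) == 2, because the two lowest bytes
 * of the XOR diff are zero, i.e. the first two bytes of the compared sequences
 * are identical. */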


#define STEPSIZE sizeof(reg_t)

LZ4_FORCE_INLINE
unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
{
    const BYTE* const pStart = pIn;

    if (likely(pIn < pInLimit-(STEPSIZE-1))) {
        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
        if (!diff) {
            pIn+=STEPSIZE; pMatch+=STEPSIZE;
        } else {
            return LZ4_NbCommonBytes(diff);
    }   }

    while (likely(pIn < pInLimit-(STEPSIZE-1))) {
        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
        if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
        pIn += LZ4_NbCommonBytes(diff);
        return (unsigned)(pIn - pStart);
    }

    if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
    if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
    return (unsigned)(pIn - pStart);
}
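
/* Example (illustrative): if pIn and pMatch point at runs that share their
 * first 11 bytes and then diverge, and pInLimit leaves enough headroom for the
 * word-at-a-time reads, LZ4_count(pIn, pMatch, pInLimit) returns 11 on a
 * 64-bit build: one full STEPSIZE step (8 bytes) plus LZ4_NbCommonBytes() of
 * the mismatching word (3). */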


#ifndef LZ4_COMMONDEFS_ONLY
/*-************************************
*  Local Constants
**************************************/
static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
static const U32 LZ4_skipTrigger = 6;  /* Increase this value ==> compression run slower on incompressible data */


/*-************************************
*  Local Structures and types
**************************************/
typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;

/**
 * This enum distinguishes several different modes of accessing previous
 * content in the stream.
 *
 * - noDict        : There is no preceding content.
 * - withPrefix64k : Table entries up to ctx->dictSize before the current blob
 *                   being compressed are valid and refer to the preceding
 *                   content (of length ctx->dictSize), which is available
 *                   contiguously preceding in memory the content currently
 *                   being compressed.
 * - usingExtDict  : Like withPrefix64k, but the preceding content is somewhere
 *                   else in memory, starting at ctx->dictionary with length
 *                   ctx->dictSize.
 * - usingDictCtx  : Everything concerning the preceding content is
 *                   in a separate context, pointed to by ctx->dictCtx.
 *                   ctx->dictionary, ctx->dictSize, and table entries
 *                   in the current context that refer to positions
 *                   preceding the beginning of the current compression are
 *                   ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx
 *                   ->dictSize describe the location and size of the preceding
 *                   content, and matches are found by looking in the ctx
 *                   ->dictCtx->hashTable.
 */
typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive;
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;


/*-************************************
*  Local Utils
**************************************/
int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
int LZ4_compressBound(int isize)  { return LZ4_COMPRESSBOUND(isize); }
int LZ4_sizeofState(void) { return sizeof(LZ4_stream_t); }


/*-****************************************
*  Internal Definitions, used only in Tests
*******************************************/

int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize);

int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
                                     int compressedSize, int maxOutputSize,
                                     const void* dictStart, size_t dictSize);
int LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest,
                                     int compressedSize, int targetOutputSize, int dstCapacity,
                                     const void* dictStart, size_t dictSize);

/*-******************************
*  Compression functions
********************************/
LZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
{
    if (tableType == byU16)
        return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
    else
        return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
}

LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
{
    const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
    if (LZ4_isLittleEndian()) {
        const U64 prime5bytes = 889523592379ULL;
        return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
    } else {
        const U64 prime8bytes = 11400714785074694791ULL;
        return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
    }
}

LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
{
    if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
    return LZ4_hash4(LZ4_read32(p), tableType);
}
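
/* Example (illustrative): with the default LZ4_MEMORY_USAGE of 14, LZ4_HASHLOG
 * is 12, so LZ4_hash4() keeps the top 12 bits (13 for byU16) of
 * sequence * 2654435761U, a multiplicative (Fibonacci-style) hash of the next
 * 4 input bytes, used as an index into the hash table. */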

LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
{
    switch (tableType)
    {
    default: /* fallthrough */
    case clearedTable: { /* illegal! */ assert(0); return; }
    case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; }
    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; }
    case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; }
    }
}

LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)
{
    switch (tableType)
    {
    default: /* fallthrough */
    case clearedTable: /* fallthrough */
    case byPtr: { /* illegal! */ assert(0); return; }
    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; }
    case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; }
    }
}

LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h,
                                  void* tableBase, tableType_t const tableType,
                                  const BYTE* srcBase)
{
    switch (tableType)
    {
    case clearedTable: { /* illegal! */ assert(0); return; }
    case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
    case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
    }
}

LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
    U32 const h = LZ4_hashPosition(p, tableType);
    LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
}

/* LZ4_getIndexOnHash() :
 * Index of match position registered in hash table.
 * hash position must be calculated by using base+index, or dictBase+index.
 * Assumption 1 : only valid if tableType == byU32 or byU16.
 * Assumption 2 : h is presumed valid (within limits of hash table)
 */
LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)
{
    LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);
    if (tableType == byU32) {
        const U32* const hashTable = (const U32*) tableBase;
        assert(h < (1U << (LZ4_MEMORY_USAGE-2)));
        return hashTable[h];
    }
    if (tableType == byU16) {
        const U16* const hashTable = (const U16*) tableBase;
        assert(h < (1U << (LZ4_MEMORY_USAGE-1)));
        return hashTable[h];
    }
    assert(0); return 0;  /* forbidden case */
}

static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
    if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }
    if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h] + srcBase; }
    { const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; }   /* default, to ensure a return */
}

LZ4_FORCE_INLINE const BYTE*
LZ4_getPosition(const BYTE* p,
                const void* tableBase, tableType_t tableType,
                const BYTE* srcBase)
{
    U32 const h = LZ4_hashPosition(p, tableType);
    return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
}

LZ4_FORCE_INLINE void
LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
           const int inputSize,
           const tableType_t tableType) {

    /* If the table hasn't been used, it's guaranteed to be zeroed out, and is
     * therefore safe to use no matter what mode we're in. Otherwise, we figure
     * out if it's safe to leave as is or whether it needs to be reset.
     */
    if ((tableType_t)cctx->tableType != clearedTable) {
        assert(inputSize >= 0);
        if ((tableType_t)cctx->tableType != tableType
          || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)
          || ((tableType == byU32) && cctx->currentOffset > 1 GB)
          || tableType == byPtr
          || inputSize >= 4 KB)
        {
            DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
            MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
            cctx->currentOffset = 0;
            cctx->tableType = (U32)clearedTable;
        } else {
            DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
        }
    }

    /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back,
     * is faster than compressing without a gap.
     * However, compressing with currentOffset == 0 is faster still,
     * so we preserve that case.
     */
    if (cctx->currentOffset != 0 && tableType == byU32) {
        DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
        cctx->currentOffset += 64 KB;
    }

    /* Finally, clear history */
    cctx->dictCtx = NULL;
    cctx->dictionary = NULL;
    cctx->dictSize = 0;
}

/** LZ4_compress_generic() :
 *  inlined, to ensure branches are decided at compilation time.
 *  Presumed already validated at this stage:
 *  - source != NULL
 *  - inputSize > 0
 */
LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
                 LZ4_stream_t_internal* const cctx,
                 const char* const source,
                 char* const dest,
                 const int inputSize,
                 int*  inputConsumed, /* only written when outputDirective == fillOutput */
                 const int maxOutputSize,
                 const limitedOutput_directive outputDirective,
                 const tableType_t tableType,
                 const dict_directive dictDirective,
                 const dictIssue_directive dictIssue,
                 const int acceleration)
{
    int result;
    const BYTE* ip = (const BYTE*) source;

    U32 const startIndex = cctx->currentOffset;
    const BYTE* base = (const BYTE*) source - startIndex;
    const BYTE* lowLimit;

    const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;
    const BYTE* const dictionary =
        dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
    const U32 dictSize =
        dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
    const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0;   /* make indexes in dictCtx comparable with index in current context */

    int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
    U32 const prefixIdxLimit = startIndex - dictSize;   /* used when dictDirective == dictSmall */
    const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary;
    const BYTE* anchor = (const BYTE*) source;
    const BYTE* const iend = ip + inputSize;
    const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;
    const BYTE* const matchlimit = iend - LASTLITERALS;

    /* the dictCtx currentOffset is indexed on the start of the dictionary,
     * while a dictionary in the current context precedes the currentOffset */
    const BYTE* dictBase = (dictionary == NULL) ? NULL :
                           (dictDirective == usingDictCtx) ?
                            dictionary + dictSize - dictCtx->currentOffset :
                            dictionary + dictSize - startIndex;

    BYTE* op = (BYTE*) dest;
    BYTE* const olimit = op + maxOutputSize;

    U32 offset = 0;
    U32 forwardH;

    DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u", inputSize, tableType);
    assert(ip != NULL);
    /* If init conditions are not met, we don't have to mark stream
     * as having dirty context, since no action was taken yet */
    if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; } /* Impossible to store anything */
    if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) { return 0; }  /* Size too large (not within 64K limit) */
    if (tableType==byPtr) assert(dictDirective==noDict);      /* only supported use case with byPtr */
    assert(acceleration >= 1);

    lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0);

    /* Update context state */
    if (dictDirective == usingDictCtx) {
        /* Subsequent linked blocks can't use the dictionary. */
        /* Instead, they use the block we just compressed. */
        cctx->dictCtx = NULL;
        cctx->dictSize = (U32)inputSize;
    } else {
        cctx->dictSize += (U32)inputSize;
    }
    cctx->currentOffset += (U32)inputSize;
    cctx->tableType = (U32)tableType;

    if (inputSize<LZ4_minLength) goto _last_literals;        /* Input too small, no compression (all literals) */

    /* First Byte */
    LZ4_putPosition(ip, cctx->hashTable, tableType, base);
    ip++; forwardH = LZ4_hashPosition(ip, tableType);

    /* Main Loop */
    for ( ; ; ) {
        const BYTE* match;
        BYTE* token;
        const BYTE* filledIp;

        /* Find a match */
        if (tableType == byPtr) {
            const BYTE* forwardIp = ip;
            int step = 1;
            int searchMatchNb = acceleration << LZ4_skipTrigger;
            do {
                U32 const h = forwardH;
                ip = forwardIp;
                forwardIp += step;
                step = (searchMatchNb++ >> LZ4_skipTrigger);

                if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
                assert(ip < mflimitPlusOne);

                match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
                forwardH = LZ4_hashPosition(forwardIp, tableType);
                LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);

            } while ( (match+LZ4_DISTANCE_MAX < ip)
                   || (LZ4_read32(match) != LZ4_read32(ip)) );

        } else {   /* byU32, byU16 */

            const BYTE* forwardIp = ip;
            int step = 1;
            int searchMatchNb = acceleration << LZ4_skipTrigger;
            do {
                U32 const h = forwardH;
                U32 const current = (U32)(forwardIp - base);
                U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
                assert(matchIndex <= current);
                assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));
                ip = forwardIp;
                forwardIp += step;
                step = (searchMatchNb++ >> LZ4_skipTrigger);

                if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
                assert(ip < mflimitPlusOne);

                if (dictDirective == usingDictCtx) {
                    if (matchIndex < startIndex) {
                        /* there was no match, try the dictionary */
                        assert(tableType == byU32);
                        matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
                        match = dictBase + matchIndex;
                        matchIndex += dictDelta;   /* make dictCtx index comparable with current context */
                        lowLimit = dictionary;
                    } else {
                        match = base + matchIndex;
                        lowLimit = (const BYTE*)source;
                    }
                } else if (dictDirective == usingExtDict) {
                    if (matchIndex < startIndex) {
                        DEBUGLOG(7, "extDict candidate: matchIndex=%5u  <  startIndex=%5u", matchIndex, startIndex);
                        assert(startIndex - matchIndex >= MINMATCH);
                        assert(dictBase);
                        match = dictBase + matchIndex;
                        lowLimit = dictionary;
                    } else {
                        match = base + matchIndex;
                        lowLimit = (const BYTE*)source;
                    }
                } else {   /* single continuous memory segment */
                    match = base + matchIndex;
                }
                forwardH = LZ4_hashPosition(forwardIp, tableType);
                LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);

                DEBUGLOG(7, "candidate at pos=%u  (offset=%u \n", matchIndex, current - matchIndex);
                if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; }    /* match outside of valid area */
                assert(matchIndex < current);
                if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX))
                  && (matchIndex+LZ4_DISTANCE_MAX < current)) {
                    continue;
                } /* too far */
                assert((current - matchIndex) <= LZ4_DISTANCE_MAX);  /* match now expected within distance */

                if (LZ4_read32(match) == LZ4_read32(ip)) {
                    if (maybe_extMem) offset = current - matchIndex;
                    break;   /* match found */
                }

            } while(1);
        }

        /* Catch up */
        filledIp = ip;
        while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }

        /* Encode Literals */
        {   unsigned const litLength = (unsigned)(ip - anchor);
            token = op++;
            if ((outputDirective == limitedOutput) &&  /* Check output buffer overflow */
                (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) {
                return 0;   /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
            }
            if ((outputDirective == fillOutput) &&
                (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {
                op--;
                goto _last_literals;
            }
            if (litLength >= RUN_MASK) {
                int len = (int)(litLength - RUN_MASK);
                *token = (RUN_MASK<<ML_BITS);
                for(; len >= 255 ; len-=255) *op++ = 255;
                *op++ = (BYTE)len;
            }
            else *token = (BYTE)(litLength<<ML_BITS);

            /* Copy Literals */
            LZ4_wildCopy8(op, anchor, op+litLength);
            op+=litLength;
            DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
                        (int)(anchor-(const BYTE*)source), litLength, (int)(ip-(const BYTE*)source));
        }

_next_match:
        /* at this stage, the following variables must be correctly set :
         * - ip : at start of LZ operation
         * - match : at start of previous pattern occurrence; can be within current prefix, or within extDict
         * - offset : if maybe_ext_memSegment==1 (constant)
         * - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise
         * - token and *token : position to write 4-bits for match length; higher 4-bits for literal length supposed already written
         */

        if ((outputDirective == fillOutput) &&
            (op + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit)) {
            /* the match was too close to the end, rewind and go to last literals */
            op = token;
            goto _last_literals;
        }

        /* Encode Offset */
        if (maybe_extMem) {   /* static test */
            DEBUGLOG(6, "             with offset=%u  (ext if > %i)", offset, (int)(ip - (const BYTE*)source));
            assert(offset <= LZ4_DISTANCE_MAX && offset > 0);
            LZ4_writeLE16(op, (U16)offset); op+=2;
        } else  {
            DEBUGLOG(6, "             with offset=%u  (same segment)", (U32)(ip - match));
            assert(ip-match <= LZ4_DISTANCE_MAX);
            LZ4_writeLE16(op, (U16)(ip - match)); op+=2;
        }

        /* Encode MatchLength */
        {   unsigned matchCode;

            if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx)
              && (lowLimit==dictionary) /* match within extDict */ ) {
                const BYTE* limit = ip + (dictEnd-match);
                assert(dictEnd > match);
                if (limit > matchlimit) limit = matchlimit;
                matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
                ip += (size_t)matchCode + MINMATCH;
                if (ip==limit) {
                    unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);
                    matchCode += more;
                    ip += more;
                }
                DEBUGLOG(6, "             with matchLength=%u starting in extDict", matchCode+MINMATCH);
            } else {
                matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
                ip += (size_t)matchCode + MINMATCH;
                DEBUGLOG(6, "             with matchLength=%u", matchCode+MINMATCH);
            }

            if ((outputDirective) &&    /* Check output buffer overflow */
                (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {
                if (outputDirective == fillOutput) {
                    /* Match description too long : reduce it */
                    U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;
                    ip -= matchCode - newMatchCode;
                    assert(newMatchCode < matchCode);
                    matchCode = newMatchCode;
                    if (unlikely(ip <= filledIp)) {
                        /* We have already filled up to filledIp so if ip ends up less than filledIp
                         * we have positions in the hash table beyond the current position. This is
                         * a problem if we reuse the hash table. So we have to remove these positions
                         * from the hash table.
                         */
                        const BYTE* ptr;
                        DEBUGLOG(5, "Clearing %u positions", (U32)(filledIp - ip));
                        for (ptr = ip; ptr <= filledIp; ++ptr) {
                            U32 const h = LZ4_hashPosition(ptr, tableType);
                            LZ4_clearHash(h, cctx->hashTable, tableType);
                        }
                    }
                } else {
                    assert(outputDirective == limitedOutput);
                    return 0;   /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
                }
            }
            if (matchCode >= ML_MASK) {
                *token += ML_MASK;
                matchCode -= ML_MASK;
                LZ4_write32(op, 0xFFFFFFFF);
                while (matchCode >= 4*255) {
                    op+=4;
                    LZ4_write32(op, 0xFFFFFFFF);
                    matchCode -= 4*255;
                }
                op += matchCode / 255;
                *op++ = (BYTE)(matchCode % 255);
            } else
                *token += (BYTE)(matchCode);
        }
        /* Ensure we have enough space for the last literals. */
        assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit));

        anchor = ip;

        /* Test end of chunk */
        if (ip >= mflimitPlusOne) break;

        /* Fill table */
        LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);

        /* Test next position */
        if (tableType == byPtr) {

            match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
            LZ4_putPosition(ip, cctx->hashTable, tableType, base);
            if ( (match+LZ4_DISTANCE_MAX >= ip)
              && (LZ4_read32(match) == LZ4_read32(ip)) )
            { token=op++; *token=0; goto _next_match; }

        } else {   /* byU32, byU16 */

            U32 const h = LZ4_hashPosition(ip, tableType);
            U32 const current = (U32)(ip-base);
            U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
            assert(matchIndex < current);
            if (dictDirective == usingDictCtx) {
                if (matchIndex < startIndex) {
                    /* there was no match, try the dictionary */
                    matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
                    match = dictBase + matchIndex;
                    lowLimit = dictionary;   /* required for match length counter */
                    matchIndex += dictDelta;
                } else {
                    match = base + matchIndex;
                    lowLimit = (const BYTE*)source;  /* required for match length counter */
                }
            } else if (dictDirective==usingExtDict) {
                if (matchIndex < startIndex) {
                    assert(dictBase);
                    match = dictBase + matchIndex;
                    lowLimit = dictionary;   /* required for match length counter */
                } else {
                    match = base + matchIndex;
                    lowLimit = (const BYTE*)source;   /* required for match length counter */
                }
            } else {   /* single memory segment */
                match = base + matchIndex;
            }
            LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
            assert(matchIndex < current);
            if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1)
              && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current))
              && (LZ4_read32(match) == LZ4_read32(ip)) ) {
                token=op++;
                *token=0;
                if (maybe_extMem) offset = current - matchIndex;
                DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
                            (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source));
                goto _next_match;
            }
        }

        /* Prepare next loop */
        forwardH = LZ4_hashPosition(++ip, tableType);

    }
_last_literals:
    /* Encode Last Literals */
    {   size_t lastRun = (size_t)(iend - anchor);
        if ( (outputDirective) &&  /* Check output buffer overflow */
            (op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) > olimit)) {
            if (outputDirective == fillOutput) {
                /* adapt lastRun to fill 'dst' */
                assert(olimit >= op);
                lastRun  = (size_t)(olimit - op) - 1 /*token*/;
                lastRun -= (lastRun + 256 - RUN_MASK) / 256;   /*additional length tokens*/
            } else {
                assert(outputDirective == limitedOutput);
                return 0;   /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
            }
        }
        DEBUGLOG(6, "Final literal run : %i literals", (int)lastRun);
        if (lastRun >= RUN_MASK) {
            size_t accumulator = lastRun - RUN_MASK;
            *op++ = RUN_MASK << ML_BITS;
            for (; accumulator >= 255; accumulator -= 255) *op++ = 255;
            *op++ = (BYTE)accumulator;
        } else {
            *op++ = (BYTE)(lastRun << ML_BITS);
        }
        LZ4_memcpy(op, anchor, lastRun);
        ip = anchor + lastRun;
        op += lastRun;
    }

    if (outputDirective == fillOutput) {
        *inputConsumed = (int)(((const char*)ip) - source);
    }
    result = (int)(((char*)op) - dest);
    assert(result > 0);
    DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, result);
    return result;
}

/** LZ4_compress_generic() :
 *  inlined, to ensure branches are decided at compilation time;
 *  takes care of src == (NULL, 0)
 *  and forward the rest to LZ4_compress_generic_validated */
LZ4_FORCE_INLINE int LZ4_compress_generic(
                 LZ4_stream_t_internal* const cctx,
                 const char* const src,
                 char* const dst,
                 const int srcSize,
                 int *inputConsumed,   /* only written when outputDirective == fillOutput */
                 const int dstCapacity,
                 const limitedOutput_directive outputDirective,
                 const tableType_t tableType,
                 const dict_directive dictDirective,
                 const dictIssue_directive dictIssue,
                 const int acceleration)
{
    DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, dstCapacity=%i",
                srcSize, dstCapacity);

    if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; }   /* Unsupported srcSize, too large (or negative) */
    if (srcSize == 0) {   /* src == NULL supported if srcSize == 0 */
        if (outputDirective != notLimited && dstCapacity <= 0) return 0;   /* no output, can't write anything */
        DEBUGLOG(5, "Generating an empty block");
        assert(outputDirective == notLimited || dstCapacity >= 1);
        assert(dst != NULL);
        dst[0] = 0;
        if (outputDirective == fillOutput) {
            assert(inputConsumed != NULL);
            *inputConsumed = 0;
        }
        return 1;
    }
    assert(src != NULL);

    return LZ4_compress_generic_validated(cctx, src, dst, srcSize,
                inputConsumed,   /* only written into if outputDirective == fillOutput */
                dstCapacity, outputDirective,
                tableType, dictDirective, dictIssue, acceleration);
}

int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
    LZ4_stream_t_internal* const ctx = &LZ4_initStream(state, sizeof(LZ4_stream_t))->internal_donotuse;
    assert(ctx != NULL);
    if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
    if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
    if (maxOutputSize >= LZ4_compressBound(inputSize)) {
        if (inputSize < LZ4_64Klimit) {
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
        } else {
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
        }
    } else {
        if (inputSize < LZ4_64Klimit) {
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
        } else {
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);
        }
    }
}
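
/* Usage sketch (illustrative only, not part of the library) : compressing with a
 * caller-provided state object, which avoids any heap allocation inside LZ4.
 * Buffer names and the LZ4_USAGE_EXAMPLES guard are assumptions for the example. */
#if defined(LZ4_USAGE_EXAMPLES)
static int example_compress_with_extState(const char* srcBuf, int srcSize,
                                          char* dstBuf, int dstCapacity)
{
    LZ4_stream_t state;   /* lives on the stack; LZ4_compress_fast_extState() (re)initializes it */
    /* acceleration=1 is the default speed/ratio trade-off */
    return LZ4_compress_fast_extState(&state, srcBuf, dstBuf, srcSize, dstCapacity, 1);
}
#endif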
/**
 *  LZ4_compress_fast_extState_fastReset() :
 *  A variant of LZ4_compress_fast_extState().
 *
 *  Using this variant avoids an expensive initialization step. It is only safe
 *  to call if the state buffer is known to be correctly initialized already
 *  (see comment in lz4.h on LZ4_resetStream_fast() for a definition of
 *  "correctly initialized").
 */
int LZ4_compress_fast_extState_fastReset ( void * state , const char * src , char * dst , int srcSize , int dstCapacity , int acceleration )
{
LZ4_stream_t_internal * ctx = & ( ( LZ4_stream_t * ) state ) - > internal_donotuse ;
2020-11-16 16:46:11 +00:00
if ( acceleration < 1 ) acceleration = LZ4_ACCELERATION_DEFAULT ;
if ( acceleration > LZ4_ACCELERATION_MAX ) acceleration = LZ4_ACCELERATION_MAX ;
2018-05-07 23:52:40 +00:00
if ( dstCapacity > = LZ4_compressBound ( srcSize ) ) {
if ( srcSize < LZ4_64Klimit ) {
const tableType_t tableType = byU16 ;
LZ4_prepareTable ( ctx , srcSize , tableType ) ;
if ( ctx - > currentOffset ) {
return LZ4_compress_generic ( ctx , src , dst , srcSize , NULL , 0 , notLimited , tableType , noDict , dictSmall , acceleration ) ;
} else {
return LZ4_compress_generic ( ctx , src , dst , srcSize , NULL , 0 , notLimited , tableType , noDict , noDictIssue , acceleration ) ;
}
} else {
2019-05-01 14:53:48 +00:00
const tableType_t tableType = ( ( sizeof ( void * ) = = 4 ) & & ( ( uptrval ) src > LZ4_DISTANCE_MAX ) ) ? byPtr : byU32 ;
2018-05-07 23:52:40 +00:00
LZ4_prepareTable ( ctx , srcSize , tableType ) ;
return LZ4_compress_generic ( ctx , src , dst , srcSize , NULL , 0 , notLimited , tableType , noDict , noDictIssue , acceleration ) ;
}
} else {
if ( srcSize < LZ4_64Klimit ) {
const tableType_t tableType = byU16 ;
LZ4_prepareTable ( ctx , srcSize , tableType ) ;
if ( ctx - > currentOffset ) {
return LZ4_compress_generic ( ctx , src , dst , srcSize , NULL , dstCapacity , limitedOutput , tableType , noDict , dictSmall , acceleration ) ;
} else {
return LZ4_compress_generic ( ctx , src , dst , srcSize , NULL , dstCapacity , limitedOutput , tableType , noDict , noDictIssue , acceleration ) ;
}
} else {
2019-05-01 14:53:48 +00:00
const tableType_t tableType = ( ( sizeof ( void * ) = = 4 ) & & ( ( uptrval ) src > LZ4_DISTANCE_MAX ) ) ? byPtr : byU32 ;
2018-05-07 23:52:40 +00:00
LZ4_prepareTable ( ctx , srcSize , tableType ) ;
return LZ4_compress_generic ( ctx , src , dst , srcSize , NULL , dstCapacity , limitedOutput , tableType , noDict , noDictIssue , acceleration ) ;
}
2017-09-11 23:25:50 +00:00
}
}
int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
    int result;
#if (LZ4_HEAPMODE)
    LZ4_stream_t* ctxPtr = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
    if (ctxPtr == NULL) return 0;
#else
    LZ4_stream_t ctx;
    LZ4_stream_t* const ctxPtr = &ctx;
#endif
    result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);

#if (LZ4_HEAPMODE)
    FREEMEM(ctxPtr);
#endif
    return result;
}


int LZ4_compress_default(const char* src, char* dst, int srcSize, int maxOutputSize)
{
    return LZ4_compress_fast(src, dst, srcSize, maxOutputSize, 1);
}
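
/* Usage sketch (illustrative only, not part of the library) : a one-shot round trip.
 * LZ4_compressBound() gives the worst-case compressed size, so compression cannot fail
 * for lack of space. Buffer names and the guard macro are assumptions for the example. */
#if defined(LZ4_USAGE_EXAMPLES)
static int example_round_trip(const char* srcBuf, int srcSize, char* cmpBuf, char* regenBuf)
{
    /* cmpBuf must hold at least LZ4_compressBound(srcSize) bytes, regenBuf at least srcSize bytes */
    int const cSize = LZ4_compress_default(srcBuf, cmpBuf, srcSize, LZ4_compressBound(srcSize));
    if (cSize <= 0) return -1;   /* compression error */
    return LZ4_decompress_safe(cmpBuf, regenBuf, cSize, srcSize);   /* == srcSize on success */
}
#endif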

/* Note!: This function leaves the stream in an unclean/broken state!
 * It is not safe to subsequently use the same state with a _fastReset() or
 * _continue() call without resetting it. */
static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
    void* const s = LZ4_initStream(state, sizeof(*state));
    assert(s != NULL); (void)s;

    if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) {   /* compression success is guaranteed */
        return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
    } else {
        if (*srcSizePtr < LZ4_64Klimit) {
            return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1);
        } else {
            tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
            return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, 1);
    }   }
}

int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
#if (LZ4_HEAPMODE)
    LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
    if (ctx == NULL) return 0;
#else
    LZ4_stream_t ctxBody;
    LZ4_stream_t* ctx = &ctxBody;
#endif

    int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);

#if (LZ4_HEAPMODE)
    FREEMEM(ctx);
#endif
    return result;
}
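
/* Usage sketch (illustrative only, not part of the library) : "reverse" compression,
 * where the compressed size is fixed and LZ4 reports how much input actually fit.
 * Buffer names and the guard macro are assumptions for the example. */
#if defined(LZ4_USAGE_EXAMPLES)
static int example_fill_fixed_budget(const char* srcBuf, int srcSize, char* dstBuf, int dstCapacity)
{
    int consumed = srcSize;   /* in : bytes available ; out : bytes actually compressed */
    int const cSize = LZ4_compress_destSize(srcBuf, dstBuf, &consumed, dstCapacity);
    /* cSize <= dstCapacity is guaranteed ; consumed <= srcSize tells how far the input was read */
    (void)consumed;
    return cSize;
}
#endif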

/*-******************************
*  Streaming functions
********************************/

#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
LZ4_stream_t* LZ4_createStream(void)
{
    LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
    LZ4_STATIC_ASSERT(sizeof(LZ4_stream_t) >= sizeof(LZ4_stream_t_internal));
    DEBUGLOG(4, "LZ4_createStream %p", lz4s);
    if (lz4s == NULL) return NULL;
    LZ4_initStream(lz4s, sizeof(*lz4s));
    return lz4s;
}
#endif

static size_t LZ4_stream_t_alignment(void)
{
#if LZ4_ALIGN_TEST
    typedef struct { char c; LZ4_stream_t t; } t_a;
    return sizeof(t_a) - sizeof(LZ4_stream_t);
#else
    return 1;   /* effectively disabled */
#endif
}

LZ4_stream_t* LZ4_initStream (void* buffer, size_t size)
{
    DEBUGLOG(5, "LZ4_initStream");
    if (buffer == NULL) { return NULL; }
    if (size < sizeof(LZ4_stream_t)) { return NULL; }
    if (!LZ4_isAligned(buffer, LZ4_stream_t_alignment())) return NULL;
    MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal));
    return (LZ4_stream_t*)buffer;
}
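
/* Usage sketch (illustrative only, not part of the library) : initializing a stream
 * in caller-provided storage, e.g. a static or embedded buffer, instead of using
 * LZ4_createStream(). The guard macro is an assumption for the example. */
#if defined(LZ4_USAGE_EXAMPLES)
static LZ4_stream_t g_exampleStreamStorage;   /* must be at least sizeof(LZ4_stream_t) and properly aligned */
static LZ4_stream_t* example_init_static_stream(void)
{
    /* returns NULL if the buffer is too small or misaligned */
    return LZ4_initStream(&g_exampleStreamStorage, sizeof(g_exampleStreamStorage));
}
#endif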

/* resetStream is now deprecated,
 * prefer initStream() which is more general */
void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
{
    DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
    MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal));
}

void LZ4_resetStream_fast(LZ4_stream_t* ctx) {
    LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);
}

#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
{
    if (!LZ4_stream) return 0;   /* support free on NULL */
    DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
    FREEMEM(LZ4_stream);
    return (0);
}
#endif

#define HASH_UNIT sizeof(reg_t)
int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
{
    LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
    const tableType_t tableType = byU32;
    const BYTE* p = (const BYTE*)dictionary;
    const BYTE* const dictEnd = p + dictSize;
    const BYTE* base;

    DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);

    /* It's necessary to reset the context,
     * and not just continue it with prepareTable()
     * to avoid any risk of generating overflowing matchIndex
     * when compressing using this dictionary */
    LZ4_resetStream(LZ4_dict);

    /* We always increment the offset by 64 KB, since, if the dict is longer,
     * we truncate it to the last 64k, and if it's shorter, we still want to
     * advance by a whole window length so we can provide the guarantee that
     * there are only valid offsets in the window, which allows an optimization
     * in LZ4_compress_fast_continue() where it uses noDictIssue even when the
     * dictionary isn't a full 64k. */
    dict->currentOffset += 64 KB;

    if (dictSize < (int)HASH_UNIT) {
        return 0;
    }

    if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
    base = dictEnd - dict->currentOffset;
    dict->dictionary = p;
    dict->dictSize = (U32)(dictEnd - p);
    dict->tableType = (U32)tableType;

    while (p <= dictEnd - HASH_UNIT) {
        LZ4_putPosition(p, dict->hashTable, tableType, base);
        p += 3;
    }

    return (int)dict->dictSize;
}
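
/* Usage sketch (illustrative only, not part of the library) : compressing one block
 * with a preset dictionary. The dictionary bytes must remain accessible while
 * compressing. Buffer names and the guard macro are assumptions for the example. */
#if defined(LZ4_USAGE_EXAMPLES)
static int example_compress_with_dict(const char* dictBuf, int dictSize,
                                      const char* srcBuf, int srcSize,
                                      char* dstBuf, int dstCapacity)
{
    LZ4_stream_t stream;
    LZ4_initStream(&stream, sizeof(stream));
    LZ4_loadDict(&stream, dictBuf, dictSize);   /* only the last 64 KB are retained */
    return LZ4_compress_fast_continue(&stream, srcBuf, dstBuf, srcSize, dstCapacity, 1);
}
#endif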

void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream)
{
    const LZ4_stream_t_internal* dictCtx = (dictionaryStream == NULL) ? NULL :
        &(dictionaryStream->internal_donotuse);

    DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)",
             workingStream, dictionaryStream,
             dictCtx != NULL ? dictCtx->dictSize : 0);

    if (dictCtx != NULL) {
        /* If the current offset is zero, we will never look in the
         * external dictionary context, since there is no value a table
         * entry can take that indicates a miss. In that case, we need
         * to bump the offset to something non-zero.
         */
        if (workingStream->internal_donotuse.currentOffset == 0) {
            workingStream->internal_donotuse.currentOffset = 64 KB;
        }

        /* Don't actually attach an empty dictionary.
         */
        if (dictCtx->dictSize == 0) {
            dictCtx = NULL;
        }
    }
    workingStream->internal_donotuse.dictCtx = dictCtx;
}
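
/* Usage sketch (illustrative only, not part of the library) : sharing one loaded
 * dictionary across many independent compressions without re-hashing it each time.
 * Names and the guard macro are assumptions for the example. */
#if defined(LZ4_USAGE_EXAMPLES)
static int example_compress_attached(const LZ4_stream_t* loadedDict,   /* prepared once with LZ4_loadDict() */
                                     const char* srcBuf, int srcSize,
                                     char* dstBuf, int dstCapacity)
{
    LZ4_stream_t working;
    LZ4_initStream(&working, sizeof(working));
    LZ4_attach_dictionary(&working, loadedDict);   /* cheap : only a pointer is attached */
    return LZ4_compress_fast_continue(&working, srcBuf, dstBuf, srcSize, dstCapacity, 1);
}
#endif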


static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)
{
    assert(nextSize >= 0);
    if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) {   /* potential ptrdiff_t overflow (32-bits mode) */
        /* rescale hash table */
        U32 const delta = LZ4_dict->currentOffset - 64 KB;
        const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
        int i;
        DEBUGLOG(4, "LZ4_renormDictT");
        for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
            if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i] = 0;
            else LZ4_dict->hashTable[i] -= delta;
        }
        LZ4_dict->currentOffset = 64 KB;
        if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
        LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
    }
}

int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
                                const char* source, char* dest,
                                int inputSize, int maxOutputSize,
                                int acceleration)
{
    const tableType_t tableType = byU32;
    LZ4_stream_t_internal* const streamPtr = &LZ4_stream->internal_donotuse;
    const char* dictEnd = streamPtr->dictSize ? (const char*)streamPtr->dictionary + streamPtr->dictSize : NULL;

    DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i, dictSize=%u)", inputSize, streamPtr->dictSize);

    LZ4_renormDictT(streamPtr, inputSize);   /* fix index overflow */
    if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
    if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;

    /* invalidate tiny dictionaries */
    if ( (streamPtr->dictSize < 4)      /* tiny dictionary : not enough for a hash */
      && (dictEnd != source)            /* prefix mode */
      && (inputSize > 0)                /* tolerance : don't lose history, in case next invocation would use prefix mode */
      && (streamPtr->dictCtx == NULL)   /* usingDictCtx */
      ) {
        DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, streamPtr->dictionary);
        /* remove dictionary existence from history, to employ faster prefix mode */
        streamPtr->dictSize = 0;
        streamPtr->dictionary = (const BYTE*)source;
        dictEnd = source;
    }

    /* Check overlapping input/dictionary space */
    {   const char* const sourceEnd = source + inputSize;
        if ((sourceEnd > (const char*)streamPtr->dictionary) && (sourceEnd < dictEnd)) {
            streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
            if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
            if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
            streamPtr->dictionary = (const BYTE*)dictEnd - streamPtr->dictSize;
        }
    }

    /* prefix mode : source data follows dictionary */
    if (dictEnd == source) {
        if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
            return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration);
        else
            return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration);
    }

    /* external dictionary mode */
    {   int result;
        if (streamPtr->dictCtx) {
            /* We depend here on the fact that dictCtx'es (produced by
             * LZ4_loadDict) guarantee that their tables contain no references
             * to offsets between dictCtx->currentOffset - 64 KB and
             * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe
             * to use noDictIssue even when the dict isn't a full 64 KB.
             */
            if (inputSize > 4 KB) {
                /* For compressing large blobs, it is faster to pay the setup
                 * cost to copy the dictionary's tables into the active context,
                 * so that the compression loop is only looking into one table.
                 */
                LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr));
                result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
            } else {
                result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);
            }
        } else {   /* small data <= 4 KB */
            if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
                result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration);
            } else {
                result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
            }
        }
        streamPtr->dictionary = (const BYTE*)source;
        streamPtr->dictSize = (U32)inputSize;
        return result;
    }
}
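
/* Usage sketch (illustrative only, not part of the library) : block-by-block streaming
 * compression with two rotating input buffers, so the previous block stays addressable
 * and can serve as the dictionary for the next one. All names are assumptions. */
#if defined(LZ4_USAGE_EXAMPLES)
#include <stdio.h>   /* FILE, fread, fwrite : needed only by this example */
static void example_stream_compress(FILE* fin, FILE* fout)
{
    static char inBuf[2][64 * 1024];                /* double buffer : previous block must remain valid */
    static char cmpBuf[LZ4_COMPRESSBOUND(64 * 1024)];
    LZ4_stream_t stream;
    int idx = 0;
    LZ4_initStream(&stream, sizeof(stream));
    for (;;) {
        int const inSize = (int)fread(inBuf[idx], 1, sizeof(inBuf[idx]), fin);
        if (inSize == 0) break;
        {   int const cSize = LZ4_compress_fast_continue(&stream, inBuf[idx], cmpBuf, inSize, (int)sizeof(cmpBuf), 1);
            if (cSize <= 0) break;
            fwrite(cmpBuf, 1, (size_t)cSize, fout);   /* a real container format would also store cSize */
        }
        idx ^= 1;   /* switch buffers ; up to 64 KB of history is reused as dictionary */
    }
}
#endif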

/* Hidden debug function, to force-test external dictionary mode */
int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)
{
    LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
    int result;

    LZ4_renormDictT(streamPtr, srcSize);

    if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
        result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1);
    } else {
        result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
    }

    streamPtr->dictionary = (const BYTE*)source;
    streamPtr->dictSize = (U32)srcSize;

    return result;
}

/*! LZ4_saveDict() :
 *  If previously compressed data block is not guaranteed to remain available at its memory location,
 *  save it into a safer place (char* safeBuffer).
 *  Note : no need to call LZ4_loadDict() afterwards, dictionary is immediately usable,
 *         one can therefore call LZ4_compress_fast_continue() right after.
 * @return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.
 */
int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
{
    LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;

    DEBUGLOG(5, "LZ4_saveDict : dictSize=%i, safeBuffer=%p", dictSize, safeBuffer);

    if ((U32)dictSize > 64 KB) { dictSize = 64 KB; }   /* useless to define a dictionary > 64 KB */
    if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }

    if (safeBuffer == NULL) assert(dictSize == 0);
    if (dictSize > 0) {
        const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;
        assert(dict->dictionary);
        LZ4_memmove(safeBuffer, previousDictEnd - dictSize, (size_t)dictSize);
    }

    dict->dictionary = (const BYTE*)safeBuffer;
    dict->dictSize = (U32)dictSize;

    return dictSize;
}
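
/* Usage sketch (illustrative only, not part of the library) : when the input buffer is
 * about to be overwritten between blocks, the last 64 KB of history can be copied into
 * a stable buffer with LZ4_saveDict(), then streaming continues as usual.
 * All names are assumptions for the example. */
#if defined(LZ4_USAGE_EXAMPLES)
static int example_save_then_continue(LZ4_stream_t* stream, char* dictStore /* >= 64 KB */,
                                      const char* nextBlock, int nextSize,
                                      char* dstBuf, int dstCapacity)
{
    int const saved = LZ4_saveDict(stream, dictStore, 64 * 1024);
    (void)saved;   /* history now lives in dictStore ; the old input buffer may be reused */
    return LZ4_compress_fast_continue(stream, nextBlock, dstBuf, nextSize, dstCapacity, 1);
}
#endif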


/*-*******************************
 *  Decompression functions
 ********************************/

typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;

#undef MIN
#define MIN(a,b)    ( (a) < (b) ? (a) : (b) )


/* variant for decompress_unsafe()
 * does not know end of input
 * presumes input is well formed
 * note : will consume at least one byte */
size_t read_long_length_no_check(const BYTE** pp)
{
    size_t b, l = 0;
    do { b = **pp; (*pp)++; l += b; } while (b == 255);
    DEBUGLOG(6, "read_long_length_no_check: +length=%zu using %zu input bytes", l, l/255 + 1)
    return l;
}
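
/* Worked example (illustrative) : a run of 300 literals is encoded as token nibble 15,
 * then extension bytes 255 and 30, since 15 + 255 + 30 = 300.
 * read_long_length_no_check() returns the sum of the extension bytes (here 285),
 * which the caller adds to the 15 already taken from the token. */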

/* core decoder variant for LZ4_decompress_fast*()
 * for legacy support only : these entry points are deprecated.
 * - Presumes input is correctly formed (no defense vs malformed inputs)
 * - Does not know input size (presume input buffer is "large enough")
 * - Decompress a full block (only)
 * @return : nb of bytes read from input.
 * Note : this variant is not optimized for speed, just for maintenance.
 *        the goal is to remove support of decompress_fast*() variants by v2.0
 */
LZ4_FORCE_INLINE int
LZ4_decompress_unsafe_generic (
const BYTE * const istart ,
BYTE * const ostart ,
int decompressedSize ,
size_t prefixSize ,
const BYTE * const dictStart , /* only if dict==usingExtDict */
const size_t dictSize /* note: =0 if dictStart==NULL */
)
{
const BYTE * ip = istart ;
BYTE * op = ( BYTE * ) ostart ;
BYTE * const oend = ostart + decompressedSize ;
const BYTE * const prefixStart = ostart - prefixSize ;
DEBUGLOG ( 5 , " LZ4_decompress_unsafe_generic " ) ;
if ( dictStart = = NULL ) assert ( dictSize = = 0 ) ;
while ( 1 ) {
/* start new sequence */
unsigned token = * ip + + ;
/* literals */
{ size_t ll = token > > ML_BITS ;
if ( ll = = 15 ) {
/* long literal length */
ll + = read_long_length_no_check ( & ip ) ;
}
if ( ( size_t ) ( oend - op ) < ll ) return - 1 ; /* output buffer overflow */
LZ4_memmove ( op , ip , ll ) ; /* support in-place decompression */
op + = ll ;
ip + = ll ;
if ( ( size_t ) ( oend - op ) < MFLIMIT ) {
if ( op = = oend ) break ; /* end of block */
DEBUGLOG ( 5 , " invalid: literals end at distance %zi from end of block " , oend - op ) ;
/* incorrect end of block :
* last match must start at least MFLIMIT = = 12 bytes before end of output block */
return - 1 ;
} }
/* match */
{ size_t ml = token & 15 ;
size_t const offset = LZ4_readLE16 ( ip ) ;
ip + = 2 ;
if ( ml = = 15 ) {
/* long literal length */
ml + = read_long_length_no_check ( & ip ) ;
}
ml + = MINMATCH ;
if ( ( size_t ) ( oend - op ) < ml ) return - 1 ; /* output buffer overflow */
{ const BYTE * match = op - offset ;
/* out of range */
if ( offset > ( size_t ) ( op - prefixStart ) + dictSize ) {
DEBUGLOG ( 6 , " offset out of range " ) ;
return - 1 ;
}
/* check special case : extDict */
if ( offset > ( size_t ) ( op - prefixStart ) ) {
/* extDict scenario */
const BYTE * const dictEnd = dictStart + dictSize ;
const BYTE * extMatch = dictEnd - ( offset - ( size_t ) ( op - prefixStart ) ) ;
size_t const extml = ( size_t ) ( dictEnd - extMatch ) ;
if ( extml > ml ) {
/* match entirely within extDict */
LZ4_memmove ( op , extMatch , ml ) ;
op + = ml ;
ml = 0 ;
} else {
/* match split between extDict & prefix */
LZ4_memmove ( op , extMatch , extml ) ;
op + = extml ;
ml - = extml ;
}
match = prefixStart ;
}
/* match copy - slow variant, supporting overlap copy */
{ size_t u ;
for ( u = 0 ; u < ml ; u + + ) {
op [ u ] = match [ u ] ;
} } }
op + = ml ;
if ( ( size_t ) ( oend - op ) < LASTLITERALS ) {
DEBUGLOG ( 5 , " invalid: match ends at distance %zi from end of block " , oend - op ) ;
/* incorrect end of block :
* last match must stop at least LASTLITERALS = = 5 bytes before end of output block */
return - 1 ;
}
} /* match */
} /* main loop */
return ( int ) ( ip - istart ) ;
}

/* Read the variable-length literal or match length.
 *
 * @ip : input pointer, updated to point past the consumed length bytes
 * @ilimit : position after which, if the length is not fully decoded, the input is necessarily corrupted.
 * @initial_check : if set, check ip >= ilimit before the first read; returns rvl_error if so.
 * @return : the accumulated length, or rvl_error on corrupted input.
 */
typedef size_t Rvl_t;
static const Rvl_t rvl_error = (Rvl_t)(-1);
LZ4_FORCE_INLINE Rvl_t
read_variable_length(const BYTE** ip, const BYTE* ilimit,
                     int initial_check)
{
    Rvl_t s, length = 0;
    assert(ip != NULL);
    assert(*ip != NULL);
    assert(ilimit != NULL);
    if (initial_check && unlikely((*ip) >= ilimit)) {   /* read limit reached */
        return rvl_error;
    }
    do {
        s = **ip;
        (*ip)++;
        length += s;
        if (unlikely((*ip) > ilimit)) {   /* read limit reached */
            return rvl_error;
        }
        /* accumulator overflow detection (32-bit mode only) */
        if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2))) {
            return rvl_error;
        }
    } while (s == 255);

    return length;
}

/*! LZ4_decompress_generic() :
 *  This generic decompression function covers all use cases.
 *  It shall be instantiated several times, using different sets of directives.
 *  Note that it is important for performance that this function really get inlined,
 *  in order to remove useless branches during compilation optimization.
 */
LZ4_FORCE_INLINE int
LZ4_decompress_generic(
                 const char* const src,
                 char* const dst,
                 int srcSize,
                 int outputSize,         /* If endOnInput==endOnInputSize, this value is `dstCapacity` */

                 earlyEnd_directive partialDecoding,  /* full, partial */
                 dict_directive dict,                 /* noDict, withPrefix64k, usingExtDict */
                 const BYTE* const lowPrefix,   /* always <= dst, == dst when no prefix */
                 const BYTE* const dictStart,   /* only if dict==usingExtDict */
                 const size_t dictSize          /* note : = 0 if noDict */
                 )
{
2022-08-16 12:43:50 +00:00
if ( ( src = = NULL ) | | ( outputSize < 0 ) ) { return - 1 ; }
2017-09-11 23:25:50 +00:00
2019-05-01 14:53:48 +00:00
{ const BYTE * ip = ( const BYTE * ) src ;
const BYTE * const iend = ip + srcSize ;
2017-09-11 23:25:50 +00:00
2019-05-01 14:53:48 +00:00
BYTE * op = ( BYTE * ) dst ;
BYTE * const oend = op + outputSize ;
BYTE * cpy ;
2017-09-11 23:25:50 +00:00
2019-05-01 14:53:48 +00:00
const BYTE * const dictEnd = ( dictStart = = NULL ) ? NULL : dictStart + dictSize ;
2017-09-11 23:25:50 +00:00
2022-08-16 12:43:50 +00:00
const int checkOffset = ( dictSize < ( int ) ( 64 KB ) ) ;
2018-05-07 23:52:40 +00:00
2017-09-11 23:25:50 +00:00
2019-05-01 14:53:48 +00:00
/* Set up the "end" pointers for the shortcut. */
2022-08-16 12:43:50 +00:00
const BYTE * const shortiend = iend - 14 /*maxLL*/ - 2 /*offset*/ ;
const BYTE * const shortoend = oend - 14 /*maxLL*/ - 18 /*maxML*/ ;
2017-09-11 23:25:50 +00:00
const BYTE * match ;
size_t offset ;
2019-05-01 14:53:48 +00:00
unsigned token ;
size_t length ;
2017-09-11 23:25:50 +00:00
2018-05-07 23:52:40 +00:00
2019-05-01 14:53:48 +00:00
DEBUGLOG ( 5 , " LZ4_decompress_generic (srcSize:%i, dstSize:%i) " , srcSize , outputSize ) ;
2018-05-07 23:52:40 +00:00
2019-05-01 14:53:48 +00:00
/* Special cases */
assert ( lowPrefix < = op ) ;
2022-08-16 12:43:50 +00:00
if ( unlikely ( outputSize = = 0 ) ) {
2020-11-16 16:46:11 +00:00
/* Empty output buffer */
if ( partialDecoding ) return 0 ;
return ( ( srcSize = = 1 ) & & ( * ip = = 0 ) ) ? 0 : - 1 ;
}
2022-08-16 12:43:50 +00:00
if ( unlikely ( srcSize = = 0 ) ) { return - 1 ; }
2019-05-01 14:53:48 +00:00
2022-08-16 12:43:50 +00:00
/* LZ4_FAST_DEC_LOOP:
* designed for modern OoO performance cpus ,
* where copying reliably 32 - bytes is preferable to an unpredictable branch .
* note : fast loop may show a regression for some client arm chips . */
2019-05-01 14:53:48 +00:00
# if LZ4_FAST_DEC_LOOP
if ( ( oend - op ) < FASTLOOP_SAFE_DISTANCE ) {
DEBUGLOG ( 6 , " skip fast decode loop " ) ;
goto safe_decode ;
}
2022-08-16 12:43:50 +00:00
/* Fast loop : decode sequences as long as output < oend-FASTLOOP_SAFE_DISTANCE */
2019-05-01 14:53:48 +00:00
while ( 1 ) {
/* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */
assert ( oend - op > = FASTLOOP_SAFE_DISTANCE ) ;
2022-08-16 12:43:50 +00:00
assert ( ip < iend ) ;
2019-05-01 14:53:48 +00:00
token = * ip + + ;
length = token > > ML_BITS ; /* literal length */
/* decode literal length */
if ( length = = RUN_MASK ) {
2022-08-16 12:43:50 +00:00
size_t const addl = read_variable_length ( & ip , iend - RUN_MASK , 1 ) ;
if ( addl = = rvl_error ) { goto _output_error ; }
length + = addl ;
if ( unlikely ( ( uptrval ) ( op ) + length < ( uptrval ) ( op ) ) ) { goto _output_error ; } /* overflow detection */
if ( unlikely ( ( uptrval ) ( ip ) + length < ( uptrval ) ( ip ) ) ) { goto _output_error ; } /* overflow detection */
2019-05-01 14:53:48 +00:00
/* copy literals */
cpy = op + length ;
LZ4_STATIC_ASSERT ( MFLIMIT > = WILDCOPYLENGTH ) ;
2022-08-16 12:43:50 +00:00
if ( ( cpy > oend - 32 ) | | ( ip + length > iend - 32 ) ) { goto safe_literal_copy ; }
LZ4_wildCopy32 ( op , ip , cpy ) ;
2019-05-01 14:53:48 +00:00
ip + = length ; op = cpy ;
} else {
cpy = op + length ;
2022-08-16 12:43:50 +00:00
DEBUGLOG ( 7 , " copy %u bytes in a 16-bytes stripe " , ( unsigned ) length ) ;
/* We don't need to check oend, since we check it once for each loop below */
if ( ip > iend - ( 16 + 1 /*max lit + offset + nextToken*/ ) ) { goto safe_literal_copy ; }
/* Literals can only be <= 14, but hope compilers optimize better when copy by a register size */
LZ4_memcpy ( op , ip , 16 ) ;
2019-05-01 14:53:48 +00:00
ip + = length ; op = cpy ;
}
/* get offset */
offset = LZ4_readLE16 ( ip ) ; ip + = 2 ;
2018-05-07 23:52:40 +00:00
match = op - offset ;
2022-08-16 12:43:50 +00:00
assert ( match < = op ) ; /* overflow check */
2019-05-01 14:53:48 +00:00
/* get matchlength */
length = token & ML_MASK ;
if ( length = = ML_MASK ) {
2022-08-16 12:43:50 +00:00
size_t const addl = read_variable_length ( & ip , iend - LASTLITERALS + 1 , 0 ) ;
if ( addl = = rvl_error ) { goto _output_error ; }
length + = addl ;
2019-05-01 14:53:48 +00:00
length + = MINMATCH ;
2022-08-16 12:43:50 +00:00
if ( unlikely ( ( uptrval ) ( op ) + length < ( uptrval ) op ) ) { goto _output_error ; } /* overflow detection */
if ( ( checkOffset ) & & ( unlikely ( match + dictSize < lowPrefix ) ) ) { goto _output_error ; } /* Error : offset outside buffers */
2019-05-01 14:53:48 +00:00
if ( op + length > = oend - FASTLOOP_SAFE_DISTANCE ) {
goto safe_match_copy ;
}
} else {
length + = MINMATCH ;
if ( op + length > = oend - FASTLOOP_SAFE_DISTANCE ) {
goto safe_match_copy ;
}
2022-08-16 12:43:50 +00:00
/* Fastpath check: skip LZ4_wildCopy32 when true */
2020-11-16 16:46:11 +00:00
if ( ( dict = = withPrefix64k ) | | ( match > = lowPrefix ) ) {
2019-05-01 14:53:48 +00:00
if ( offset > = 8 ) {
2020-11-16 16:46:11 +00:00
assert ( match > = lowPrefix ) ;
assert ( match < = op ) ;
assert ( op + 18 < = oend ) ;
LZ4_memcpy ( op , match , 8 ) ;
LZ4_memcpy ( op + 8 , match + 8 , 8 ) ;
LZ4_memcpy ( op + 16 , match + 16 , 2 ) ;
2019-05-01 14:53:48 +00:00
op + = length ;
continue ;
} } }
2020-11-16 16:46:11 +00:00
if ( checkOffset & & ( unlikely ( match + dictSize < lowPrefix ) ) ) { goto _output_error ; } /* Error : offset outside buffers */
2019-05-01 14:53:48 +00:00
/* match starting within external dictionary */
if ( ( dict = = usingExtDict ) & & ( match < lowPrefix ) ) {
2022-08-16 12:43:50 +00:00
assert ( dictEnd ! = NULL ) ;
2019-05-01 14:53:48 +00:00
if ( unlikely ( op + length > oend - LASTLITERALS ) ) {
2020-11-16 16:46:11 +00:00
if ( partialDecoding ) {
DEBUGLOG ( 7 , " partialDecoding: dictionary match, close to dstEnd " ) ;
length = MIN ( length , ( size_t ) ( oend - op ) ) ;
} else {
goto _output_error ; /* end-of-block condition violated */
} }
2019-05-01 14:53:48 +00:00
if ( length < = ( size_t ) ( lowPrefix - match ) ) {
/* match fits entirely within external dictionary : just copy */
2022-08-16 12:43:50 +00:00
LZ4_memmove ( op , dictEnd - ( lowPrefix - match ) , length ) ;
2019-05-01 14:53:48 +00:00
op + = length ;
} else {
/* match stretches into both external dictionary and current block */
size_t const copySize = ( size_t ) ( lowPrefix - match ) ;
size_t const restSize = length - copySize ;
2020-11-16 16:46:11 +00:00
LZ4_memcpy ( op , dictEnd - copySize , copySize ) ;
2019-05-01 14:53:48 +00:00
op + = copySize ;
if ( restSize > ( size_t ) ( op - lowPrefix ) ) { /* overlap copy */
BYTE * const endOfMatch = op + restSize ;
const BYTE * copyFrom = lowPrefix ;
2020-11-16 16:46:11 +00:00
while ( op < endOfMatch ) { * op + + = * copyFrom + + ; }
2019-05-01 14:53:48 +00:00
} else {
2020-11-16 16:46:11 +00:00
LZ4_memcpy ( op , lowPrefix , restSize ) ;
2019-05-01 14:53:48 +00:00
op + = restSize ;
} }
2018-03-04 14:23:46 +00:00
continue ;
}
2018-05-07 23:52:40 +00:00
2019-05-01 14:53:48 +00:00
/* copy match within block */
cpy = op + length ;
2018-03-04 14:23:46 +00:00
2019-05-01 14:53:48 +00:00
assert ( ( op < = oend ) & & ( oend - op > = 32 ) ) ;
if ( unlikely ( offset < 16 ) ) {
LZ4_memcpy_using_offset ( op , match , cpy , offset ) ;
} else {
LZ4_wildCopy32 ( op , match , cpy ) ;
}
op = cpy ; /* wildcopy correction */
2017-09-11 23:25:50 +00:00
}
2019-05-01 14:53:48 +00:00
safe_decode :
# endif
2017-09-11 23:25:50 +00:00
2019-05-01 14:53:48 +00:00
/* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */
while ( 1 ) {
2022-08-16 12:43:50 +00:00
assert ( ip < iend ) ;
2019-05-01 14:53:48 +00:00
token = * ip + + ;
length = token > > ML_BITS ; /* literal length */
/* A two-stage shortcut for the most common case:
* 1 ) If the literal length is 0. .14 , and there is enough space ,
* enter the shortcut and copy 16 bytes on behalf of the literals
* ( in the fast mode , only 8 bytes can be safely copied this way ) .
* 2 ) Further if the match length is 4. .18 , copy 18 bytes in a similar
* manner ; but we ensure that there ' s enough space in the output for
* those 18 bytes earlier , upon entering the shortcut ( in other words ,
* there is a combined check for both stages ) .
*/
2022-08-16 12:43:50 +00:00
if ( ( length ! = RUN_MASK )
2019-05-01 14:53:48 +00:00
/* strictly "less than" on input, to re-enter the loop with at least one byte */
2022-08-16 12:43:50 +00:00
& & likely ( ( ip < shortiend ) & ( op < = shortoend ) ) ) {
2019-05-01 14:53:48 +00:00
/* Copy the literals */
2022-08-16 12:43:50 +00:00
LZ4_memcpy ( op , ip , 16 ) ;
2019-05-01 14:53:48 +00:00
op + = length ; ip + = length ;
/* The second stage: prepare for match copying, decode full info.
* If it doesn ' t work out , the info won ' t be wasted . */
length = token & ML_MASK ; /* match length */
offset = LZ4_readLE16 ( ip ) ; ip + = 2 ;
match = op - offset ;
assert ( match < = op ) ; /* check overflow */
/* Do not deal with overlapping matches. */
if ( ( length ! = ML_MASK )
& & ( offset > = 8 )
& & ( dict = = withPrefix64k | | match > = lowPrefix ) ) {
/* Copy the match. */
2020-11-16 16:46:11 +00:00
LZ4_memcpy ( op + 0 , match + 0 , 8 ) ;
LZ4_memcpy ( op + 8 , match + 8 , 8 ) ;
LZ4_memcpy ( op + 16 , match + 16 , 2 ) ;
2019-05-01 14:53:48 +00:00
op + = length + MINMATCH ;
/* Both stages worked, load the next token. */
continue ;
}
/* The second stage didn't work out, but the info is ready.
* Propel it right to the point of match copying . */
goto _copy_match ;
2017-09-11 23:25:50 +00:00
}
2019-05-01 14:53:48 +00:00
/* decode literal length */
if ( length = = RUN_MASK ) {
2022-08-16 12:43:50 +00:00
size_t const addl = read_variable_length ( & ip , iend - RUN_MASK , 1 ) ;
if ( addl = = rvl_error ) { goto _output_error ; }
length + = addl ;
if ( unlikely ( ( uptrval ) ( op ) + length < ( uptrval ) ( op ) ) ) { goto _output_error ; } /* overflow detection */
if ( unlikely ( ( uptrval ) ( ip ) + length < ( uptrval ) ( ip ) ) ) { goto _output_error ; } /* overflow detection */
2018-09-12 23:52:16 +00:00
}
2019-05-01 14:53:48 +00:00
/* copy literals */
cpy = op + length ;
# if LZ4_FAST_DEC_LOOP
safe_literal_copy :
# endif
LZ4_STATIC_ASSERT ( MFLIMIT > = WILDCOPYLENGTH ) ;
2022-08-16 12:43:50 +00:00
if ( ( cpy > oend - MFLIMIT ) | | ( ip + length > iend - ( 2 + 1 + LASTLITERALS ) ) ) {
2020-11-16 16:46:11 +00:00
/* We've either hit the input parsing restriction or the output parsing restriction.
* In the normal scenario , decoding a full block , it must be the last sequence ,
* otherwise it ' s an error ( invalid input or dimensions ) .
* In partialDecoding scenario , it ' s necessary to ensure there is no buffer overflow .
*/
2019-05-01 14:53:48 +00:00
if ( partialDecoding ) {
2020-11-16 16:46:11 +00:00
/* Since we are partial decoding we may be in this block because of the output parsing
* restriction , which is not valid since the output buffer is allowed to be undersized .
*/
DEBUGLOG ( 7 , " partialDecoding: copying literals, close to input or output end " )
DEBUGLOG ( 7 , " partialDecoding: literal length = %u " , ( unsigned ) length ) ;
DEBUGLOG ( 7 , " partialDecoding: remaining space in dstBuffer : %i " , ( int ) ( oend - op ) ) ;
DEBUGLOG ( 7 , " partialDecoding: remaining space in srcBuffer : %i " , ( int ) ( iend - ip ) ) ;
/* Finishing in the middle of a literals segment,
* due to lack of input .
*/
if ( ip + length > iend ) {
length = ( size_t ) ( iend - ip ) ;
cpy = op + length ;
}
/* Finishing in the middle of a literals segment,
* due to lack of output space .
*/
if ( cpy > oend ) {
cpy = oend ;
assert ( op < = oend ) ;
length = ( size_t ) ( oend - op ) ;
}
2019-05-01 14:53:48 +00:00
} else {
2020-11-16 16:46:11 +00:00
/* We must be on the last sequence (or invalid) because of the parsing limitations
* so check that we exactly consume the input and don ' t overrun the output buffer .
*/
2022-08-16 12:43:50 +00:00
if ( ( ip + length ! = iend ) | | ( cpy > oend ) ) {
2020-11-16 16:46:11 +00:00
DEBUGLOG ( 6 , " should have been last run of literals " )
DEBUGLOG ( 6 , " ip(%p) + length(%i) = %p != iend (%p) " , ip , ( int ) length , ip + length , iend ) ;
DEBUGLOG ( 6 , " or cpy(%p) > oend(%p) " , cpy , oend ) ;
goto _output_error ;
}
2019-05-01 14:53:48 +00:00
}
2022-08-16 12:43:50 +00:00
LZ4_memmove ( op , ip , length ) ; /* supports overlapping memory regions, for in-place decompression scenarios */
2019-05-01 14:53:48 +00:00
ip + = length ;
op + = length ;
2020-11-16 16:46:11 +00:00
/* Necessarily EOF when !partialDecoding.
* When partialDecoding , it is EOF if we ' ve either
* filled the output buffer or
* can ' t proceed with reading an offset for following match .
*/
if ( ! partialDecoding | | ( cpy = = oend ) | | ( ip > = ( iend - 2 ) ) ) {
2019-05-01 14:53:48 +00:00
break ;
}
} else {
2022-08-16 12:43:50 +00:00
LZ4_wildCopy8 ( op , ip , cpy ) ; /* can overwrite up to 8 bytes beyond cpy */
2019-05-01 14:53:48 +00:00
ip + = length ; op = cpy ;
}
2017-09-11 23:25:50 +00:00
2019-05-01 14:53:48 +00:00
/* get offset */
offset = LZ4_readLE16 ( ip ) ; ip + = 2 ;
match = op - offset ;
2018-05-07 23:52:40 +00:00
2019-05-01 14:53:48 +00:00
/* get matchlength */
length = token & ML_MASK ;
_copy_match :
if ( length = = ML_MASK ) {
2022-08-16 12:43:50 +00:00
size_t const addl = read_variable_length ( & ip , iend - LASTLITERALS + 1 , 0 ) ;
if ( addl = = rvl_error ) { goto _output_error ; }
length + = addl ;
if ( unlikely ( ( uptrval ) ( op ) + length < ( uptrval ) op ) ) goto _output_error ; /* overflow detection */
2019-05-01 14:53:48 +00:00
}
length + = MINMATCH ;
2018-05-07 23:52:40 +00:00
2019-05-01 14:53:48 +00:00
# if LZ4_FAST_DEC_LOOP
safe_match_copy :
# endif
2020-11-16 16:46:11 +00:00
if ( ( checkOffset ) & & ( unlikely ( match + dictSize < lowPrefix ) ) ) goto _output_error ; /* Error : offset outside buffers */
2019-05-01 14:53:48 +00:00
/* match starting within external dictionary */
if ( ( dict = = usingExtDict ) & & ( match < lowPrefix ) ) {
2022-08-16 12:43:50 +00:00
assert ( dictEnd ! = NULL ) ;
2019-05-01 14:53:48 +00:00
if ( unlikely ( op + length > oend - LASTLITERALS ) ) {
if ( partialDecoding ) length = MIN ( length , ( size_t ) ( oend - op ) ) ;
else goto _output_error ; /* doesn't respect parsing restriction */
}
2017-09-11 23:25:50 +00:00
2019-05-01 14:53:48 +00:00
if ( length < = ( size_t ) ( lowPrefix - match ) ) {
/* match fits entirely within external dictionary : just copy */
2022-08-16 12:43:50 +00:00
LZ4_memmove ( op , dictEnd - ( lowPrefix - match ) , length ) ;
2019-05-01 14:53:48 +00:00
op + = length ;
} else {
/* match stretches into both external dictionary and current block */
size_t const copySize = ( size_t ) ( lowPrefix - match ) ;
size_t const restSize = length - copySize ;
2020-11-16 16:46:11 +00:00
LZ4_memcpy ( op , dictEnd - copySize , copySize ) ;
2019-05-01 14:53:48 +00:00
op + = copySize ;
if ( restSize > ( size_t ) ( op - lowPrefix ) ) { /* overlap copy */
BYTE * const endOfMatch = op + restSize ;
const BYTE * copyFrom = lowPrefix ;
while ( op < endOfMatch ) * op + + = * copyFrom + + ;
} else {
2020-11-16 16:46:11 +00:00
LZ4_memcpy ( op , lowPrefix , restSize ) ;
2019-05-01 14:53:48 +00:00
op + = restSize ;
} }
continue ;
2018-09-12 23:52:16 +00:00
}
2020-11-16 16:46:11 +00:00
assert ( match > = lowPrefix ) ;
2017-09-11 23:25:50 +00:00
2019-05-01 14:53:48 +00:00
/* copy match within block */
cpy = op + length ;
/* partialDecoding : may end anywhere within the block */
assert ( op < = oend ) ;
if ( partialDecoding & & ( cpy > oend - MATCH_SAFEGUARD_DISTANCE ) ) {
size_t const mlen = MIN ( length , ( size_t ) ( oend - op ) ) ;
const BYTE * const matchEnd = match + mlen ;
BYTE * const copyEnd = op + mlen ;
if ( matchEnd > op ) { /* overlap copy */
2020-11-16 16:46:11 +00:00
while ( op < copyEnd ) { * op + + = * match + + ; }
2017-09-11 23:25:50 +00:00
} else {
2020-11-16 16:46:11 +00:00
LZ4_memcpy ( op , match , mlen ) ;
2019-05-01 14:53:48 +00:00
}
op = copyEnd ;
2020-11-16 16:46:11 +00:00
if ( op = = oend ) { break ; }
2019-05-01 14:53:48 +00:00
continue ;
}
2017-09-11 23:25:50 +00:00
2019-05-01 14:53:48 +00:00
if ( unlikely ( offset < 8 ) ) {
2020-11-16 16:46:11 +00:00
LZ4_write32 ( op , 0 ) ; /* silence msan warning when offset==0 */
2019-05-01 14:53:48 +00:00
op [ 0 ] = match [ 0 ] ;
op [ 1 ] = match [ 1 ] ;
op [ 2 ] = match [ 2 ] ;
op [ 3 ] = match [ 3 ] ;
match + = inc32table [ offset ] ;
2020-11-16 16:46:11 +00:00
LZ4_memcpy ( op + 4 , match , 4 ) ;
2019-05-01 14:53:48 +00:00
match - = dec64table [ offset ] ;
2018-09-12 23:52:16 +00:00
} else {
2020-11-16 16:46:11 +00:00
LZ4_memcpy ( op , match , 8 ) ;
2019-05-01 14:53:48 +00:00
match + = 8 ;
2018-09-12 23:52:16 +00:00
}
2019-05-01 14:53:48 +00:00
op + = 8 ;
if ( unlikely ( cpy > oend - MATCH_SAFEGUARD_DISTANCE ) ) {
BYTE * const oCopyLimit = oend - ( WILDCOPYLENGTH - 1 ) ;
2020-11-16 16:46:11 +00:00
if ( cpy > oend - LASTLITERALS ) { goto _output_error ; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
2019-05-01 14:53:48 +00:00
if ( op < oCopyLimit ) {
LZ4_wildCopy8 ( op , match , oCopyLimit ) ;
match + = oCopyLimit - op ;
op = oCopyLimit ;
}
2020-11-16 16:46:11 +00:00
while ( op < cpy ) { * op + + = * match + + ; }
2019-05-01 14:53:48 +00:00
} else {
2020-11-16 16:46:11 +00:00
LZ4_memcpy ( op , match , 8 ) ;
if ( length > 16 ) { LZ4_wildCopy8 ( op + 8 , match + 8 , cpy ) ; }
2017-09-11 23:25:50 +00:00
}
2019-05-01 14:53:48 +00:00
op = cpy ; /* wildcopy correction */
2017-09-11 23:25:50 +00:00
}
2019-05-01 14:53:48 +00:00
/* end of decoding */
2022-08-16 12:43:50 +00:00
DEBUGLOG ( 5 , " decoded %i bytes " , ( int ) ( ( ( char * ) op ) - dst ) ) ;
return ( int ) ( ( ( char * ) op ) - dst ) ; /* Nb of output bytes decoded */
2017-09-11 23:25:50 +00:00
2019-05-01 14:53:48 +00:00
/* Overflow error detected */
_output_error :
return ( int ) ( - ( ( ( const char * ) ip ) - src ) ) - 1 ;
}
2017-09-11 23:25:50 +00:00
}

/*===== Instantiate the API decoding functions. =====*/

LZ4_FORCE_O2
int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
{
    return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,
                                  decode_full_block, noDict,
                                  (BYTE*)dest, NULL, 0);
}

LZ4_FORCE_O2
int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity)
{
    dstCapacity = MIN(targetOutputSize, dstCapacity);
    return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
                                  partial_decode,
                                  noDict, (BYTE*)dst, NULL, 0);
}
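
/* Usage sketch (illustrative only, not part of the library) : decoding only the first
 * bytes of a block, e.g. to peek at an embedded header, without decompressing everything.
 * Names and the guard macro are assumptions for the example. */
#if defined(LZ4_USAGE_EXAMPLES)
static int example_peek_first_bytes(const char* cmpBuf, int cmpSize, char* peekBuf, int peekSize)
{
    /* returns the number of bytes actually written into peekBuf (<= peekSize) */
    return LZ4_decompress_safe_partial(cmpBuf, peekBuf, cmpSize, peekSize, peekSize);
}
#endif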

LZ4_FORCE_O2
int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
{
    DEBUGLOG(5, "LZ4_decompress_fast");
    return LZ4_decompress_unsafe_generic(
                (const BYTE*)source, (BYTE*)dest, originalSize,
                0, NULL, 0);
}
2018-05-07 23:52:40 +00:00
/*===== Instantiate a few more decoding cases, used more than once. =====*/
2020-11-16 16:46:11 +00:00
LZ4_FORCE_O2 /* Exported, an obsolete API function. */
2018-05-07 23:52:40 +00:00
int LZ4_decompress_safe_withPrefix64k ( const char * source , char * dest , int compressedSize , int maxOutputSize )
{
return LZ4_decompress_generic ( source , dest , compressedSize , maxOutputSize ,
2022-08-16 12:43:50 +00:00
decode_full_block , withPrefix64k ,
( BYTE * ) dest - 64 KB , NULL , 0 ) ;
}
LZ4_FORCE_O2
static int LZ4_decompress_safe_partial_withPrefix64k ( const char * source , char * dest , int compressedSize , int targetOutputSize , int dstCapacity )
{
dstCapacity = MIN ( targetOutputSize , dstCapacity ) ;
return LZ4_decompress_generic ( source , dest , compressedSize , dstCapacity ,
partial_decode , withPrefix64k ,
2018-05-07 23:52:40 +00:00
( BYTE * ) dest - 64 KB , NULL , 0 ) ;
}
/* Another obsolete API function, paired with the previous one. */
int LZ4_decompress_fast_withPrefix64k ( const char * source , char * dest , int originalSize )
{
2022-08-16 12:43:50 +00:00
return LZ4_decompress_unsafe_generic (
( const BYTE * ) source , ( BYTE * ) dest , originalSize ,
64 KB , NULL , 0 ) ;
2018-05-07 23:52:40 +00:00
}
2020-11-16 16:46:11 +00:00
LZ4_FORCE_O2
2018-05-07 23:52:40 +00:00
static int LZ4_decompress_safe_withSmallPrefix ( const char * source , char * dest , int compressedSize , int maxOutputSize ,
size_t prefixSize )
{
return LZ4_decompress_generic ( source , dest , compressedSize , maxOutputSize ,
2022-08-16 12:43:50 +00:00
decode_full_block , noDict ,
( BYTE * ) dest - prefixSize , NULL , 0 ) ;
}
LZ4_FORCE_O2
static int LZ4_decompress_safe_partial_withSmallPrefix ( const char * source , char * dest , int compressedSize , int targetOutputSize , int dstCapacity ,
size_t prefixSize )
{
dstCapacity = MIN ( targetOutputSize , dstCapacity ) ;
return LZ4_decompress_generic ( source , dest , compressedSize , dstCapacity ,
partial_decode , noDict ,
2018-05-07 23:52:40 +00:00
( BYTE * ) dest - prefixSize , NULL , 0 ) ;
}
2020-11-16 16:46:11 +00:00
LZ4_FORCE_O2
2018-09-12 23:52:16 +00:00
int LZ4_decompress_safe_forceExtDict ( const char * source , char * dest ,
int compressedSize , int maxOutputSize ,
const void * dictStart , size_t dictSize )
2018-05-07 23:52:40 +00:00
{
return LZ4_decompress_generic ( source , dest , compressedSize , maxOutputSize ,
2022-08-16 12:43:50 +00:00
decode_full_block , usingExtDict ,
( BYTE * ) dest , ( const BYTE * ) dictStart , dictSize ) ;
}
LZ4_FORCE_O2
int LZ4_decompress_safe_partial_forceExtDict ( const char * source , char * dest ,
int compressedSize , int targetOutputSize , int dstCapacity ,
const void * dictStart , size_t dictSize )
{
dstCapacity = MIN ( targetOutputSize , dstCapacity ) ;
return LZ4_decompress_generic ( source , dest , compressedSize , dstCapacity ,
partial_decode , usingExtDict ,
2018-05-07 23:52:40 +00:00
( BYTE * ) dest , ( const BYTE * ) dictStart , dictSize ) ;
}
2020-11-16 16:46:11 +00:00
LZ4_FORCE_O2
2018-05-07 23:52:40 +00:00
static int LZ4_decompress_fast_extDict ( const char * source , char * dest , int originalSize ,
const void * dictStart , size_t dictSize )
{
2022-08-16 12:43:50 +00:00
return LZ4_decompress_unsafe_generic (
( const BYTE * ) source , ( BYTE * ) dest , originalSize ,
0 , ( const BYTE * ) dictStart , dictSize ) ;
2018-05-07 23:52:40 +00:00
}
/* The "double dictionary" mode, for use with e.g. ring buffers: the first part
* of the dictionary is passed as prefix , and the second via dictStart + dictSize .
* These routines are used only once , in LZ4_decompress_ * _continue ( ) .
*/
LZ4_FORCE_INLINE
int LZ4_decompress_safe_doubleDict ( const char * source , char * dest , int compressedSize , int maxOutputSize ,
size_t prefixSize , const void * dictStart , size_t dictSize )
{
return LZ4_decompress_generic ( source , dest , compressedSize , maxOutputSize ,
2022-08-16 12:43:50 +00:00
decode_full_block , usingExtDict ,
2018-05-07 23:52:40 +00:00
( BYTE * ) dest - prefixSize , ( const BYTE * ) dictStart , dictSize ) ;
}

/*===== streaming decompression functions =====*/

#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
LZ4_streamDecode_t* LZ4_createStreamDecode(void)
{
    LZ4_STATIC_ASSERT(sizeof(LZ4_streamDecode_t) >= sizeof(LZ4_streamDecode_t_internal));
    return (LZ4_streamDecode_t*)ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));
}

int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
{
    if (LZ4_stream == NULL) { return 0; }   /* support free on NULL */
    FREEMEM(LZ4_stream);
    return 0;
}
#endif

/*! LZ4_setStreamDecode() :
 *  Use this function to instruct where to find the dictionary.
 *  This function is not necessary if previous data is still available where it was decoded.
 *  Loading a size of 0 is allowed (same effect as no dictionary).
 * @return : 1 if OK, 0 if error
 */
int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
{
    LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
    lz4sd->prefixSize = (size_t)dictSize;
    if (dictSize) {
        assert(dictionary != NULL);
        lz4sd->prefixEnd = (const BYTE*)dictionary + dictSize;
    } else {
        lz4sd->prefixEnd = (const BYTE*)dictionary;
    }
    lz4sd->externalDict = NULL;
    lz4sd->extDictSize = 0;
    return 1;
}
/*! LZ4_decoderRingBufferSize() :
 *  when setting a ring buffer for streaming decompression (optional scenario),
 *  provides the minimum size of this ring buffer
 *  to be compatible with any source respecting the maxBlockSize condition.
 *  Note : in a ring buffer scenario,
 *  blocks are presumed decompressed next to each other.
 *  When not enough space remains for the next block (remainingSize < maxBlockSize),
 *  decoding resumes from the beginning of the ring buffer.
 * @return : minimum ring buffer size,
 *           or 0 if there is an error (invalid maxBlockSize).
 */
int LZ4_decoderRingBufferSize(int maxBlockSize)
{
    if (maxBlockSize < 0) return 0;
    if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0;
    if (maxBlockSize < 16) maxBlockSize = 16;
    return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize);
}
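
/* Illustrative sketch (hypothetical caller code, excluded from compilation) :
 * sizing a decode ring buffer with LZ4_decoderRingBufferSize() and wrapping
 * back to the start of the ring when fewer than maxBlockSize bytes remain,
 * mirroring the rule described above. recvBlock() and the chosen maxBlockSize
 * are assumptions, not part of LZ4. Requires <stdlib.h> for malloc()/free(). */
#if 0
static void exampleRingDecode(void)
{
    int const maxBlockSize = 4096;            /* must match the encoder side */
    int const ringSize = LZ4_decoderRingBufferSize(maxBlockSize);
    char* const ring = (ringSize > 0) ? (char*)malloc((size_t)ringSize) : NULL;
    LZ4_streamDecode_t* const ctx = LZ4_createStreamDecode();
    char cBlock[LZ4_COMPRESSBOUND(4096)];
    int offset = 0;
    int cSize;

    if ((ring != NULL) && (ctx != NULL)) {
        while ((cSize = recvBlock(cBlock, (int)sizeof(cBlock))) > 0) {  /* hypothetical source */
            int r;
            /* not enough room left for a worst-case block : restart at position 0 */
            if (ringSize - offset < maxBlockSize) offset = 0;
            r = LZ4_decompress_safe_continue(ctx, cBlock, ring + offset, cSize, maxBlockSize);
            if (r < 0) break;
            /* ring+offset .. ring+offset+r now holds the freshly decoded block */
            offset += r;
        }
    }
    free(ring);                 /* free(NULL) is a no-op */
    LZ4_freeStreamDecode(ctx);  /* accepts NULL as well */
}
#endif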
/*
*_continue() :
    These decoding functions allow decompression of multiple blocks in "streaming" mode.
    Previously decoded blocks must still be available at the memory position where they were decoded.
    If this is not possible, save the relevant part of decoded data into a safe buffer,
    and indicate where it stands using LZ4_setStreamDecode().
*/
LZ4_FORCE_O2
int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
{
    LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
    int result;

    if (lz4sd->prefixSize == 0) {
        /* The first call, no dictionary yet. */
        assert(lz4sd->extDictSize == 0);
        result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
        if (result <= 0) return result;
        lz4sd->prefixSize = (size_t)result;
        lz4sd->prefixEnd = (BYTE*)dest + result;
    } else if (lz4sd->prefixEnd == (BYTE*)dest) {
        /* They're rolling the current segment. */
        if (lz4sd->prefixSize >= 64 KB - 1)
            result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
        else if (lz4sd->extDictSize == 0)
            result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize,
                                                         lz4sd->prefixSize);
        else
            result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize,
                                                    lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize += (size_t)result;
        lz4sd->prefixEnd  += result;
    } else {
        /* The buffer wraps around, or they're switching to another buffer. */
        lz4sd->extDictSize = lz4sd->prefixSize;
        lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
        result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize,
                                                  lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize = (size_t)result;
        lz4sd->prefixEnd  = (BYTE*)dest + result;
    }

    return result;
}
LZ4_FORCE_O2 int
LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode,
                        const char* source, char* dest, int originalSize)
{
    LZ4_streamDecode_t_internal* const lz4sd =
        (assert(LZ4_streamDecode != NULL), &LZ4_streamDecode->internal_donotuse);
    int result;

    DEBUGLOG(5, "LZ4_decompress_fast_continue (toDecodeSize=%i)", originalSize);
    assert(originalSize >= 0);

    if (lz4sd->prefixSize == 0) {
        DEBUGLOG(5, "first invocation : no prefix nor extDict");
        assert(lz4sd->extDictSize == 0);
        result = LZ4_decompress_fast(source, dest, originalSize);
        if (result <= 0) return result;
        lz4sd->prefixSize = (size_t)originalSize;
        lz4sd->prefixEnd = (BYTE*)dest + originalSize;
    } else if (lz4sd->prefixEnd == (BYTE*)dest) {
        DEBUGLOG(5, "continue using existing prefix");
        result = LZ4_decompress_unsafe_generic(
                        (const BYTE*)source, (BYTE*)dest, originalSize,
                        lz4sd->prefixSize,
                        lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize += (size_t)originalSize;
        lz4sd->prefixEnd  += originalSize;
    } else {
        DEBUGLOG(5, "prefix becomes extDict");
        lz4sd->extDictSize = lz4sd->prefixSize;
        lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
        result = LZ4_decompress_fast_extDict(source, dest, originalSize,
                                             lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize = (size_t)originalSize;
        lz4sd->prefixEnd = (BYTE*)dest + originalSize;
    }

    return result;
}
/*
Advanced decoding functions :
*_usingDict() :
    These decoding functions work the same as the "_continue" ones,
    except that the dictionary must be explicitly provided as a parameter.
*/
int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
{
    if (dictSize == 0)
        return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
    if (dictStart + dictSize == dest) {
        if (dictSize >= 64 KB - 1) {
            return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
        }
        assert(dictSize >= 0);
        return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize);
    }
    assert(dictSize >= 0);
    return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize);
}
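
/* Illustrative sketch (hypothetical caller code, excluded from compilation) :
 * single-shot decompression against an explicit dictionary with
 * LZ4_decompress_safe_usingDict(). No streaming state is kept, so the
 * dictionary is passed on every call. Parameter values are assumptions. */
#if 0
static int exampleUsingDict(const char* dict, int dictSize,
                            const char* src, int srcSize,
                            char* dst, int dstCapacity)
{
    /* returns the decompressed size, or a negative value on error */
    return LZ4_decompress_safe_usingDict(src, dst, srcSize, dstCapacity,
                                         dict, dictSize);
}
#endif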
int LZ4_decompress_safe_partial_usingDict(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity, const char* dictStart, int dictSize)
{
    if (dictSize == 0)
        return LZ4_decompress_safe_partial(source, dest, compressedSize, targetOutputSize, dstCapacity);
    if (dictStart + dictSize == dest) {
        if (dictSize >= 64 KB - 1) {
            return LZ4_decompress_safe_partial_withPrefix64k(source, dest, compressedSize, targetOutputSize, dstCapacity);
        }
        assert(dictSize >= 0);
        return LZ4_decompress_safe_partial_withSmallPrefix(source, dest, compressedSize, targetOutputSize, dstCapacity, (size_t)dictSize);
    }
    assert(dictSize >= 0);
    return LZ4_decompress_safe_partial_forceExtDict(source, dest, compressedSize, targetOutputSize, dstCapacity, dictStart, (size_t)dictSize);
}

int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
{
    if (dictSize == 0 || dictStart + dictSize == dest)
        return LZ4_decompress_unsafe_generic(
                        (const BYTE*)source, (BYTE*)dest, originalSize,
                        (size_t)dictSize, NULL, 0);
    assert(dictSize >= 0);
    return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize);
}


/*=*************************************************
*  Obsolete Functions
***************************************************/
/* obsolete compression functions */
int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
{
    return LZ4_compress_default(source, dest, inputSize, maxOutputSize);
}
int LZ4_compress(const char* src, char* dest, int srcSize)
{
    return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize));
}
int LZ4_compress_limitedOutput_withState(void* state, const char* src, char* dst, int srcSize, int dstSize)
{
    return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);
}
int LZ4_compress_withState(void* state, const char* src, char* dst, int srcSize)
{
    return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1);
}
int LZ4_compress_limitedOutput_continue(LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity)
{
    return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1);
}
int LZ4_compress_continue(LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize)
{
    return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1);
}
/*
These decompression functions are deprecated and should no longer be used.
They are only provided here for compatibility with older user programs.
- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
*/
int LZ4_uncompress (const char* source, char* dest, int outputSize)
{
    return LZ4_decompress_fast(source, dest, outputSize);
}
int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize)
{
    return LZ4_decompress_safe(source, dest, isize, maxOutputSize);
}
/* Obsolete Streaming functions */

int LZ4_sizeofStreamState(void) { return sizeof(LZ4_stream_t); }

int LZ4_resetStreamState(void* state, char* inputBuffer)
{
    (void)inputBuffer;
    LZ4_resetStream((LZ4_stream_t*)state);
    return 0;
}

#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
void* LZ4_create (char* inputBuffer)
{
    (void)inputBuffer;
    return LZ4_createStream();
}
#endif
char* LZ4_slideInputBuffer (void* state)
{
    /* avoid const char * -> char * conversion warning */
    return (char*)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary;
}

#endif   /* LZ4_COMMONDEFS_ONLY */
}