Update lz4 to 1.9.3, without tracy-specific changes.

commit bae4c5f8df (parent e580dfeed3)

common/tracy_lz4.cpp: 1206 changed lines (diff suppressed because it is too large)
@@ -32,22 +32,21 @@
   - LZ4 homepage : http://www.lz4.org
   - LZ4 source repository : https://github.com/lz4/lz4
*/
#if defined (__cplusplus)
extern "C" {
#endif

#ifndef TRACY_LZ4_H_2983827168210
#define TRACY_LZ4_H_2983827168210
#ifndef LZ4_H_2983827168210
#define LZ4_H_2983827168210

/* --- Dependency --- */
#include <stddef.h>   /* size_t */
#include <stdint.h>

namespace tracy
{

/**
  Introduction

  LZ4 is lossless compression algorithm, providing compression speed at 500 MB/s per core,
  LZ4 is lossless compression algorithm, providing compression speed >500 MB/s per core,
  scalable with multi-cores CPU. It features an extremely fast decoder, with speed in
  multiple GB/s per core, typically reaching RAM speed limits on multi-core systems.

@@ -59,16 +58,19 @@ namespace tracy
  - unbounded multiple steps (described as Streaming compression)

  lz4.h generates and decodes LZ4-compressed blocks (doc/lz4_Block_format.md).
  Decompressing a block requires additional metadata, such as its compressed size.
  Decompressing such a compressed block requires additional metadata.
  Exact metadata depends on exact decompression function.
  For the typical case of LZ4_decompress_safe(),
  metadata includes block's compressed size, and maximum bound of decompressed size.
  Each application is free to encode and pass such metadata in whichever way it wants.

  lz4.h only handle blocks, it can not generate Frames.

  Blocks are different from Frames (doc/lz4_Frame_format.md).
  Frames bundle both blocks and metadata in a specified manner.
  This are required for compressed data to be self-contained and portable.
  Embedding metadata is required for compressed data to be self-contained and portable.
  Frame format is delivered through a companion API, declared in lz4frame.h.
  Note that the `lz4` CLI can only manage frames.
  The `lz4` CLI can only manage frames.
*/

/*^***************************************************************
@@ -98,7 +100,7 @@ namespace tracy
/*------   Version   ------*/
#define LZ4_VERSION_MAJOR    1    /* for breaking interface changes  */
#define LZ4_VERSION_MINOR    9    /* for new (non-breaking) interface capabilities */
#define LZ4_VERSION_RELEASE  1    /* for tweaks, bug-fixes, or development */
#define LZ4_VERSION_RELEASE  3    /* for tweaks, bug-fixes, or development */

#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE)

@@ -130,29 +132,35 @@ LZ4LIB_API const char* LZ4_versionString (void);   /**< library version string;
 *  Simple Functions
**************************************/
/*! LZ4_compress_default() :
    Compresses 'srcSize' bytes from buffer 'src'
    into already allocated 'dst' buffer of size 'dstCapacity'.
    Compression is guaranteed to succeed if 'dstCapacity' >= LZ4_compressBound(srcSize).
    It also runs faster, so it's a recommended setting.
    If the function cannot compress 'src' into a more limited 'dst' budget,
    compression stops *immediately*, and the function result is zero.
    In which case, 'dst' content is undefined (invalid).
        srcSize : max supported value is LZ4_MAX_INPUT_SIZE.
        dstCapacity : size of buffer 'dst' (which must be already allocated)
       @return : the number of bytes written into buffer 'dst' (necessarily <= dstCapacity)
                 or 0 if compression fails
    Note : This function is protected against buffer overflow scenarios (never writes outside 'dst' buffer, nor read outside 'source' buffer).
*/
 *  Compresses 'srcSize' bytes from buffer 'src'
 *  into already allocated 'dst' buffer of size 'dstCapacity'.
 *  Compression is guaranteed to succeed if 'dstCapacity' >= LZ4_compressBound(srcSize).
 *  It also runs faster, so it's a recommended setting.
 *  If the function cannot compress 'src' into a more limited 'dst' budget,
 *  compression stops *immediately*, and the function result is zero.
 *  In which case, 'dst' content is undefined (invalid).
 *      srcSize : max supported value is LZ4_MAX_INPUT_SIZE.
 *      dstCapacity : size of buffer 'dst' (which must be already allocated)
 * @return : the number of bytes written into buffer 'dst' (necessarily <= dstCapacity)
 *           or 0 if compression fails
 * Note : This function is protected against buffer overflow scenarios (never writes outside 'dst' buffer, nor read outside 'source' buffer).
 */
LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity);

/*! LZ4_decompress_safe() :
    compressedSize : is the exact complete size of the compressed block.
    dstCapacity : is the size of destination buffer, which must be already allocated.
   @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity)
             If destination buffer is not large enough, decoding will stop and output an error code (negative value).
             If the source stream is detected malformed, the function will stop decoding and return a negative result.
    Note : This function is protected against malicious data packets (never writes outside 'dst' buffer, nor read outside 'source' buffer).
*/
 *  compressedSize : is the exact complete size of the compressed block.
 *  dstCapacity : is the size of destination buffer (which must be already allocated), presumed an upper bound of decompressed size.
 * @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity)
 *           If destination buffer is not large enough, decoding will stop and output an error code (negative value).
 *           If the source stream is detected malformed, the function will stop decoding and return a negative result.
 * Note 1 : This function is protected against malicious data packets :
 *          it will never writes outside 'dst' buffer, nor read outside 'source' buffer,
 *          even if the compressed block is maliciously modified to order the decoder to do these actions.
 *          In such case, the decoder stops immediately, and considers the compressed block malformed.
 * Note 2 : compressedSize and dstCapacity must be provided to the function, the compressed block does not contain them.
 *          The implementation is free to send / store / derive this information in whichever way is most beneficial.
 *          If there is a need for a different format which bundles together both compressed data and its metadata, consider looking at lz4frame.h instead.
 */
LZ4LIB_API int LZ4_decompress_safe (const char* src, char* dst, int compressedSize, int dstCapacity);

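A minimal round-trip sketch for the two functions above (an illustration, not part of the lz4 sources or this commit), assuming they are used through the tracy-bundled header `tracy_lz4.hpp`, which wraps the block API in `namespace tracy` as shown in these hunks:

```cpp
// Sketch: compress a block with LZ4_compress_default(), then decode it with
// LZ4_decompress_safe(). Sizes are carried by the application (Note 2 above).
#include "tracy_lz4.hpp"   // assumed tracy-bundled header (namespace tracy)
#include <cassert>
#include <cstring>
#include <string>
#include <vector>

int main()
{
    const std::string src(4096, 'z');

    // Compression cannot fail when dst has LZ4_compressBound(srcSize) bytes available.
    std::vector<char> compressed(tracy::LZ4_compressBound((int)src.size()));
    const int compressedSize = tracy::LZ4_compress_default(src.data(), compressed.data(),
                                                           (int)src.size(), (int)compressed.size());
    assert(compressedSize > 0);

    // The block does not store sizes: compressedSize and the original size must be
    // transmitted alongside it.
    std::vector<char> restored(src.size());
    const int decodedSize = tracy::LZ4_decompress_safe(compressed.data(), restored.data(),
                                                       compressedSize, (int)restored.size());
    assert(decodedSize == (int)src.size());
    assert(std::memcmp(src.data(), restored.data(), src.size()) == 0);
    return 0;
}
```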
@@ -178,7 +186,8 @@ LZ4LIB_API int LZ4_compressBound(int inputSize);
    The larger the acceleration value, the faster the algorithm, but also the lesser the compression.
    It's a trade-off. It can be fine tuned, with each successive value providing roughly +~3% to speed.
    An acceleration value of "1" is the same as regular LZ4_compress_default()
    Values <= 0 will be replaced by ACCELERATION_DEFAULT (currently == 1, see lz4.c).
    Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT (currently == 1, see lz4.c).
    Values > LZ4_ACCELERATION_MAX will be replaced by LZ4_ACCELERATION_MAX (currently == 65537, see lz4.c).
*/
LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);

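A short sketch of the acceleration trade-off described above (not part of the lz4 sources; assumes the tracy-bundled `tracy_lz4.hpp`, namespace `tracy`):

```cpp
// Sketch: same input compressed at acceleration 1 and 8; higher acceleration is
// faster but usually produces a larger block.
#include "tracy_lz4.hpp"   // assumed tracy-bundled header (namespace tracy)
#include <cstdio>
#include <vector>

int main()
{
    const char src[] = "aaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbcccccccccccccccccccc";
    const int srcSize = (int)sizeof(src);
    std::vector<char> dst(tracy::LZ4_compressBound(srcSize));

    // acceleration == 1 behaves like LZ4_compress_default(); values <= 0 fall back to the default.
    const int sizeAccel1 = tracy::LZ4_compress_fast(src, dst.data(), srcSize, (int)dst.size(), 1);
    const int sizeAccel8 = tracy::LZ4_compress_fast(src, dst.data(), srcSize, (int)dst.size(), 8);
    std::printf("accel=1 -> %d bytes, accel=8 -> %d bytes (0 means failure)\n", sizeAccel1, sizeAccel8);
    return 0;
}
```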
@@ -204,7 +213,18 @@ LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* d
 *  New value is necessarily <= input value.
 * @return : Nb bytes written into 'dst' (necessarily <= targetDestSize)
 *           or 0 if compression fails.
 */
 *
 * Note : from v1.8.2 to v1.9.1, this function had a bug (fixed in v1.9.2+):
 *        the produced compressed content could, in specific circumstances,
 *        require to be decompressed into a destination buffer larger
 *        by at least 1 byte than the content to decompress.
 *        If an application uses `LZ4_compress_destSize()`,
 *        it's highly recommended to update liblz4 to v1.9.2 or better.
 *        If this can't be done or ensured,
 *        the receiving decompression function should provide
 *        a dstCapacity which is > decompressedSize, by at least 1 byte.
 *        See https://github.com/lz4/lz4/issues/859 for details
 */
LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePtr, int targetDstSize);

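A sketch of the "fill a fixed output budget" use of LZ4_compress_destSize() described above (not part of the lz4 sources; assumes `tracy_lz4.hpp`, namespace `tracy`):

```cpp
// Sketch: pack as much of a large input as will fit into a fixed-size packet.
// srcSize is in/out: on return it holds the number of input bytes actually consumed.
#include "tracy_lz4.hpp"   // assumed tracy-bundled header (namespace tracy)
#include <cstdio>
#include <vector>

int main()
{
    std::vector<char> src(64 * 1024, 'x');   // highly compressible input
    char packet[256];                        // hard output budget

    int srcSize = (int)src.size();
    const int written = tracy::LZ4_compress_destSize(src.data(), packet, &srcSize, (int)sizeof(packet));
    if (written == 0) { std::puts("compression failed"); return 1; }

    // Per the note above, a decoder linked against lz4 < 1.9.2 should be given a
    // destination at least 1 byte larger than the srcSize consumed here.
    std::printf("consumed %d input bytes into a %d-byte packet\n", srcSize, written);
    return 0;
}
```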
@@ -212,25 +232,35 @@ LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePt
 *  Decompress an LZ4 compressed block, of size 'srcSize' at position 'src',
 *  into destination buffer 'dst' of size 'dstCapacity'.
 *  Up to 'targetOutputSize' bytes will be decoded.
 *  The function stops decoding on reaching this objective,
 *  which can boost performance when only the beginning of a block is required.
 *  The function stops decoding on reaching this objective.
 *  This can be useful to boost performance
 *  whenever only the beginning of a block is required.
 *
 * @return : the number of bytes decoded in `dst` (necessarily <= dstCapacity)
 * @return : the number of bytes decoded in `dst` (necessarily <= targetOutputSize)
 *           If source stream is detected malformed, function returns a negative result.
 *
 *  Note : @return can be < targetOutputSize, if compressed block contains less data.
 *  Note 1 : @return can be < targetOutputSize, if compressed block contains less data.
 *
 *  Note 2 : this function features 2 parameters, targetOutputSize and dstCapacity,
 *           and expects targetOutputSize <= dstCapacity.
 *           It effectively stops decoding on reaching targetOutputSize,
 *  Note 2 : targetOutputSize must be <= dstCapacity
 *
 *  Note 3 : this function effectively stops decoding on reaching targetOutputSize,
 *           so dstCapacity is kind of redundant.
 *           This is because in a previous version of this function,
 *           decoding operation would not "break" a sequence in the middle.
 *           As a consequence, there was no guarantee that decoding would stop at exactly targetOutputSize,
 *           This is because in older versions of this function,
 *           decoding operation would still write complete sequences.
 *           Therefore, there was no guarantee that it would stop writing at exactly targetOutputSize,
 *           it could write more bytes, though only up to dstCapacity.
 *           Some "margin" used to be required for this operation to work properly.
 *           This is no longer necessary.
 *           The function nonetheless keeps its signature, in an effort to not break API.
 *           Thankfully, this is no longer necessary.
 *           The function nonetheless keeps the same signature, in an effort to preserve API compatibility.
 *
 *  Note 4 : If srcSize is the exact size of the block,
 *           then targetOutputSize can be any value,
 *           including larger than the block's decompressed size.
 *           The function will, at most, generate block's decompressed size.
 *
 *  Note 5 : If srcSize is _larger_ than block's compressed size,
 *           then targetOutputSize **MUST** be <= block's decompressed size.
 *           Otherwise, *silent corruption will occur*.
 */
LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcSize, int targetOutputSize, int dstCapacity);

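A sketch of partial decoding as documented above, recovering only the first bytes of a block (not part of the lz4 sources; assumes `tracy_lz4.hpp`, namespace `tracy`):

```cpp
// Sketch: decode only an 8-byte header from the front of a compressed block
// with LZ4_decompress_safe_partial().
#include "tracy_lz4.hpp"   // assumed tracy-bundled header (namespace tracy)
#include <cstdio>
#include <cstring>
#include <vector>

int main()
{
    char original[1024];
    std::memset(original, 'A', sizeof(original));
    std::memcpy(original, "HEADER01", 8);                 // the interesting part is at the front

    std::vector<char> block(tracy::LZ4_compressBound((int)sizeof(original)));
    const int blockSize = tracy::LZ4_compress_default(original, block.data(),
                                                      (int)sizeof(original), (int)block.size());
    if (blockSize <= 0) return 1;

    // srcSize is the exact block size (Note 4), targetOutputSize <= dstCapacity (Note 2).
    char header[8];
    const int decoded = tracy::LZ4_decompress_safe_partial(block.data(), header, blockSize,
                                                           (int)sizeof(header), (int)sizeof(header));
    if (decoded < 0) { std::puts("malformed block"); return 1; }
    std::printf("decoded %d header bytes\n", decoded);
    return 0;
}
```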
@ -389,6 +419,8 @@ LZ4LIB_API int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecod
|
||||
*/
|
||||
LZ4LIB_API int LZ4_decompress_safe_usingDict (const char* src, char* dst, int srcSize, int dstCapcity, const char* dictStart, int dictSize);
|
||||
|
||||
#endif /* LZ4_H_2983827168210 */
|
||||
|
||||
|
||||
/*^*************************************
|
||||
* !!!!!! STATIC LINKING ONLY !!!!!!
|
||||
@ -414,14 +446,17 @@ LZ4LIB_API int LZ4_decompress_safe_usingDict (const char* src, char* dst, int sr
|
||||
* define LZ4_PUBLISH_STATIC_FUNCTIONS when building the LZ4 library.
|
||||
******************************************************************************/
|
||||
|
||||
#ifdef LZ4_STATIC_LINKING_ONLY
|
||||
|
||||
#ifndef LZ4_STATIC_3504398509
|
||||
#define LZ4_STATIC_3504398509
|
||||
|
||||
#ifdef LZ4_PUBLISH_STATIC_FUNCTIONS
|
||||
#define LZ4LIB_STATIC_API LZ4LIB_API
|
||||
#else
|
||||
#define LZ4LIB_STATIC_API
|
||||
#endif
|
||||
|
||||
#ifdef LZ4_STATIC_LINKING_ONLY
|
||||
|
||||
|
||||
/*! LZ4_compress_fast_extState_fastReset() :
|
||||
* A variant of LZ4_compress_fast_extState().
|
||||
@ -463,78 +498,135 @@ LZ4LIB_STATIC_API int LZ4_compress_fast_extState_fastReset (void* state, const c
|
||||
*/
|
||||
LZ4LIB_STATIC_API void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream);
|
||||
|
||||
|
||||
/*! In-place compression and decompression
 *
 * It's possible to have input and output sharing the same buffer,
 * for highly contrained memory environments.
 * In both cases, it requires input to lay at the end of the buffer,
 * and decompression to start at beginning of the buffer.
 * Buffer size must feature some margin, hence be larger than final size.
 *
 * |<------------------------buffer--------------------------------->|
 *                            |<-----------compressed data--------->|
 * |<-----------decompressed size------------------>|
 *                                                  |<----margin---->|
 *
 * This technique is more useful for decompression,
 * since decompressed size is typically larger,
 * and margin is short.
 *
 * In-place decompression will work inside any buffer
 * which size is >= LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize).
 * This presumes that decompressedSize > compressedSize.
 * Otherwise, it means compression actually expanded data,
 * and it would be more efficient to store such data with a flag indicating it's not compressed.
 * This can happen when data is not compressible (already compressed, or encrypted).
 *
 * For in-place compression, margin is larger, as it must be able to cope with both
 * history preservation, requiring input data to remain unmodified up to LZ4_DISTANCE_MAX,
 * and data expansion, which can happen when input is not compressible.
 * As a consequence, buffer size requirements are much higher,
 * and memory savings offered by in-place compression are more limited.
 *
 * There are ways to limit this cost for compression :
 * - Reduce history size, by modifying LZ4_DISTANCE_MAX.
 *   Note that it is a compile-time constant, so all compressions will apply this limit.
 *   Lower values will reduce compression ratio, except when input_size < LZ4_DISTANCE_MAX,
 *   so it's a reasonable trick when inputs are known to be small.
 * - Require the compressor to deliver a "maximum compressed size".
 *   This is the `dstCapacity` parameter in `LZ4_compress*()`.
 *   When this size is < LZ4_COMPRESSBOUND(inputSize), then compression can fail,
 *   in which case, the return code will be 0 (zero).
 *   The caller must be ready for these cases to happen,
 *   and typically design a backup scheme to send data uncompressed.
 * The combination of both techniques can significantly reduce
 * the amount of margin required for in-place compression.
 *
 * In-place compression can work in any buffer
 * which size is >= (maxCompressedSize)
 * with maxCompressedSize == LZ4_COMPRESSBOUND(srcSize) for guaranteed compression success.
 * LZ4_COMPRESS_INPLACE_BUFFER_SIZE() depends on both maxCompressedSize and LZ4_DISTANCE_MAX,
 * so it's possible to reduce memory requirements by playing with them.
 */

#define LZ4_DECOMPRESS_INPLACE_MARGIN(compressedSize)          (((compressedSize) >> 8) + 32)
#define LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize)   ((decompressedSize) + LZ4_DECOMPRESS_INPLACE_MARGIN(decompressedSize))  /**< note: presumes that compressedSize < decompressedSize. note2: margin is overestimated a bit, since it could use compressedSize instead */

#ifndef LZ4_DISTANCE_MAX   /* history window size; can be user-defined at compile time */
#  define LZ4_DISTANCE_MAX 65535   /* set to maximum value by default */
#endif

#define LZ4_COMPRESS_INPLACE_MARGIN                           (LZ4_DISTANCE_MAX + 32)   /* LZ4_DISTANCE_MAX can be safely replaced by srcSize when it's smaller */
#define LZ4_COMPRESS_INPLACE_BUFFER_SIZE(maxCompressedSize)   ((maxCompressedSize) + LZ4_COMPRESS_INPLACE_MARGIN)  /**< maxCompressedSize is generally LZ4_COMPRESSBOUND(inputSize), but can be set to any lower value, with the risk that compression can fail (return code 0(zero)) */

#endif   /* LZ4_STATIC_3504398509 */
#endif   /* LZ4_STATIC_LINKING_ONLY */

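A sketch of the buffer layout for in-place decompression described above (not part of the lz4 sources; assumes `tracy_lz4.hpp`, namespace `tracy`, and that the caller knows the block's decompressed size):

```cpp
// Sketch: one allocation sized with LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(); the compressed
// input is parked at the end of the buffer and the output grows from the beginning.
// The in-place macros are only published under LZ4_STATIC_LINKING_ONLY.
#define LZ4_STATIC_LINKING_ONLY
#include "tracy_lz4.hpp"   // assumed tracy-bundled header (namespace tracy)
#include <algorithm>
#include <vector>

// Requires decompressedSize > compressedSize, as the comment above presumes.
bool decompressInPlace(const char* compressed, int compressedSize, int decompressedSize)
{
    std::vector<char> buf(LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize));
    char* const input = buf.data() + buf.size() - compressedSize;   // input at the end
    std::copy(compressed, compressed + compressedSize, input);

    const int decoded = tracy::LZ4_decompress_safe(input, buf.data(), compressedSize, decompressedSize);
    return decoded == decompressedSize;   // output now occupies the beginning of 'buf'
}
```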
#ifndef LZ4_H_98237428734687
|
||||
#define LZ4_H_98237428734687
|
||||
|
||||
/*-************************************************************
|
||||
* PRIVATE DEFINITIONS
|
||||
* Private Definitions
|
||||
**************************************************************
|
||||
* Do not use these definitions directly.
|
||||
* They are only exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`.
|
||||
* Accessing members will expose code to API and/or ABI break in future versions of the library.
|
||||
* Accessing members will expose user code to API and/or ABI break in future versions of the library.
|
||||
**************************************************************/
|
||||
#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2)
|
||||
#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
|
||||
#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) /* required as macro for static allocation */
|
||||
|
||||
#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
|
||||
#include <stdint.h>
|
||||
|
||||
typedef struct LZ4_stream_t_internal LZ4_stream_t_internal;
|
||||
struct LZ4_stream_t_internal {
|
||||
uint32_t hashTable[LZ4_HASH_SIZE_U32];
|
||||
uint32_t currentOffset;
|
||||
uint16_t dirty;
|
||||
uint16_t tableType;
|
||||
const uint8_t* dictionary;
|
||||
const LZ4_stream_t_internal* dictCtx;
|
||||
uint32_t dictSize;
|
||||
};
|
||||
|
||||
typedef struct {
|
||||
const uint8_t* externalDict;
|
||||
size_t extDictSize;
|
||||
const uint8_t* prefixEnd;
|
||||
size_t prefixSize;
|
||||
} LZ4_streamDecode_t_internal;
|
||||
|
||||
# include <stdint.h>
|
||||
typedef int8_t LZ4_i8;
|
||||
typedef uint8_t LZ4_byte;
|
||||
typedef uint16_t LZ4_u16;
|
||||
typedef uint32_t LZ4_u32;
|
||||
#else
|
||||
|
||||
typedef struct LZ4_stream_t_internal LZ4_stream_t_internal;
|
||||
struct LZ4_stream_t_internal {
|
||||
unsigned int hashTable[LZ4_HASH_SIZE_U32];
|
||||
unsigned int currentOffset;
|
||||
unsigned short dirty;
|
||||
unsigned short tableType;
|
||||
const unsigned char* dictionary;
|
||||
const LZ4_stream_t_internal* dictCtx;
|
||||
unsigned int dictSize;
|
||||
};
|
||||
|
||||
typedef struct {
|
||||
const unsigned char* externalDict;
|
||||
const unsigned char* prefixEnd;
|
||||
size_t extDictSize;
|
||||
size_t prefixSize;
|
||||
} LZ4_streamDecode_t_internal;
|
||||
|
||||
typedef signed char LZ4_i8;
|
||||
typedef unsigned char LZ4_byte;
|
||||
typedef unsigned short LZ4_u16;
|
||||
typedef unsigned int LZ4_u32;
|
||||
#endif
|
||||
|
||||
typedef struct LZ4_stream_t_internal LZ4_stream_t_internal;
|
||||
struct LZ4_stream_t_internal {
|
||||
LZ4_u32 hashTable[LZ4_HASH_SIZE_U32];
|
||||
LZ4_u32 currentOffset;
|
||||
LZ4_u32 tableType;
|
||||
const LZ4_byte* dictionary;
|
||||
const LZ4_stream_t_internal* dictCtx;
|
||||
LZ4_u32 dictSize;
|
||||
};
|
||||
|
||||
typedef struct {
|
||||
const LZ4_byte* externalDict;
|
||||
size_t extDictSize;
|
||||
const LZ4_byte* prefixEnd;
|
||||
size_t prefixSize;
|
||||
} LZ4_streamDecode_t_internal;
|
||||
|
||||
|
||||
/*! LZ4_stream_t :
 *  information structure to track an LZ4 stream.
 *  Do not use below internal definitions directly !
 *  Declare or allocate an LZ4_stream_t instead.
 *  LZ4_stream_t can also be created using LZ4_createStream(), which is recommended.
 *  The structure definition can be convenient for static allocation
 *  (on stack, or as part of larger structure).
 *  Init this structure with LZ4_initStream() before first use.
 *  note : only use this definition in association with static linking !
 *    this definition is not API/ABI safe, and may change in a future version.
 *    this definition is not API/ABI safe, and may change in future versions.
 */
#define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE-3)) + 4 + ((sizeof(void*)==16) ? 4 : 0) /*AS-400*/ )
#define LZ4_STREAMSIZE     (LZ4_STREAMSIZE_U64 * sizeof(unsigned long long))
#define LZ4_STREAMSIZE       16416  /* static size, for inter-version compatibility */
#define LZ4_STREAMSIZE_VOIDP (LZ4_STREAMSIZE / sizeof(void*))
union LZ4_stream_u {
    unsigned long long table[LZ4_STREAMSIZE_U64];
    void* table[LZ4_STREAMSIZE_VOIDP];
    LZ4_stream_t_internal internal_donotuse;
} ;  /* previously typedef'd to LZ4_stream_t */
};  /* previously typedef'd to LZ4_stream_t */

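A sketch of the static-allocation path described in the comment above, using LZ4_initStream() (documented just below); an illustration, not part of the lz4 sources, assuming `tracy_lz4.hpp` (namespace `tracy`):

```cpp
// Sketch: keep the LZ4_stream_t on the stack (about 16 KB) instead of calling
// LZ4_createStream(), then compress one block through the streaming entry point.
#include "tracy_lz4.hpp"   // assumed tracy-bundled header (namespace tracy)

int compressWithStackState(const char* src, int srcSize, char* dst, int dstCapacity)
{
    tracy::LZ4_stream_t state;                                   // no heap allocation
    if (tracy::LZ4_initStream(&state, sizeof(state)) == NULL)    // mandatory before first use
        return 0;
    // acceleration = 1 behaves like LZ4_compress_default()
    return tracy::LZ4_compress_fast_continue(&state, src, dst, srcSize, dstCapacity, 1);
}
```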
/*! LZ4_initStream() : v1.9.0+
|
||||
* An LZ4_stream_t structure must be initialized at least once.
|
||||
@ -568,6 +660,7 @@ union LZ4_streamDecode_u {
|
||||
} ; /* previously typedef'd to LZ4_streamDecode_t */
|
||||
|
||||
|
||||
|
||||
/*-************************************
|
||||
* Obsolete Functions
|
||||
**************************************/
|
||||
@ -586,34 +679,34 @@ union LZ4_streamDecode_u {
|
||||
#ifdef LZ4_DISABLE_DEPRECATE_WARNINGS
|
||||
# define LZ4_DEPRECATED(message) /* disable deprecation warnings */
|
||||
#else
|
||||
# define LZ4_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
|
||||
# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
|
||||
# define LZ4_DEPRECATED(message) [[deprecated(message)]]
|
||||
# elif (LZ4_GCC_VERSION >= 405) || defined(__clang__)
|
||||
# define LZ4_DEPRECATED(message) __attribute__((deprecated(message)))
|
||||
# elif (LZ4_GCC_VERSION >= 301)
|
||||
# define LZ4_DEPRECATED(message) __attribute__((deprecated))
|
||||
# elif defined(_MSC_VER)
|
||||
# define LZ4_DEPRECATED(message) __declspec(deprecated(message))
|
||||
# elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 45))
|
||||
# define LZ4_DEPRECATED(message) __attribute__((deprecated(message)))
|
||||
# elif defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 31)
|
||||
# define LZ4_DEPRECATED(message) __attribute__((deprecated))
|
||||
# else
|
||||
# pragma message("WARNING: You need to implement LZ4_DEPRECATED for this compiler")
|
||||
# define LZ4_DEPRECATED(message)
|
||||
# pragma message("WARNING: LZ4_DEPRECATED needs custom implementation for this compiler")
|
||||
# define LZ4_DEPRECATED(message) /* disabled */
|
||||
# endif
|
||||
#endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */
|
||||
|
||||
/* Obsolete compression functions */
|
||||
LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress (const char* source, char* dest, int sourceSize);
|
||||
LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress_limitedOutput (const char* source, char* dest, int sourceSize, int maxOutputSize);
|
||||
/*! Obsolete compression functions (since v1.7.3) */
|
||||
LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress (const char* src, char* dest, int srcSize);
|
||||
LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress_limitedOutput (const char* src, char* dest, int srcSize, int maxOutputSize);
|
||||
LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize);
|
||||
LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
|
||||
LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize);
|
||||
LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize);
|
||||
|
||||
/* Obsolete decompression functions */
|
||||
/*! Obsolete decompression functions (since v1.8.0) */
|
||||
LZ4_DEPRECATED("use LZ4_decompress_fast() instead") LZ4LIB_API int LZ4_uncompress (const char* source, char* dest, int outputSize);
|
||||
LZ4_DEPRECATED("use LZ4_decompress_safe() instead") LZ4LIB_API int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize);
|
||||
|
||||
/* Obsolete streaming functions; degraded functionality; do not use!
|
||||
/* Obsolete streaming functions (since v1.7.0)
|
||||
* degraded functionality; do not use!
|
||||
*
|
||||
* In order to perform streaming compression, these functions depended on data
|
||||
* that is no longer tracked in the state. They have been preserved as well as
|
||||
@ -627,23 +720,22 @@ LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API int LZ4_sizeofStre
|
||||
LZ4_DEPRECATED("Use LZ4_resetStream() instead") LZ4LIB_API int LZ4_resetStreamState(void* state, char* inputBuffer);
|
||||
LZ4_DEPRECATED("Use LZ4_saveDict() instead") LZ4LIB_API char* LZ4_slideInputBuffer (void* state);
|
||||
|
||||
/* Obsolete streaming decoding functions */
|
||||
/*! Obsolete streaming decoding functions (since v1.7.0) */
|
||||
LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") LZ4LIB_API int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize);
|
||||
LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize);
|
||||
|
||||
/*! LZ4_decompress_fast() : **unsafe!**
|
||||
/*! Obsolete LZ4_decompress_fast variants (since v1.9.0) :
|
||||
* These functions used to be faster than LZ4_decompress_safe(),
|
||||
* but it has changed, and they are now slower than LZ4_decompress_safe().
|
||||
* but this is no longer the case. They are now slower.
|
||||
* This is because LZ4_decompress_fast() doesn't know the input size,
|
||||
* and therefore must progress more cautiously in the input buffer to not read beyond the end of block.
|
||||
* and therefore must progress more cautiously into the input buffer to not read beyond the end of block.
|
||||
* On top of that `LZ4_decompress_fast()` is not protected vs malformed or malicious inputs, making it a security liability.
|
||||
* As a consequence, LZ4_decompress_fast() is strongly discouraged, and deprecated.
|
||||
*
|
||||
* The last remaining LZ4_decompress_fast() specificity is that
|
||||
* it can decompress a block without knowing its compressed size.
|
||||
* Such functionality could be achieved in a more secure manner,
|
||||
* by also providing the maximum size of input buffer,
|
||||
* but it would require new prototypes, and adaptation of the implementation to this new use case.
|
||||
* Such functionality can be achieved in a more secure manner
|
||||
* by employing LZ4_decompress_safe_partial().
|
||||
*
|
||||
* Parameters:
|
||||
* originalSize : is the uncompressed size to regenerate.
|
||||
@ -658,7 +750,6 @@ LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4
|
||||
* But they may happen if input data is invalid (error or intentional tampering).
|
||||
* As a consequence, use these functions in trusted environments with trusted data **only**.
|
||||
*/
|
||||
|
||||
LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe() instead")
|
||||
LZ4LIB_API int LZ4_decompress_fast (const char* src, char* dst, int originalSize);
|
||||
LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_continue() instead")
|
||||
@ -674,6 +765,10 @@ LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* src, char* dst, int or
|
||||
*/
|
||||
LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr);
|
||||
|
||||
}
|
||||
|
||||
#endif /* LZ4_H_2983827168210 */
|
||||
#endif /* LZ4_H_98237428734687 */
|
||||
|
||||
|
||||
#if defined (__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
@ -50,10 +50,10 @@
|
||||
|
||||
/*=== Dependency ===*/
|
||||
#define LZ4_HC_STATIC_LINKING_ONLY
|
||||
#include "tracy_lz4hc.hpp"
|
||||
#include "lz4hc.h"
|
||||
|
||||
|
||||
/*=== Common LZ4 definitions ===*/
|
||||
/*=== Common definitions ===*/
|
||||
#if defined(__GNUC__)
|
||||
# pragma GCC diagnostic ignored "-Wunused-function"
|
||||
#endif
|
||||
@ -61,21 +61,15 @@
|
||||
# pragma clang diagnostic ignored "-Wunused-function"
|
||||
#endif
|
||||
|
||||
namespace tracy
|
||||
{
|
||||
#define LZ4_COMMONDEFS_ONLY
|
||||
#ifndef LZ4_SRC_INCLUDED
|
||||
#include "lz4.c" /* LZ4_count, constants, mem */
|
||||
#endif
|
||||
|
||||
|
||||
/*=== Enums ===*/
|
||||
typedef enum { noDictCtx, usingDictCtxHc } dictCtx_directive;
|
||||
|
||||
}
|
||||
|
||||
#define LZ4_COMMONDEFS_ONLY
|
||||
#ifndef LZ4_SRC_INCLUDED
|
||||
#include "tracy_lz4.cpp" /* LZ4_count, constants, mem */
|
||||
#endif
|
||||
|
||||
namespace tracy
|
||||
{
|
||||
|
||||
/*=== Constants ===*/
|
||||
#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
|
||||
@ -99,7 +93,7 @@ static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)
|
||||
**************************************/
|
||||
static void LZ4HC_clearTables (LZ4HC_CCtx_internal* hc4)
|
||||
{
|
||||
MEM_INIT((void*)hc4->hashTable, 0, sizeof(hc4->hashTable));
|
||||
MEM_INIT(hc4->hashTable, 0, sizeof(hc4->hashTable));
|
||||
MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
|
||||
}
|
||||
|
||||
@ -158,13 +152,28 @@ int LZ4HC_countBack(const BYTE* const ip, const BYTE* const match,
|
||||
return back;
|
||||
}
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
# define LZ4HC_rotl32(x,r) _rotl(x,r)
|
||||
#else
|
||||
# define LZ4HC_rotl32(x,r) ((x << r) | (x >> (32 - r)))
|
||||
#endif
|
||||
|
||||
|
||||
static U32 LZ4HC_rotatePattern(size_t const rotate, U32 const pattern)
|
||||
{
|
||||
size_t const bitsToRotate = (rotate & (sizeof(pattern) - 1)) << 3;
|
||||
if (bitsToRotate == 0) return pattern;
|
||||
return LZ4HC_rotl32(pattern, (int)bitsToRotate);
|
||||
}
|
||||
|
||||
/* LZ4HC_countPattern() :
|
||||
* pattern32 must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!) */
|
||||
static unsigned
|
||||
LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32)
|
||||
{
|
||||
const BYTE* const iStart = ip;
|
||||
reg_t const pattern = (sizeof(pattern)==8) ? (reg_t)pattern32 + (((reg_t)pattern32) << 32) : pattern32;
|
||||
reg_t const pattern = (sizeof(pattern)==8) ?
|
||||
(reg_t)pattern32 + (((reg_t)pattern32) << (sizeof(pattern)*4)) : pattern32;
|
||||
|
||||
while (likely(ip < iEnd-(sizeof(pattern)-1))) {
|
||||
reg_t const diff = LZ4_read_ARCH(ip) ^ pattern;
|
||||
@ -210,6 +219,16 @@ LZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern)
|
||||
return (unsigned)(iStart - ip);
|
||||
}
|
||||
|
||||
/* LZ4HC_protectDictEnd() :
 * Checks if the match is in the last 3 bytes of the dictionary, so reading the
 * 4 byte MINMATCH would overflow.
 * @returns true if the match index is okay.
 */
static int LZ4HC_protectDictEnd(U32 const dictLimit, U32 const matchIndex)
{
    return ((U32)((dictLimit - 1) - matchIndex) >= 3);
}

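A small worked check of the unsigned-wraparound comparison in the helper above (an illustration, not part of the lz4 sources): indices inside the last 3 bytes of the dictionary are rejected, while indices at or past `dictLimit` wrap to large values and pass.

```cpp
// Sketch: the same expression with explicit fixed-width types, exercised on a few indices.
#include <cassert>
#include <cstdint>

static int protectDictEnd(uint32_t dictLimit, uint32_t matchIndex)
{
    return ((uint32_t)((dictLimit - 1) - matchIndex) >= 3);
}

int main()
{
    assert( protectDictEnd(100,  96));   // 4 bytes readable before the dictionary end
    assert(!protectDictEnd(100,  97));   // inside the last 3 bytes: reading MINMATCH would overflow
    assert(!protectDictEnd(100,  99));
    assert( protectDictEnd(100, 150));   // past dictLimit (current prefix): wraps to a huge value
    return 0;
}
```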
typedef enum { rep_untested, rep_not, rep_confirmed } repeat_state_e;
|
||||
typedef enum { favorCompressionRatio=0, favorDecompressionSpeed } HCfavor_e;
|
||||
|
||||
@ -235,7 +254,7 @@ LZ4HC_InsertAndGetWiderMatch (
|
||||
const U32 dictLimit = hc4->dictLimit;
|
||||
const BYTE* const lowPrefixPtr = base + dictLimit;
|
||||
const U32 ipIndex = (U32)(ip - base);
|
||||
const U32 lowestMatchIndex = (hc4->lowLimit + 64 KB > ipIndex) ? hc4->lowLimit : ipIndex - LZ4_DISTANCE_MAX;
|
||||
const U32 lowestMatchIndex = (hc4->lowLimit + (LZ4_DISTANCE_MAX + 1) > ipIndex) ? hc4->lowLimit : ipIndex - LZ4_DISTANCE_MAX;
|
||||
const BYTE* const dictBase = hc4->dictBase;
|
||||
int const lookBackLength = (int)(ip-iLowLimit);
|
||||
int nbAttempts = maxNbAttempts;
|
||||
@ -252,7 +271,7 @@ LZ4HC_InsertAndGetWiderMatch (
|
||||
DEBUGLOG(7, "First match at index %u / %u (lowestMatchIndex)",
|
||||
matchIndex, lowestMatchIndex);
|
||||
|
||||
while ((matchIndex>=lowestMatchIndex) && (nbAttempts)) {
|
||||
while ((matchIndex>=lowestMatchIndex) && (nbAttempts>0)) {
|
||||
int matchLength=0;
|
||||
nbAttempts--;
|
||||
assert(matchIndex < ipIndex);
|
||||
@ -294,14 +313,21 @@ LZ4HC_InsertAndGetWiderMatch (
|
||||
if (chainSwap && matchLength==longest) { /* better match => select a better chain */
|
||||
assert(lookBackLength==0); /* search forward only */
|
||||
if (matchIndex + (U32)longest <= ipIndex) {
|
||||
int const kTrigger = 4;
|
||||
U32 distanceToNextMatch = 1;
|
||||
int const end = longest - MINMATCH + 1;
|
||||
int step = 1;
|
||||
int accel = 1 << kTrigger;
|
||||
int pos;
|
||||
for (pos = 0; pos <= longest - MINMATCH; pos++) {
|
||||
for (pos = 0; pos < end; pos += step) {
|
||||
U32 const candidateDist = DELTANEXTU16(chainTable, matchIndex + (U32)pos);
|
||||
step = (accel++ >> kTrigger);
|
||||
if (candidateDist > distanceToNextMatch) {
|
||||
distanceToNextMatch = candidateDist;
|
||||
matchChainPos = (U32)pos;
|
||||
} }
|
||||
accel = 1 << kTrigger;
|
||||
}
|
||||
}
|
||||
if (distanceToNextMatch > 1) {
|
||||
if (distanceToNextMatch > matchIndex) break; /* avoid overflow */
|
||||
matchIndex -= distanceToNextMatch;
|
||||
@ -320,34 +346,61 @@ LZ4HC_InsertAndGetWiderMatch (
|
||||
} else {
|
||||
repeat = rep_not;
|
||||
} }
|
||||
if ( (repeat == rep_confirmed)
|
||||
&& (matchCandidateIdx >= dictLimit) ) { /* same segment only */
|
||||
const BYTE* const matchPtr = base + matchCandidateIdx;
|
||||
if ( (repeat == rep_confirmed) && (matchCandidateIdx >= lowestMatchIndex)
|
||||
&& LZ4HC_protectDictEnd(dictLimit, matchCandidateIdx) ) {
|
||||
const int extDict = matchCandidateIdx < dictLimit;
|
||||
const BYTE* const matchPtr = (extDict ? dictBase : base) + matchCandidateIdx;
|
||||
if (LZ4_read32(matchPtr) == pattern) { /* good candidate */
|
||||
size_t const forwardPatternLength = LZ4HC_countPattern(matchPtr+sizeof(pattern), iHighLimit, pattern) + sizeof(pattern);
|
||||
const BYTE* const lowestMatchPtr = (lowPrefixPtr + LZ4_DISTANCE_MAX >= ip) ? lowPrefixPtr : ip - LZ4_DISTANCE_MAX;
|
||||
size_t const backLength = LZ4HC_reverseCountPattern(matchPtr, lowestMatchPtr, pattern);
|
||||
size_t const currentSegmentLength = backLength + forwardPatternLength;
|
||||
|
||||
if ( (currentSegmentLength >= srcPatternLength) /* current pattern segment large enough to contain full srcPatternLength */
|
||||
&& (forwardPatternLength <= srcPatternLength) ) { /* haven't reached this position yet */
|
||||
matchIndex = matchCandidateIdx + (U32)forwardPatternLength - (U32)srcPatternLength; /* best position, full pattern, might be followed by more match */
|
||||
} else {
|
||||
matchIndex = matchCandidateIdx - (U32)backLength; /* farthest position in current segment, will find a match of length currentSegmentLength + maybe some back */
|
||||
if (lookBackLength==0) { /* no back possible */
|
||||
size_t const maxML = MIN(currentSegmentLength, srcPatternLength);
|
||||
if ((size_t)longest < maxML) {
|
||||
assert(base + matchIndex < ip);
|
||||
if (ip - (base+matchIndex) > LZ4_DISTANCE_MAX) break;
|
||||
assert(maxML < 2 GB);
|
||||
longest = (int)maxML;
|
||||
*matchpos = base + matchIndex; /* virtual pos, relative to ip, to retrieve offset */
|
||||
*startpos = ip;
|
||||
const BYTE* const dictStart = dictBase + hc4->lowLimit;
|
||||
const BYTE* const iLimit = extDict ? dictBase + dictLimit : iHighLimit;
|
||||
size_t forwardPatternLength = LZ4HC_countPattern(matchPtr+sizeof(pattern), iLimit, pattern) + sizeof(pattern);
|
||||
if (extDict && matchPtr + forwardPatternLength == iLimit) {
|
||||
U32 const rotatedPattern = LZ4HC_rotatePattern(forwardPatternLength, pattern);
|
||||
forwardPatternLength += LZ4HC_countPattern(lowPrefixPtr, iHighLimit, rotatedPattern);
|
||||
}
|
||||
{ const BYTE* const lowestMatchPtr = extDict ? dictStart : lowPrefixPtr;
|
||||
size_t backLength = LZ4HC_reverseCountPattern(matchPtr, lowestMatchPtr, pattern);
|
||||
size_t currentSegmentLength;
|
||||
if (!extDict && matchPtr - backLength == lowPrefixPtr && hc4->lowLimit < dictLimit) {
|
||||
U32 const rotatedPattern = LZ4HC_rotatePattern((U32)(-(int)backLength), pattern);
|
||||
backLength += LZ4HC_reverseCountPattern(dictBase + dictLimit, dictStart, rotatedPattern);
|
||||
}
|
||||
/* Limit backLength not go further than lowestMatchIndex */
|
||||
backLength = matchCandidateIdx - MAX(matchCandidateIdx - (U32)backLength, lowestMatchIndex);
|
||||
assert(matchCandidateIdx - backLength >= lowestMatchIndex);
|
||||
currentSegmentLength = backLength + forwardPatternLength;
|
||||
/* Adjust to end of pattern if the source pattern fits, otherwise the beginning of the pattern */
|
||||
if ( (currentSegmentLength >= srcPatternLength) /* current pattern segment large enough to contain full srcPatternLength */
|
||||
&& (forwardPatternLength <= srcPatternLength) ) { /* haven't reached this position yet */
|
||||
U32 const newMatchIndex = matchCandidateIdx + (U32)forwardPatternLength - (U32)srcPatternLength; /* best position, full pattern, might be followed by more match */
|
||||
if (LZ4HC_protectDictEnd(dictLimit, newMatchIndex))
|
||||
matchIndex = newMatchIndex;
|
||||
else {
|
||||
/* Can only happen if started in the prefix */
|
||||
assert(newMatchIndex >= dictLimit - 3 && newMatchIndex < dictLimit && !extDict);
|
||||
matchIndex = dictLimit;
|
||||
}
|
||||
{ U32 const distToNextPattern = DELTANEXTU16(chainTable, matchIndex);
|
||||
if (distToNextPattern > matchIndex) break; /* avoid overflow */
|
||||
matchIndex -= distToNextPattern;
|
||||
} } }
|
||||
} else {
|
||||
U32 const newMatchIndex = matchCandidateIdx - (U32)backLength; /* farthest position in current segment, will find a match of length currentSegmentLength + maybe some back */
|
||||
if (!LZ4HC_protectDictEnd(dictLimit, newMatchIndex)) {
|
||||
assert(newMatchIndex >= dictLimit - 3 && newMatchIndex < dictLimit && !extDict);
|
||||
matchIndex = dictLimit;
|
||||
} else {
|
||||
matchIndex = newMatchIndex;
|
||||
if (lookBackLength==0) { /* no back possible */
|
||||
size_t const maxML = MIN(currentSegmentLength, srcPatternLength);
|
||||
if ((size_t)longest < maxML) {
|
||||
assert(base + matchIndex != ip);
|
||||
if ((size_t)(ip - base) - matchIndex > LZ4_DISTANCE_MAX) break;
|
||||
assert(maxML < 2 GB);
|
||||
longest = (int)maxML;
|
||||
*matchpos = base + matchIndex; /* virtual pos, relative to ip, to retrieve offset */
|
||||
*startpos = ip;
|
||||
}
|
||||
{ U32 const distToNextPattern = DELTANEXTU16(chainTable, matchIndex);
|
||||
if (distToNextPattern > matchIndex) break; /* avoid overflow */
|
||||
matchIndex -= distToNextPattern;
|
||||
} } } } }
|
||||
continue;
|
||||
} }
|
||||
} } /* PA optimization */
|
||||
@ -358,7 +411,7 @@ LZ4HC_InsertAndGetWiderMatch (
|
||||
} /* while ((matchIndex>=lowestMatchIndex) && (nbAttempts)) */
|
||||
|
||||
if ( dict == usingDictCtxHc
|
||||
&& nbAttempts
|
||||
&& nbAttempts > 0
|
||||
&& ipIndex - lowestMatchIndex < LZ4_DISTANCE_MAX) {
|
||||
size_t const dictEndOffset = (size_t)(dictCtx->end - dictCtx->base);
|
||||
U32 dictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)];
|
||||
@ -408,74 +461,90 @@ int LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4, /* Index tabl
|
||||
* @return : 0 if ok,
|
||||
* 1 if buffer issue detected */
|
||||
LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
|
||||
const BYTE** ip,
|
||||
BYTE** op,
|
||||
const BYTE** anchor,
|
||||
const BYTE** _ip,
|
||||
BYTE** _op,
|
||||
const BYTE** _anchor,
|
||||
int matchLength,
|
||||
const BYTE* const match,
|
||||
limitedOutput_directive limit,
|
||||
BYTE* oend)
|
||||
{
|
||||
#define ip (*_ip)
|
||||
#define op (*_op)
|
||||
#define anchor (*_anchor)
|
||||
|
||||
size_t length;
|
||||
BYTE* const token = (*op)++;
|
||||
BYTE* const token = op++;
|
||||
|
||||
#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 6)
|
||||
static const BYTE* start = NULL;
|
||||
static U32 totalCost = 0;
|
||||
U32 const pos = (start==NULL) ? 0 : (U32)(*anchor - start);
|
||||
U32 const ll = (U32)(*ip - *anchor);
|
||||
U32 const pos = (start==NULL) ? 0 : (U32)(anchor - start);
|
||||
U32 const ll = (U32)(ip - anchor);
|
||||
U32 const llAdd = (ll>=15) ? ((ll-15) / 255) + 1 : 0;
|
||||
U32 const mlAdd = (matchLength>=19) ? ((matchLength-19) / 255) + 1 : 0;
|
||||
U32 const cost = 1 + llAdd + ll + 2 + mlAdd;
|
||||
if (start==NULL) start = *anchor; /* only works for single segment */
|
||||
if (start==NULL) start = anchor; /* only works for single segment */
|
||||
/* g_debuglog_enable = (pos >= 2228) & (pos <= 2262); */
|
||||
DEBUGLOG(6, "pos:%7u -- literals:%3u, match:%4i, offset:%5u, cost:%3u + %u",
|
||||
DEBUGLOG(6, "pos:%7u -- literals:%4u, match:%4i, offset:%5u, cost:%4u + %5u",
|
||||
pos,
|
||||
(U32)(*ip - *anchor), matchLength, (U32)(*ip-match),
|
||||
(U32)(ip - anchor), matchLength, (U32)(ip-match),
|
||||
cost, totalCost);
|
||||
totalCost += cost;
|
||||
#endif
|
||||
|
||||
/* Encode Literal length */
|
||||
length = (size_t)(*ip - *anchor);
|
||||
if ((limit) && ((*op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) return 1; /* Check output limit */
|
||||
length = (size_t)(ip - anchor);
|
||||
LZ4_STATIC_ASSERT(notLimited == 0);
|
||||
/* Check output limit */
|
||||
if (limit && ((op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) {
|
||||
DEBUGLOG(6, "Not enough room to write %i literals (%i bytes remaining)",
|
||||
(int)length, (int)(oend - op));
|
||||
return 1;
|
||||
}
|
||||
if (length >= RUN_MASK) {
|
||||
size_t len = length - RUN_MASK;
|
||||
*token = (RUN_MASK << ML_BITS);
|
||||
for(; len >= 255 ; len -= 255) *(*op)++ = 255;
|
||||
*(*op)++ = (BYTE)len;
|
||||
for(; len >= 255 ; len -= 255) *op++ = 255;
|
||||
*op++ = (BYTE)len;
|
||||
} else {
|
||||
*token = (BYTE)(length << ML_BITS);
|
||||
}
|
||||
|
||||
/* Copy Literals */
|
||||
LZ4_wildCopy8(*op, *anchor, (*op) + length);
|
||||
*op += length;
|
||||
LZ4_wildCopy8(op, anchor, op + length);
|
||||
op += length;
|
||||
|
||||
/* Encode Offset */
|
||||
assert( (*ip - match) <= LZ4_DISTANCE_MAX ); /* note : consider providing offset as a value, rather than as a pointer difference */
|
||||
LZ4_writeLE16(*op, (U16)(*ip-match)); *op += 2;
|
||||
assert( (ip - match) <= LZ4_DISTANCE_MAX ); /* note : consider providing offset as a value, rather than as a pointer difference */
|
||||
LZ4_writeLE16(op, (U16)(ip - match)); op += 2;
|
||||
|
||||
/* Encode MatchLength */
|
||||
assert(matchLength >= MINMATCH);
|
||||
length = (size_t)matchLength - MINMATCH;
|
||||
if ((limit) && (*op + (length / 255) + (1 + LASTLITERALS) > oend)) return 1; /* Check output limit */
|
||||
if (limit && (op + (length / 255) + (1 + LASTLITERALS) > oend)) {
|
||||
DEBUGLOG(6, "Not enough room to write match length");
|
||||
return 1; /* Check output limit */
|
||||
}
|
||||
if (length >= ML_MASK) {
|
||||
*token += ML_MASK;
|
||||
length -= ML_MASK;
|
||||
for(; length >= 510 ; length -= 510) { *(*op)++ = 255; *(*op)++ = 255; }
|
||||
if (length >= 255) { length -= 255; *(*op)++ = 255; }
|
||||
*(*op)++ = (BYTE)length;
|
||||
for(; length >= 510 ; length -= 510) { *op++ = 255; *op++ = 255; }
|
||||
if (length >= 255) { length -= 255; *op++ = 255; }
|
||||
*op++ = (BYTE)length;
|
||||
} else {
|
||||
*token += (BYTE)(length);
|
||||
}
|
||||
|
||||
/* Prepare next loop */
|
||||
*ip += matchLength;
|
||||
*anchor = *ip;
|
||||
ip += matchLength;
|
||||
anchor = ip;
|
||||
|
||||
return 0;
|
||||
}
|
||||
#undef ip
|
||||
#undef op
|
||||
#undef anchor
|
||||
|
||||
LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
|
||||
LZ4HC_CCtx_internal* const ctx,
|
||||
@ -483,7 +552,7 @@ LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
|
||||
char* const dest,
|
||||
int* srcSizePtr,
|
||||
int const maxOutputSize,
|
||||
unsigned maxNbAttempts,
|
||||
int maxNbAttempts,
|
||||
const limitedOutput_directive limit,
|
||||
const dictCtx_directive dict
|
||||
)
|
||||
@ -513,7 +582,7 @@ LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
|
||||
/* init */
|
||||
*srcSizePtr = 0;
|
||||
if (limit == fillOutput) oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */
|
||||
if (inputSize < LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
|
||||
if (inputSize < LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
|
||||
|
||||
/* Main Loop */
|
||||
while (ip <= mflimit) {
|
||||
@ -585,7 +654,11 @@ _Search3:
|
||||
if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
|
||||
ip = start2;
|
||||
optr = op;
|
||||
if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml2, ref2, limit, oend)) goto _dest_overflow;
|
||||
if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml2, ref2, limit, oend)) {
|
||||
ml = ml2;
|
||||
ref = ref2;
|
||||
goto _dest_overflow;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -657,17 +730,18 @@ _Search3:
|
||||
_last_literals:
|
||||
/* Encode Last Literals */
|
||||
{ size_t lastRunSize = (size_t)(iend - anchor); /* literals */
|
||||
size_t litLength = (lastRunSize + 255 - RUN_MASK) / 255;
|
||||
size_t const totalSize = 1 + litLength + lastRunSize;
|
||||
size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
|
||||
size_t const totalSize = 1 + llAdd + lastRunSize;
|
||||
if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */
|
||||
if (limit && (op + totalSize > oend)) {
|
||||
if (limit == limitedOutput) return 0; /* Check output limit */
|
||||
if (limit == limitedOutput) return 0;
|
||||
/* adapt lastRunSize to fill 'dest' */
|
||||
lastRunSize = (size_t)(oend - op) - 1;
|
||||
litLength = (lastRunSize + 255 - RUN_MASK) / 255;
|
||||
lastRunSize -= litLength;
|
||||
lastRunSize = (size_t)(oend - op) - 1 /*token*/;
|
||||
llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
|
||||
lastRunSize -= llAdd;
|
||||
}
|
||||
ip = anchor + lastRunSize;
|
||||
DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
|
||||
ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */
|
||||
|
||||
if (lastRunSize >= RUN_MASK) {
|
||||
size_t accumulator = lastRunSize - RUN_MASK;
|
||||
@ -687,9 +761,25 @@ _last_literals:
|
||||
|
||||
_dest_overflow:
|
||||
if (limit == fillOutput) {
|
||||
/* Assumption : ip, anchor, ml and ref must be set correctly */
|
||||
size_t const ll = (size_t)(ip - anchor);
|
||||
size_t const ll_addbytes = (ll + 240) / 255;
|
||||
size_t const ll_totalCost = 1 + ll_addbytes + ll;
|
||||
BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
|
||||
DEBUGLOG(6, "Last sequence overflowing");
|
||||
op = optr; /* restore correct out pointer */
|
||||
if (op + ll_totalCost <= maxLitPos) {
|
||||
/* ll validated; now adjust match length */
|
||||
size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
|
||||
size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
|
||||
assert(maxMlSize < INT_MAX); assert(ml >= 0);
|
||||
if ((size_t)ml > maxMlSize) ml = (int)maxMlSize;
|
||||
if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ml >= MFLIMIT) {
|
||||
LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, notLimited, oend);
|
||||
} }
|
||||
goto _last_literals;
|
||||
}
|
||||
/* compression failed */
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -700,7 +790,7 @@ static int LZ4HC_compress_optimal( LZ4HC_CCtx_internal* ctx,
|
||||
int const nbSearches, size_t sufficient_len,
|
||||
const limitedOutput_directive limit, int const fullUpdate,
|
||||
const dictCtx_directive dict,
|
||||
HCfavor_e favorDecSpeed);
|
||||
const HCfavor_e favorDecSpeed);
|
||||
|
||||
|
||||
LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
|
||||
@ -717,7 +807,7 @@ LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
|
||||
typedef enum { lz4hc, lz4opt } lz4hc_strat_e;
|
||||
typedef struct {
|
||||
lz4hc_strat_e strat;
|
||||
U32 nbSearches;
|
||||
int nbSearches;
|
||||
U32 targetLength;
|
||||
} cParams_t;
|
||||
static const cParams_t clTable[LZ4HC_CLEVEL_MAX+1] = {
|
||||
@ -736,7 +826,8 @@ LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
|
||||
{ lz4opt,16384,LZ4_OPT_NUM }, /* 12==LZ4HC_CLEVEL_MAX */
|
||||
};
|
||||
|
||||
DEBUGLOG(4, "LZ4HC_compress_generic(ctx=%p, src=%p, srcSize=%d)", ctx, src, *srcSizePtr);
|
||||
DEBUGLOG(4, "LZ4HC_compress_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
|
||||
ctx, src, *srcSizePtr, limit);
|
||||
|
||||
if (limit == fillOutput && dstCapacity < 1) return 0; /* Impossible to store anything */
|
||||
if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size (too large or negative) */
|
||||
@ -756,7 +847,7 @@ LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
|
||||
assert(cParam.strat == lz4opt);
|
||||
result = LZ4HC_compress_optimal(ctx,
|
||||
src, dst, srcSizePtr, dstCapacity,
|
||||
(int)cParam.nbSearches, cParam.targetLength, limit,
|
||||
cParam.nbSearches, cParam.targetLength, limit,
|
||||
cLevel == LZ4HC_CLEVEL_MAX, /* ultra mode */
|
||||
dict, favor);
|
||||
}
|
||||
@ -829,27 +920,22 @@ LZ4HC_compress_generic (
|
||||
|
||||
int LZ4_sizeofStateHC(void) { return (int)sizeof(LZ4_streamHC_t); }
|
||||
|
||||
#ifndef _MSC_VER  /* for some reason, Visual fails the aligment test on 32-bit x86 :
                   * it reports an aligment of 8-bytes,
                   * while actually aligning LZ4_streamHC_t on 4 bytes. */
static size_t LZ4_streamHC_t_alignment(void)
{
    struct { char c; LZ4_streamHC_t t; } t_a;
    return sizeof(t_a) - sizeof(t_a.t);
}
#if LZ4_ALIGN_TEST
    typedef struct { char c; LZ4_streamHC_t t; } t_a;
    return sizeof(t_a) - sizeof(LZ4_streamHC_t);
#else
    return 1;  /* effectively disabled */
#endif
}

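The `{ char c; T t; }` probe used above measures alignment as the padding inserted after `c`; on common ABIs it agrees with C++11 `alignof(T)` (the 32-bit MSVC discrepancy mentioned in the removed comment being the known exception). A quick illustrative check, not part of the lz4 sources:

```cpp
// Sketch: the struct-probe alignment trick compared against alignof().
#include <cassert>
#include <cstddef>

template <typename T>
std::size_t alignmentOf()
{
    struct Probe { char c; T t; };
    return sizeof(Probe) - sizeof(T);   // padding before 't' == alignment of T
}

int main()
{
    assert(alignmentOf<int>()    == alignof(int));
    assert(alignmentOf<double>() == alignof(double));
    return 0;
}
```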
/* state is presumed correctly initialized,
|
||||
* in which case its size and alignment have already been validate */
|
||||
int LZ4_compress_HC_extStateHC_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
|
||||
{
|
||||
LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)state)->internal_donotuse;
|
||||
#ifndef _MSC_VER /* for some reason, Visual fails the aligment test on 32-bit x86 :
|
||||
* it reports an aligment of 8-bytes,
|
||||
* while actually aligning LZ4_streamHC_t on 4 bytes. */
|
||||
assert(((size_t)state & (LZ4_streamHC_t_alignment() - 1)) == 0); /* check alignment */
|
||||
#endif
|
||||
if (((size_t)(state)&(sizeof(void*)-1)) != 0) return 0; /* Error : state is not aligned for pointers (32 or 64 bits) */
|
||||
if (!LZ4_isAligned(state, LZ4_streamHC_t_alignment())) return 0;
|
||||
LZ4_resetStreamHC_fast((LZ4_streamHC_t*)state, compressionLevel);
|
||||
LZ4HC_init_internal (ctx, (const BYTE*)src);
|
||||
if (dstCapacity < LZ4_compressBound(srcSize))
|
||||
@ -898,10 +984,11 @@ int LZ4_compress_HC_destSize(void* state, const char* source, char* dest, int* s
|
||||
/* allocation */
|
||||
LZ4_streamHC_t* LZ4_createStreamHC(void)
|
||||
{
|
||||
LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t));
|
||||
if (LZ4_streamHCPtr==NULL) return NULL;
|
||||
LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr)); /* full initialization, malloc'ed buffer can be full of garbage */
|
||||
return LZ4_streamHCPtr;
|
||||
LZ4_streamHC_t* const state =
|
||||
(LZ4_streamHC_t*)ALLOC_AND_ZERO(sizeof(LZ4_streamHC_t));
|
||||
if (state == NULL) return NULL;
|
||||
LZ4_setCompressionLevel(state, LZ4HC_CLEVEL_DEFAULT);
|
||||
return state;
|
||||
}
|
||||
|
||||
int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr)
|
||||
@ -916,22 +1003,16 @@ int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr)
|
||||
LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size)
|
||||
{
|
||||
LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)buffer;
|
||||
if (buffer == NULL) return NULL;
|
||||
if (size < sizeof(LZ4_streamHC_t)) return NULL;
|
||||
#ifndef _MSC_VER /* for some reason, Visual fails the aligment test on 32-bit x86 :
|
||||
* it reports an aligment of 8-bytes,
|
||||
* while actually aligning LZ4_streamHC_t on 4 bytes. */
|
||||
if (((size_t)buffer) & (LZ4_streamHC_t_alignment() - 1)) return NULL; /* alignment check */
|
||||
#endif
|
||||
/* if compilation fails here, LZ4_STREAMHCSIZE must be increased */
|
||||
LZ4_STATIC_ASSERT(sizeof(LZ4HC_CCtx_internal) <= LZ4_STREAMHCSIZE);
|
||||
DEBUGLOG(4, "LZ4_initStreamHC(%p, %u)", LZ4_streamHCPtr, (unsigned)size);
|
||||
/* end-base will trigger a clearTable on starting compression */
|
||||
LZ4_streamHCPtr->internal_donotuse.end = (const BYTE *)(ptrdiff_t)-1;
|
||||
LZ4_streamHCPtr->internal_donotuse.base = NULL;
|
||||
LZ4_streamHCPtr->internal_donotuse.dictCtx = NULL;
|
||||
LZ4_streamHCPtr->internal_donotuse.favorDecSpeed = 0;
|
||||
LZ4_streamHCPtr->internal_donotuse.dirty = 0;
|
||||
DEBUGLOG(4, "LZ4_initStreamHC(%p, %u)", buffer, (unsigned)size);
|
||||
/* check conditions */
|
||||
if (buffer == NULL) return NULL;
|
||||
if (size < sizeof(LZ4_streamHC_t)) return NULL;
|
||||
if (!LZ4_isAligned(buffer, LZ4_streamHC_t_alignment())) return NULL;
|
||||
/* init */
|
||||
{ LZ4HC_CCtx_internal* const hcstate = &(LZ4_streamHCPtr->internal_donotuse);
|
||||
MEM_INIT(hcstate, 0, sizeof(*hcstate)); }
|
||||
LZ4_setCompressionLevel(LZ4_streamHCPtr, LZ4HC_CLEVEL_DEFAULT);
|
||||
return LZ4_streamHCPtr;
|
||||
}
|
||||
@@ -976,7 +1057,7 @@ int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr,
                    const char* dictionary, int dictSize)
{
    LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
    DEBUGLOG(4, "LZ4_loadDictHC(%p, %p, %d)", LZ4_streamHCPtr, dictionary, dictSize);
    DEBUGLOG(4, "LZ4_loadDictHC(ctx:%p, dict:%p, dictSize:%d)", LZ4_streamHCPtr, dictionary, dictSize);
    assert(LZ4_streamHCPtr != NULL);
    if (dictSize > 64 KB) {
        dictionary += (size_t)dictSize - 64 KB;
@@ -1012,16 +1093,20 @@ static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBl
    ctxPtr->base = newBlock - ctxPtr->dictLimit;
    ctxPtr->end = newBlock;
    ctxPtr->nextToUpdate = ctxPtr->dictLimit; /* match referencing will resume from there */

    /* cannot reference an extDict and a dictCtx at the same time */
    ctxPtr->dictCtx = NULL;
}

static int LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
                                            const char* src, char* dst,
                                            int* srcSizePtr, int dstCapacity,
                                            limitedOutput_directive limit)
static int
LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
                                 const char* src, char* dst,
                                 int* srcSizePtr, int dstCapacity,
                                 limitedOutput_directive limit)
{
    LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
    DEBUGLOG(4, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d)",
             LZ4_streamHCPtr, src, *srcSizePtr);
    DEBUGLOG(5, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
             LZ4_streamHCPtr, src, *srcSizePtr, limit);
    assert(ctxPtr != NULL);
    /* auto-init if forgotten */
    if (ctxPtr->base == NULL) LZ4HC_init_internal (ctxPtr, (const BYTE*) src);
@@ -1045,8 +1130,7 @@ static int LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
            if (sourceEnd > dictEnd) sourceEnd = dictEnd;
            ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);
            if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) ctxPtr->lowLimit = ctxPtr->dictLimit;
        }
    }
    } }

    return LZ4HC_compress_generic (ctxPtr, src, dst, srcSizePtr, dstCapacity, ctxPtr->compressionLevel, limit);
}
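A hedged usage sketch of the streaming path that LZ4_compressHC_continue_generic() backs: two consecutive blocks compressed with the public LZ4_compress_HC_continue(), so the second block can reference the first as history. Names outside the LZ4 API (compress_two_chunks, chunk1, chunk2) are illustrative, and real code would likely place the ~256 KB state on the heap.

    #include "lz4hc.h"

    /* Compresses two dependent chunks with shared history.
     * Returns total compressed bytes, or 0 on error. Both source chunks
     * must stay valid and unmodified while the stream is in use. */
    int compress_two_chunks(const char* chunk1, int len1,
                            const char* chunk2, int len2,
                            char* dst, int dstCapacity)
    {
        LZ4_streamHC_t hc;   /* ~256 KB: fine for a sketch, heavy for a real stack frame */
        int written = 0;

        if (LZ4_initStreamHC(&hc, sizeof(hc)) == NULL) return 0;
        LZ4_resetStreamHC_fast(&hc, LZ4HC_CLEVEL_DEFAULT);

        {   int const c1 = LZ4_compress_HC_continue(&hc, chunk1, dst, len1, dstCapacity);
            if (c1 <= 0) return 0;
            written += c1;
        }
        {   int const c2 = LZ4_compress_HC_continue(&hc, chunk2, dst + written,
                                                    len2, dstCapacity - written);
            if (c2 <= 0) return 0;
            written += c2;
        }
        return written;   /* the decompressor must chain the blocks the same way */
    }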
@@ -1066,23 +1150,30 @@ int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const ch



/* dictionary saving */

/* LZ4_saveDictHC :
 * save history content
 * into a user-provided buffer
 * which is then used to continue compression
 */
int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize)
{
    LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
    int const prefixSize = (int)(streamPtr->end - (streamPtr->base + streamPtr->dictLimit));
    DEBUGLOG(4, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize);
    DEBUGLOG(5, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize);
    assert(prefixSize >= 0);
    if (dictSize > 64 KB) dictSize = 64 KB;
    if (dictSize < 4) dictSize = 0;
    if (dictSize > prefixSize) dictSize = prefixSize;
    memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
    if (safeBuffer == NULL) assert(dictSize == 0);
    if (dictSize > 0)
        memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
    { U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
        streamPtr->end = (const BYTE*)safeBuffer + dictSize;
        streamPtr->base = streamPtr->end - endIndex;
        streamPtr->dictLimit = endIndex - (U32)dictSize;
        streamPtr->lowLimit = endIndex - (U32)dictSize;
        if (streamPtr->nextToUpdate < streamPtr->dictLimit) streamPtr->nextToUpdate = streamPtr->dictLimit;
        if (streamPtr->nextToUpdate < streamPtr->dictLimit)
            streamPtr->nextToUpdate = streamPtr->dictLimit;
    }
    return dictSize;
}
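A small hedged sketch of how LZ4_saveDictHC() is meant to be called, per the comment above (save history into a user-provided buffer, then continue compression from it). The surrounding buffer management is illustrative; only the LZ4 call is real API.

    #include "lz4hc.h"

    /* Before the working input buffer is overwritten with fresh data, preserve
     * up to 64 KB of recent history in a caller-owned buffer so the next
     * LZ4_compress_HC_continue() call can still reference it. */
    static char dict_buf[64 * 1024];

    void rotate_history(LZ4_streamHC_t* hc)
    {
        int const dictSize = LZ4_saveDictHC(hc, dict_buf, (int)sizeof(dict_buf));
        /* dictSize bytes of history now live in dict_buf and the stream has been
         * re-based onto it, so the old working buffer may be safely reused. */
        (void)dictSize;
    }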
@@ -1232,8 +1323,13 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
                                    const dictCtx_directive dict,
                                    const HCfavor_e favorDecSpeed)
{
    int retval = 0;
#define TRAILING_LITERALS 3
#ifdef LZ4HC_HEAPMODE
    LZ4HC_optimal_t* const opt = (LZ4HC_optimal_t*)ALLOC(sizeof(LZ4HC_optimal_t) * (LZ4_OPT_NUM + TRAILING_LITERALS));
#else
    LZ4HC_optimal_t opt[LZ4_OPT_NUM + TRAILING_LITERALS]; /* ~64 KB, which is a bit large for stack... */
#endif

    const BYTE* ip = (const BYTE*) source;
    const BYTE* anchor = ip;
@@ -1243,15 +1339,19 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
    BYTE* op = (BYTE*) dst;
    BYTE* opSaved = (BYTE*) dst;
    BYTE* oend = op + dstCapacity;
    int ovml = MINMATCH; /* overflow - last sequence */
    const BYTE* ovref = NULL;

    /* init */
#ifdef LZ4HC_HEAPMODE
    if (opt == NULL) goto _return_label;
#endif
    DEBUGLOG(5, "LZ4HC_compress_optimal(dst=%p, dstCapa=%u)", dst, (unsigned)dstCapacity);
    *srcSizePtr = 0;
    if (limit == fillOutput) oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */
    if (sufficient_len >= LZ4_OPT_NUM) sufficient_len = LZ4_OPT_NUM-1;

    /* Main Loop */
    assert(ip - anchor < LZ4_MAX_INPUT_SIZE);
    while (ip <= mflimit) {
        int const llen = (int)(ip - anchor);
        int best_mlen, best_off;
@@ -1265,8 +1365,11 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
            int const firstML = firstMatch.len;
            const BYTE* const matchPos = ip - firstMatch.off;
            opSaved = op;
            if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, matchPos, limit, oend) ) /* updates ip, op and anchor */
            if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, matchPos, limit, oend) ) { /* updates ip, op and anchor */
                ovml = firstML;
                ovref = matchPos;
                goto _dest_overflow;
            }
            continue;
        }

@@ -1408,7 +1511,7 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
        best_off = opt[last_match_pos].off;
        cur = last_match_pos - best_mlen;

encode: /* cur, last_match_pos, best_mlen, best_off must be set */
encode: /* cur, last_match_pos, best_mlen, best_off must be set */
        assert(cur < LZ4_OPT_NUM);
        assert(last_match_pos >= 1); /* == 1 when only one candidate */
        DEBUGLOG(6, "reverse traversal, looking for shortest path (last_match_pos=%i)", last_match_pos);
@@ -1438,25 +1541,31 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
            assert(ml >= MINMATCH);
            assert((offset >= 1) && (offset <= LZ4_DISTANCE_MAX));
            opSaved = op;
            if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ip - offset, limit, oend) ) /* updates ip, op and anchor */
            if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ip - offset, limit, oend) ) { /* updates ip, op and anchor */
                ovml = ml;
                ovref = ip - offset;
                goto _dest_overflow;
            } }
        } } }
    } /* while (ip <= mflimit) */

_last_literals:
_last_literals:
    /* Encode Last Literals */
    { size_t lastRunSize = (size_t)(iend - anchor); /* literals */
        size_t litLength = (lastRunSize + 255 - RUN_MASK) / 255;
        size_t const totalSize = 1 + litLength + lastRunSize;
        size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
        size_t const totalSize = 1 + llAdd + lastRunSize;
        if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */
        if (limit && (op + totalSize > oend)) {
            if (limit == limitedOutput) return 0; /* Check output limit */
            if (limit == limitedOutput) { /* Check output limit */
                retval = 0;
                goto _return_label;
            }
            /* adapt lastRunSize to fill 'dst' */
            lastRunSize = (size_t)(oend - op) - 1;
            litLength = (lastRunSize + 255 - RUN_MASK) / 255;
            lastRunSize -= litLength;
            lastRunSize = (size_t)(oend - op) - 1 /*token*/;
            llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
            lastRunSize -= llAdd;
        }
        ip = anchor + lastRunSize;
        DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
        ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */

        if (lastRunSize >= RUN_MASK) {
            size_t accumulator = lastRunSize - RUN_MASK;
@@ -1472,14 +1581,35 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,

    /* End */
    *srcSizePtr = (int) (((const char*)ip) - source);
    return (int) ((char*)op-dst);
    retval = (int) ((char*)op-dst);
    goto _return_label;

_dest_overflow:
    if (limit == fillOutput) {
        op = opSaved; /* restore correct out pointer */
        goto _last_literals;
    }
    return 0;
}

}
_dest_overflow:
    if (limit == fillOutput) {
        /* Assumption : ip, anchor, ovml and ovref must be set correctly */
        size_t const ll = (size_t)(ip - anchor);
        size_t const ll_addbytes = (ll + 240) / 255;
        size_t const ll_totalCost = 1 + ll_addbytes + ll;
        BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
        DEBUGLOG(6, "Last sequence overflowing (only %i bytes remaining)", (int)(oend-1-opSaved));
        op = opSaved; /* restore correct out pointer */
        if (op + ll_totalCost <= maxLitPos) {
            /* ll validated; now adjust match length */
            size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
            size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
            assert(maxMlSize < INT_MAX); assert(ovml >= 0);
            if ((size_t)ovml > maxMlSize) ovml = (int)maxMlSize;
            if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ovml >= MFLIMIT) {
                DEBUGLOG(6, "Space to end : %i + ml (%i)", (int)((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1), ovml);
                DEBUGLOG(6, "Before : ip = %p, anchor = %p", ip, anchor);
                LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ovml, ovref, notLimited, oend);
                DEBUGLOG(6, "After : ip = %p, anchor = %p", ip, anchor);
        }   }
        goto _last_literals;
    }
_return_label:
#ifdef LZ4HC_HEAPMODE
    FREEMEM(opt);
#endif
    return retval;
}
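The hunk above converts LZ4HC_compress_optimal() from early returns to a single _return_label exit, so the opt[] table, heap-allocated when LZ4HC_HEAPMODE is defined, is always released. A generic, hypothetical sketch of that single-exit pattern; names such as USE_HEAP_SCRATCH and checksum_with_scratch are inventions for illustration, not LZ4 API.

    #include <stdlib.h>
    #include <string.h>

    /* Every path, including early failures, exits through _return_label so the
     * optional heap-allocated scratch buffer is freed exactly once. */
    int checksum_with_scratch(const char* src, size_t srcSize)
    {
        int retval = -1;                          /* pessimistic default, like retval = 0 above */
    #ifdef USE_HEAP_SCRATCH                       /* analogous to LZ4HC_HEAPMODE */
        char* const scratch = (char*)malloc(1024);
        if (scratch == NULL) goto _return_label;  /* free(NULL) below is harmless */
    #else
        char scratch[1024];                       /* stack variant, nothing to free */
    #endif

        if (src == NULL || srcSize == 0) goto _return_label;
        memcpy(scratch, src, srcSize < 1024 ? srcSize : 1024);
        retval = (int)(unsigned char)scratch[0];  /* stand-in for real work */

    _return_label:
    #ifdef USE_HEAP_SCRATCH
        free(scratch);
    #endif
        return retval;
    }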
@@ -31,15 +31,17 @@
 - LZ4 source repository : https://github.com/lz4/lz4
 - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
*/
#ifndef TRACY_LZ4_HC_H_19834876238432
#define TRACY_LZ4_HC_H_19834876238432
#ifndef LZ4_HC_H_19834876238432
#define LZ4_HC_H_19834876238432

#if defined (__cplusplus)
extern "C" {
#endif

/* --- Dependency --- */
/* note : lz4hc requires lz4.h/lz4.c for compilation */
#include "tracy_lz4.hpp" /* stddef, LZ4LIB_API, LZ4_DEPRECATED */
#include "lz4.h" /* stddef, LZ4LIB_API, LZ4_DEPRECATED */

namespace tracy
{

/* --- Useful constants --- */
#define LZ4HC_CLEVEL_MIN 3
@@ -196,57 +198,32 @@ LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, in
#define LZ4HC_HASH_MASK (LZ4HC_HASHTABLESIZE - 1)


#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#include <stdint.h>

typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
struct LZ4HC_CCtx_internal
{
    uint32_t hashTable[LZ4HC_HASHTABLESIZE];
    uint16_t chainTable[LZ4HC_MAXD];
    const uint8_t* end;        /* next block here to continue on current prefix */
    const uint8_t* base;       /* All index relative to this position */
    const uint8_t* dictBase;   /* alternate base for extDict */
    uint32_t dictLimit;        /* below that point, need extDict */
    uint32_t lowLimit;         /* below that point, no more dict */
    uint32_t nextToUpdate;     /* index from which to continue dictionary update */
    short compressionLevel;
    int8_t favorDecSpeed;      /* favor decompression speed if this flag set,
                                  otherwise, favor compression ratio */
    int8_t dirty;              /* stream has to be fully reset if this flag is set */
    LZ4_u32 hashTable[LZ4HC_HASHTABLESIZE];
    LZ4_u16 chainTable[LZ4HC_MAXD];
    const LZ4_byte* end;       /* next block here to continue on current prefix */
    const LZ4_byte* base;      /* All index relative to this position */
    const LZ4_byte* dictBase;  /* alternate base for extDict */
    LZ4_u32 dictLimit;         /* below that point, need extDict */
    LZ4_u32 lowLimit;          /* below that point, no more dict */
    LZ4_u32 nextToUpdate;      /* index from which to continue dictionary update */
    short compressionLevel;
    LZ4_i8 favorDecSpeed;      /* favor decompression speed if this flag set,
                                  otherwise, favor compression ratio */
    LZ4_i8 dirty;              /* stream has to be fully reset if this flag is set */
    const LZ4HC_CCtx_internal* dictCtx;
};

#else

typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
struct LZ4HC_CCtx_internal
{
    unsigned int hashTable[LZ4HC_HASHTABLESIZE];
    unsigned short chainTable[LZ4HC_MAXD];
    const unsigned char* end;      /* next block here to continue on current prefix */
    const unsigned char* base;     /* All index relative to this position */
    const unsigned char* dictBase; /* alternate base for extDict */
    unsigned int dictLimit;        /* below that point, need extDict */
    unsigned int lowLimit;         /* below that point, no more dict */
    unsigned int nextToUpdate;     /* index from which to continue dictionary update */
    short compressionLevel;
    char favorDecSpeed;            /* favor decompression speed if this flag set,
                                      otherwise, favor compression ratio */
    char dirty;                    /* stream has to be fully reset if this flag is set */
    const LZ4HC_CCtx_internal* dictCtx;
};

#endif


/* Do not use these definitions directly !
 * Declare or allocate an LZ4_streamHC_t instead.
 */
#define LZ4_STREAMHCSIZE (4*LZ4HC_HASHTABLESIZE + 2*LZ4HC_MAXD + 56 + ((sizeof(void*)==16) ? 56 : 0) /* AS400*/ ) /* 262200 or 262256*/
#define LZ4_STREAMHCSIZE_SIZET (LZ4_STREAMHCSIZE / sizeof(size_t))
#define LZ4_STREAMHCSIZE 262200 /* static size, for inter-version compatibility */
#define LZ4_STREAMHCSIZE_VOIDP (LZ4_STREAMHCSIZE / sizeof(void*))
union LZ4_streamHC_u {
    size_t table[LZ4_STREAMHCSIZE_SIZET];
    void* table[LZ4_STREAMHCSIZE_VOIDP];
    LZ4HC_CCtx_internal internal_donotuse;
}; /* previously typedef'd to LZ4_streamHC_t */

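The union above now derives its size from a hard-coded LZ4_STREAMHCSIZE of 262200 bytes, kept stable for inter-version compatibility, which is what makes it safe to declare or embed an LZ4_streamHC_t directly. A brief hedged sketch; the surrounding struct and function names (encoder_ctx, encoder_init) are illustrative only.

    #include "lz4hc.h"

    /* The HC state can be embedded in an application structure with no dynamic
     * allocation, because LZ4_streamHC_t is a plain union with a fixed,
     * version-stable footprint. */
    typedef struct {
        LZ4_streamHC_t hc;      /* roughly LZ4_STREAMHCSIZE (262200) bytes */
        int level;
    } encoder_ctx;

    int encoder_init(encoder_ctx* ctx, int level)
    {
        ctx->level = level;
        /* LZ4_initStreamHC() re-checks the size and alignment of the member */
        return LZ4_initStreamHC(&ctx->hc, sizeof(ctx->hc)) != NULL;
    }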
@@ -315,7 +292,9 @@ LZ4_DEPRECATED("use LZ4_initStreamHC() instead") LZ4LIB_API int LZ4_resetStre
LZ4LIB_API void LZ4_resetStreamHC (LZ4_streamHC_t* streamHCPtr, int compressionLevel);


#if defined (__cplusplus)
}
#endif

#endif /* LZ4_HC_H_19834876238432 */

@@ -332,8 +311,12 @@ LZ4LIB_API void LZ4_resetStreamHC (LZ4_streamHC_t* streamHCPtr, int compressionL
#ifndef LZ4_HC_SLO_098092834
#define LZ4_HC_SLO_098092834

namespace tracy
{
#define LZ4_STATIC_LINKING_ONLY /* LZ4LIB_STATIC_API */
#include "lz4.h"

#if defined (__cplusplus)
extern "C" {
#endif

/*! LZ4_setCompressionLevel() : v1.8.0+ (experimental)
 *  It's possible to change compression level
@@ -422,7 +405,9 @@ LZ4LIB_STATIC_API void LZ4_attach_HC_dictionary(
    LZ4_streamHC_t *working_stream,
    const LZ4_streamHC_t *dictionary_stream);

#if defined (__cplusplus)
}
#endif

#endif /* LZ4_HC_SLO_098092834 */
#endif /* LZ4_HC_STATIC_LINKING_ONLY */