Mirror of https://github.com/wolfpld/tracy.git (synced 2024-11-22 14:44:34 +00:00)

Commit 8c45ed33fd, parent 6fa3491bb7
Bump zstd to 1.5.1.
@@ -165,11 +165,13 @@
     <ClInclude Include="..\..\..\zstd\common\huf.h" />
     <ClInclude Include="..\..\..\zstd\common\mem.h" />
     <ClInclude Include="..\..\..\zstd\common\pool.h" />
+    <ClInclude Include="..\..\..\zstd\common\portability_macros.h" />
     <ClInclude Include="..\..\..\zstd\common\threading.h" />
     <ClInclude Include="..\..\..\zstd\common\xxhash.h" />
     <ClInclude Include="..\..\..\zstd\common\zstd_deps.h" />
     <ClInclude Include="..\..\..\zstd\common\zstd_internal.h" />
     <ClInclude Include="..\..\..\zstd\common\zstd_trace.h" />
+    <ClInclude Include="..\..\..\zstd\compress\clevels.h" />
     <ClInclude Include="..\..\..\zstd\compress\hist.h" />
     <ClInclude Include="..\..\..\zstd\compress\zstdmt_compress.h" />
     <ClInclude Include="..\..\..\zstd\compress\zstd_compress_internal.h" />
@@ -192,6 +194,9 @@
     <ClInclude Include="..\..\..\zstd\zstd.h" />
     <ClInclude Include="..\..\..\zstd\zstd_errors.h" />
   </ItemGroup>
+  <ItemGroup>
+    <None Include="..\..\..\zstd\decompress\huf_decompress_amd64.S" />
+  </ItemGroup>
   <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
   <ImportGroup Label="ExtensionTargets">
   </ImportGroup>
@@ -344,5 +344,16 @@
     <ClInclude Include="..\..\..\common\TracyStackFrames.hpp">
       <Filter>common</Filter>
     </ClInclude>
+    <ClInclude Include="..\..\..\zstd\common\portability_macros.h">
+      <Filter>zstd\common</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\zstd\compress\clevels.h">
+      <Filter>zstd\compress</Filter>
+    </ClInclude>
+  </ItemGroup>
+  <ItemGroup>
+    <None Include="..\..\..\zstd\decompress\huf_decompress_amd64.S">
+      <Filter>zstd\decompress</Filter>
+    </None>
   </ItemGroup>
 </Project>

@@ -165,11 +165,13 @@
     <ClInclude Include="..\..\..\zstd\common\huf.h" />
     <ClInclude Include="..\..\..\zstd\common\mem.h" />
     <ClInclude Include="..\..\..\zstd\common\pool.h" />
+    <ClInclude Include="..\..\..\zstd\common\portability_macros.h" />
     <ClInclude Include="..\..\..\zstd\common\threading.h" />
     <ClInclude Include="..\..\..\zstd\common\xxhash.h" />
     <ClInclude Include="..\..\..\zstd\common\zstd_deps.h" />
     <ClInclude Include="..\..\..\zstd\common\zstd_internal.h" />
     <ClInclude Include="..\..\..\zstd\common\zstd_trace.h" />
+    <ClInclude Include="..\..\..\zstd\compress\clevels.h" />
     <ClInclude Include="..\..\..\zstd\compress\hist.h" />
     <ClInclude Include="..\..\..\zstd\compress\zstdmt_compress.h" />
     <ClInclude Include="..\..\..\zstd\compress\zstd_compress_internal.h" />
@@ -192,6 +194,9 @@
     <ClInclude Include="..\..\..\zstd\zstd.h" />
     <ClInclude Include="..\..\..\zstd\zstd_errors.h" />
   </ItemGroup>
+  <ItemGroup>
+    <None Include="..\..\..\zstd\decompress\huf_decompress_amd64.S" />
+  </ItemGroup>
   <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
   <ImportGroup Label="ExtensionTargets">
   </ImportGroup>
@@ -344,5 +344,16 @@
     <ClInclude Include="..\..\..\common\TracyStackFrames.hpp">
       <Filter>common</Filter>
     </ClInclude>
+    <ClInclude Include="..\..\..\zstd\common\portability_macros.h">
+      <Filter>zstd\common</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\zstd\compress\clevels.h">
+      <Filter>zstd\compress</Filter>
+    </ClInclude>
+  </ItemGroup>
+  <ItemGroup>
+    <None Include="..\..\..\zstd\decompress\huf_decompress_amd64.S">
+      <Filter>zstd\decompress</Filter>
+    </None>
   </ItemGroup>
 </Project>

@@ -161,11 +161,13 @@
     <ClInclude Include="..\..\..\zstd\common\huf.h" />
     <ClInclude Include="..\..\..\zstd\common\mem.h" />
     <ClInclude Include="..\..\..\zstd\common\pool.h" />
+    <ClInclude Include="..\..\..\zstd\common\portability_macros.h" />
     <ClInclude Include="..\..\..\zstd\common\threading.h" />
     <ClInclude Include="..\..\..\zstd\common\xxhash.h" />
     <ClInclude Include="..\..\..\zstd\common\zstd_deps.h" />
     <ClInclude Include="..\..\..\zstd\common\zstd_internal.h" />
     <ClInclude Include="..\..\..\zstd\common\zstd_trace.h" />
+    <ClInclude Include="..\..\..\zstd\compress\clevels.h" />
     <ClInclude Include="..\..\..\zstd\compress\hist.h" />
     <ClInclude Include="..\..\..\zstd\compress\zstdmt_compress.h" />
     <ClInclude Include="..\..\..\zstd\compress\zstd_compress_internal.h" />
@@ -188,6 +190,9 @@
     <ClInclude Include="..\..\..\zstd\zstd.h" />
     <ClInclude Include="..\..\..\zstd\zstd_errors.h" />
   </ItemGroup>
+  <ItemGroup>
+    <None Include="..\..\..\zstd\decompress\huf_decompress_amd64.S" />
+  </ItemGroup>
   <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
   <ImportGroup Label="ExtensionTargets">
   </ImportGroup>
@@ -329,5 +329,16 @@
     <ClInclude Include="..\..\..\common\TracyStackFrames.hpp">
       <Filter>common</Filter>
     </ClInclude>
+    <ClInclude Include="..\..\..\zstd\common\portability_macros.h">
+      <Filter>zstd\common</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\zstd\compress\clevels.h">
+      <Filter>zstd\compress</Filter>
+    </ClInclude>
+  </ItemGroup>
+  <ItemGroup>
+    <None Include="..\..\..\zstd\decompress\huf_decompress_amd64.S">
+      <Filter>zstd\decompress</Filter>
+    </None>
   </ItemGroup>
 </Project>

@@ -250,11 +250,13 @@
     <ClInclude Include="..\..\..\zstd\common\huf.h" />
     <ClInclude Include="..\..\..\zstd\common\mem.h" />
     <ClInclude Include="..\..\..\zstd\common\pool.h" />
+    <ClInclude Include="..\..\..\zstd\common\portability_macros.h" />
     <ClInclude Include="..\..\..\zstd\common\threading.h" />
     <ClInclude Include="..\..\..\zstd\common\xxhash.h" />
     <ClInclude Include="..\..\..\zstd\common\zstd_deps.h" />
     <ClInclude Include="..\..\..\zstd\common\zstd_internal.h" />
     <ClInclude Include="..\..\..\zstd\common\zstd_trace.h" />
+    <ClInclude Include="..\..\..\zstd\compress\clevels.h" />
     <ClInclude Include="..\..\..\zstd\compress\hist.h" />
     <ClInclude Include="..\..\..\zstd\compress\zstdmt_compress.h" />
     <ClInclude Include="..\..\..\zstd\compress\zstd_compress_internal.h" />
@@ -298,5 +300,8 @@
   <ItemGroup>
     <Manifest Include="Tracy.manifest" />
   </ItemGroup>
+  <ItemGroup>
+    <None Include="..\..\..\zstd\decompress\huf_decompress_amd64.S" />
+  </ItemGroup>
   <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
 </Project>
@@ -578,6 +578,12 @@
     <ClInclude Include="..\..\src\FiraCodeRetina.hpp">
       <Filter>src</Filter>
     </ClInclude>
+    <ClInclude Include="..\..\..\zstd\common\portability_macros.h">
+      <Filter>zstd\common</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\zstd\compress\clevels.h">
+      <Filter>zstd\compress</Filter>
+    </ClInclude>
   </ItemGroup>
   <ItemGroup>
     <Natvis Include="DebugVis.natvis" />
@@ -588,4 +594,9 @@
   <ItemGroup>
     <Manifest Include="Tracy.manifest" />
   </ItemGroup>
+  <ItemGroup>
+    <None Include="..\..\..\zstd\decompress\huf_decompress_amd64.S">
+      <Filter>zstd\decompress</Filter>
+    </None>
+  </ItemGroup>
 </Project>

@@ -165,11 +165,13 @@
     <ClInclude Include="..\..\..\zstd\common\huf.h" />
     <ClInclude Include="..\..\..\zstd\common\mem.h" />
     <ClInclude Include="..\..\..\zstd\common\pool.h" />
+    <ClInclude Include="..\..\..\zstd\common\portability_macros.h" />
     <ClInclude Include="..\..\..\zstd\common\threading.h" />
     <ClInclude Include="..\..\..\zstd\common\xxhash.h" />
     <ClInclude Include="..\..\..\zstd\common\zstd_deps.h" />
     <ClInclude Include="..\..\..\zstd\common\zstd_internal.h" />
     <ClInclude Include="..\..\..\zstd\common\zstd_trace.h" />
+    <ClInclude Include="..\..\..\zstd\compress\clevels.h" />
     <ClInclude Include="..\..\..\zstd\compress\hist.h" />
     <ClInclude Include="..\..\..\zstd\compress\zstdmt_compress.h" />
     <ClInclude Include="..\..\..\zstd\compress\zstd_compress_internal.h" />
@@ -192,6 +194,9 @@
     <ClInclude Include="..\..\..\zstd\zstd.h" />
     <ClInclude Include="..\..\..\zstd\zstd_errors.h" />
   </ItemGroup>
+  <ItemGroup>
+    <None Include="..\..\..\zstd\decompress\huf_decompress_amd64.S" />
+  </ItemGroup>
   <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
   <ImportGroup Label="ExtensionTargets">
   </ImportGroup>
@@ -344,5 +344,16 @@
     <ClInclude Include="..\..\..\common\TracyStackFrames.hpp">
       <Filter>common</Filter>
     </ClInclude>
+    <ClInclude Include="..\..\..\zstd\common\portability_macros.h">
+      <Filter>zstd\common</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\zstd\compress\clevels.h">
+      <Filter>zstd\compress</Filter>
+    </ClInclude>
+  </ItemGroup>
+  <ItemGroup>
+    <None Include="..\..\..\zstd\decompress\huf_decompress_amd64.S">
+      <Filter>zstd\decompress</Filter>
+    </None>
   </ItemGroup>
 </Project>
zstd/common/bitstream.h

@@ -145,8 +145,14 @@ MEM_STATIC unsigned BIT_highbit32 (U32 val)
 #   if STATIC_BMI2 == 1
     return _lzcnt_u32(val) ^ 31;
 #   else
-    unsigned long r = 0;
-    return _BitScanReverse(&r, val) ? (unsigned)r : 0;
+    if (val != 0) {
+        unsigned long r;
+        _BitScanReverse(&r, val);
+        return (unsigned)r;
+    } else {
+        /* Should not reach this code path */
+        __assume(0);
+    }
 #   endif
 #   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
     return __builtin_clz (val) ^ 31;
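For context, BIT_highbit32() returns the index of the highest set bit and its caller guarantees val != 0 via the assert; the rewrite therefore drops the dead success check on _BitScanReverse and marks the zero branch unreachable with MSVC's __assume(0). A minimal portable restatement of the contract (ours, not part of the diff):

#include <assert.h>

/* Reference semantics only: highbit32_ref(1) == 0, highbit32_ref(0x80000000u) == 31.
 * On GCC this is __builtin_clz(val) ^ 31; the MSVC branch above reaches the same
 * result via _BitScanReverse plus __assume(0) on the asserted-impossible zero path. */
static unsigned highbit32_ref(unsigned val)
{
    unsigned pos = 0;
    assert(val != 0);
    while (val >>= 1) pos++;   /* count how far the top bit sits above bit 0 */
    return pos;
}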
@@ -293,22 +299,22 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si
         switch(srcSize)
         {
         case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
-                /* fall-through */
+                ZSTD_FALLTHROUGH;
 
         case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
-                /* fall-through */
+                ZSTD_FALLTHROUGH;
 
         case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
-                /* fall-through */
+                ZSTD_FALLTHROUGH;
 
         case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
-                /* fall-through */
+                ZSTD_FALLTHROUGH;
 
         case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
-                /* fall-through */
+                ZSTD_FALLTHROUGH;
 
         case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8;
-                /* fall-through */
+                ZSTD_FALLTHROUGH;
 
         default: break;
         }
@@ -332,7 +338,16 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 c
     U32 const regMask = sizeof(bitContainer)*8 - 1;
     /* if start > regMask, bitstream is corrupted, and result is undefined */
     assert(nbBits < BIT_MASK_SIZE);
+    /* x86 transform & ((1 << nbBits) - 1) to bzhi instruction, it is better
+     * than accessing memory. When bmi2 instruction is not present, we consider
+     * such cpus old (pre-Haswell, 2013) and their performance is not of that
+     * importance.
+     */
+#if defined(__x86_64__) || defined(_M_X86)
+    return (bitContainer >> (start & regMask)) & ((((U64)1) << nbBits) - 1);
+#else
     return (bitContainer >> (start & regMask)) & BIT_mask[nbBits];
+#endif
 }
 
 MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
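The new x86-64 branch computes the mask arithmetically instead of loading it from the BIT_mask[] table, because compilers can fuse the shift-and-mask into a single bzhi instruction when BMI2 is available. A hedged sketch of the equivalence (ours, assuming nbBits is below the register width, as the assert above enforces):

/* Same value as (bitContainer >> (start & regMask)) & BIT_mask[nbBits],
 * since BIT_mask[n] == (1 << n) - 1 for every valid n; the arithmetic form
 * avoids a memory load and maps onto bzhi on BMI2-capable x86-64. */
static size_t getMiddleBits_ref(size_t bitContainer, unsigned start, unsigned nbBits)
{
    unsigned const regMask = sizeof(bitContainer)*8 - 1;
    return (bitContainer >> (start & regMask)) & (((size_t)1 << nbBits) - 1);
}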
zstd/common/compiler.h

@@ -11,6 +11,8 @@
 #ifndef ZSTD_COMPILER_H
 #define ZSTD_COMPILER_H
 
+#include "portability_macros.h"
+
 /*-*******************************************************
 *  Compiler specifics
 *********************************************************/
@@ -40,7 +42,7 @@
 
 /**
   On MSVC qsort requires that functions passed into it use the __cdecl calling conversion(CC).
-  This explictly marks such functions as __cdecl so that the code will still compile
+  This explicitly marks such functions as __cdecl so that the code will still compile
   if a CC other than __cdecl has been made the default.
 */
 #if defined(_MSC_VER)
@@ -92,29 +94,17 @@
 
 
 /* target attribute */
-#ifndef __has_attribute
-  #define __has_attribute(x) 0  /* Compatibility with non-clang compilers. */
-#endif
 #if defined(__GNUC__) || defined(__ICCARM__)
 #  define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
 #else
 #  define TARGET_ATTRIBUTE(target)
 #endif
 
-/* Enable runtime BMI2 dispatch based on the CPU.
- * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
+/* Target attribute for BMI2 dynamic dispatch.
+ * Enable lzcnt, bmi, and bmi2.
+ * We test for bmi1 & bmi2. lzcnt is included in bmi1.
  */
-#ifndef DYNAMIC_BMI2
-#  if ((defined(__clang__) && __has_attribute(__target__)) \
-      || (defined(__GNUC__) \
-          && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
-      && (defined(__x86_64__) || defined(_M_X86)) \
-      && !defined(__BMI2__)
-#    define DYNAMIC_BMI2 1
-#  else
-#    define DYNAMIC_BMI2 0
-#  endif
-#endif
+#define BMI2_TARGET_ATTRIBUTE TARGET_ATTRIBUTE("lzcnt,bmi,bmi2")
 
 /* prefetch
  * can be disabled, by declaring NO_PREFETCH build macro */
@@ -150,8 +140,9 @@
 }
 
 /* vectorization
- * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax */
-#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__)
+ * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax,
+ * and some compilers, like Intel ICC and MCST LCC, do not support it at all. */
+#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__) && !defined(__LCC__)
 # if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5)
 #  define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
 # else
@@ -197,25 +188,91 @@
 #define STATIC_BMI2 0
 #endif
 
-/* compat. with non-clang compilers */
-#ifndef __has_builtin
-#  define __has_builtin(x) 0
-#endif
+/* compile time determination of SIMD support */
+#if !defined(ZSTD_NO_INTRINSICS)
+#  if defined(__SSE2__) || defined(_M_AMD64) || (defined (_M_IX86) && defined(_M_IX86_FP) && (_M_IX86_FP >= 2))
+#    define ZSTD_ARCH_X86_SSE2
+#  endif
+#  if defined(__ARM_NEON) || defined(_M_ARM64)
+#    define ZSTD_ARCH_ARM_NEON
+#  endif
+#
+#  if defined(ZSTD_ARCH_X86_SSE2)
+#    include <emmintrin.h>
+#  elif defined(ZSTD_ARCH_ARM_NEON)
+#    include <arm_neon.h>
+#  endif
+#endif
 
-/* compat. with non-clang compilers */
-#ifndef __has_feature
-#  define __has_feature(x) 0
-#endif
-
-/* detects whether we are being compiled under msan */
-#ifndef ZSTD_MEMORY_SANITIZER
-#  if __has_feature(memory_sanitizer)
-#    define ZSTD_MEMORY_SANITIZER 1
-#  else
-#    define ZSTD_MEMORY_SANITIZER 0
-#  endif
-#endif
+/* C-language Attributes are added in C23. */
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute)
+#  define ZSTD_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
+#else
+#  define ZSTD_HAS_C_ATTRIBUTE(x) 0
+#endif
 
+/* Only use C++ attributes in C++. Some compilers report support for C++
+ * attributes when compiling with C.
+ */
+#if defined(__cplusplus) && defined(__has_cpp_attribute)
+#  define ZSTD_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
+#else
+#  define ZSTD_HAS_CPP_ATTRIBUTE(x) 0
+#endif
+
+/* Define ZSTD_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute.
+ * - C23: https://en.cppreference.com/w/c/language/attributes/fallthrough
+ * - CPP17: https://en.cppreference.com/w/cpp/language/attributes/fallthrough
+ * - Else: __attribute__((__fallthrough__))
+ */
+#ifndef ZSTD_FALLTHROUGH
+#  if ZSTD_HAS_C_ATTRIBUTE(fallthrough)
+#    define ZSTD_FALLTHROUGH [[fallthrough]]
+#  elif ZSTD_HAS_CPP_ATTRIBUTE(fallthrough)
+#    define ZSTD_FALLTHROUGH [[fallthrough]]
+#  elif __has_attribute(__fallthrough__)
+/* Leading semicolon is to satisfy gcc-11 with -pedantic. Without the semicolon
+ * gcc complains about: a label can only be part of a statement and a declaration is not a statement.
+ */
+#    define ZSTD_FALLTHROUGH ; __attribute__((__fallthrough__))
+#  else
+#    define ZSTD_FALLTHROUGH
+#  endif
+#endif
+
+/*-**************************************************************
+*  Alignment check
+*****************************************************************/
+
+/* this test was initially positioned in mem.h,
+ * but this file is removed (or replaced) for linux kernel
+ * so it's now hosted in compiler.h,
+ * which remains valid for both user & kernel spaces.
+ */
+
+#ifndef ZSTD_ALIGNOF
+#  if defined(__GNUC__) || defined(_MSC_VER)
+/* covers gcc, clang & MSVC */
+/* note : this section must come first, before C11,
+ * due to a limitation in the kernel source generator */
+#    define ZSTD_ALIGNOF(T) __alignof(T)
+
+#  elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
+/* C11 support */
+#    include <stdalign.h>
+#    define ZSTD_ALIGNOF(T) alignof(T)
+
+#  else
+/* No known support for alignof() - imperfect backup */
+#    define ZSTD_ALIGNOF(T) (sizeof(void*) < sizeof(T) ? sizeof(void*) : sizeof(T))
+
+#  endif
+#endif /* ZSTD_ALIGNOF */
+
+/*-**************************************************************
+*  Sanitizer
+*****************************************************************/
+
 #if ZSTD_MEMORY_SANITIZER
 /* Not all platforms that support msan provide sanitizers/msan_interface.h.
  * We therefore declare the functions we need ourselves, rather than trying to
@@ -237,17 +294,6 @@ void __msan_poison(const volatile void *a, size_t size);
 intptr_t __msan_test_shadow(const volatile void *x, size_t size);
 #endif
 
-/* detects whether we are being compiled under asan */
-#ifndef ZSTD_ADDRESS_SANITIZER
-#  if __has_feature(address_sanitizer)
-#    define ZSTD_ADDRESS_SANITIZER 1
-#  elif defined(__SANITIZE_ADDRESS__)
-#    define ZSTD_ADDRESS_SANITIZER 1
-#  else
-#    define ZSTD_ADDRESS_SANITIZER 0
-#  endif
-#endif
-
 #if ZSTD_ADDRESS_SANITIZER
 /* Not all platforms that support asan provide sanitizers/asan_interface.h.
  * We therefore declare the functions we need ourselves, rather than trying to
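ZSTD_FALLTHROUGH replaces the old /* fall-through */ marker comments (see the BIT_initDStream() hunk above): depending on the language mode it expands to the C23/C++17 [[fallthrough]] attribute, GCC's __attribute__((__fallthrough__)), or nothing, so implicit-fallthrough warnings stay quiet without relying on magic comments. A hedged usage sketch (our example, not from the diff; assumes compiler.h is in scope):

/* Sum up to three bytes; each case deliberately falls into the next one. */
static unsigned accumulate3(const unsigned char* p, size_t n)
{
    unsigned acc = 0;
    switch (n) {
    case 3: acc += (unsigned)p[2] << 16;
            ZSTD_FALLTHROUGH;
    case 2: acc += (unsigned)p[1] << 8;
            ZSTD_FALLTHROUGH;
    case 1: acc += p[0];
            ZSTD_FALLTHROUGH;
    default: break;
    }
    return acc;
}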
zstd/common/entropy_common.c

@@ -43,8 +43,14 @@ static U32 FSE_ctz(U32 val)
     assert(val != 0);
     {
 #   if defined(_MSC_VER)   /* Visual */
-        unsigned long r=0;
-        return _BitScanForward(&r, val) ? (unsigned)r : 0;
+        if (val != 0) {
+            unsigned long r;
+            _BitScanForward(&r, val);
+            return (unsigned)r;
+        } else {
+            /* Should not reach this code path */
+            __assume(0);
+        }
 #   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* GCC Intrinsic */
         return __builtin_ctz(val);
 #   elif defined(__ICCARM__)    /* IAR Intrinsic */
@@ -217,7 +223,7 @@ static size_t FSE_readNCount_body_default(
 }
 
 #if DYNAMIC_BMI2
-TARGET_ATTRIBUTE("bmi2") static size_t FSE_readNCount_body_bmi2(
+BMI2_TARGET_ATTRIBUTE static size_t FSE_readNCount_body_bmi2(
         short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
         const void* headerBuffer, size_t hbSize)
 {
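BMI2_TARGET_ATTRIBUTE feeds zstd's usual two-body dispatch pattern: the same C body is compiled once portably and once with the lzcnt/bmi/bmi2 target attribute, and a thin wrapper selects one at runtime. A sketch of the pattern with illustrative names (work_body_default, work_body_bmi2, and work are ours, not from the diff; DYNAMIC_BMI2 and BMI2_TARGET_ATTRIBUTE come from the headers above):

static size_t work_body_default(const void* src, size_t size)
{
    (void)src;   /* portable implementation would go here */
    return size;
}

#if DYNAMIC_BMI2
BMI2_TARGET_ATTRIBUTE static size_t work_body_bmi2(const void* src, size_t size)
{
    (void)src;   /* same C source, compiled with lzcnt/bmi/bmi2 enabled */
    return size;
}
#endif

static size_t work(const void* src, size_t size, int bmi2)
{
#if DYNAMIC_BMI2
    if (bmi2) return work_body_bmi2(src, size);  /* bmi2 flag comes from a CPUID check */
#else
    (void)bmi2;
#endif
    return work_body_default(src, size);
}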
@@ -299,7 +305,7 @@ HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats,
     ZSTD_memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
     weightTotal = 0;
     {   U32 n; for (n=0; n<oSize; n++) {
-            if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected);
+            if (huffWeight[n] > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
             rankStats[huffWeight[n]]++;
             weightTotal += (1 << huffWeight[n]) >> 1;
     }   }
@@ -337,7 +343,7 @@ static size_t HUF_readStats_body_default(BYTE* huffWeight, size_t hwSize, U32* r
 }
 
 #if DYNAMIC_BMI2
-static TARGET_ATTRIBUTE("bmi2") size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+static BMI2_TARGET_ATTRIBUTE size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                      U32* nbSymbolsPtr, U32* tableLogPtr,
                      const void* src, size_t srcSize,
                      void* workSpace, size_t wkspSize)
zstd/common/error_private.h

@@ -22,6 +22,8 @@ extern "C" {
 *  Dependencies
 ******************************************/
 #include "../zstd_errors.h"  /* enum list */
+#include "compiler.h"
+#include "debug.h"
 #include "zstd_deps.h"       /* size_t */
 
 
@@ -73,6 +75,83 @@ ERR_STATIC const char* ERR_getErrorName(size_t code)
     return ERR_getErrorString(ERR_getErrorCode(code));
 }
 
+/**
+ * Ignore: this is an internal helper.
+ *
+ * This is a helper function to help force C99-correctness during compilation.
+ * Under strict compilation modes, variadic macro arguments can't be empty.
+ * However, variadic function arguments can be. Using a function therefore lets
+ * us statically check that at least one (string) argument was passed,
+ * independent of the compilation flags.
+ */
+static INLINE_KEYWORD UNUSED_ATTR
+void _force_has_format_string(const char *format, ...) {
+  (void)format;
+}
+
+/**
+ * Ignore: this is an internal helper.
+ *
+ * We want to force this function invocation to be syntactically correct, but
+ * we don't want to force runtime evaluation of its arguments.
+ */
+#define _FORCE_HAS_FORMAT_STRING(...) \
+  if (0) { \
+    _force_has_format_string(__VA_ARGS__); \
+  }
+
+#define ERR_QUOTE(str) #str
+
+/**
+ * Return the specified error if the condition evaluates to true.
+ *
+ * In debug modes, prints additional information.
+ * In order to do that (particularly, printing the conditional that failed),
+ * this can't just wrap RETURN_ERROR().
+ */
+#define RETURN_ERROR_IF(cond, err, ...) \
+  if (cond) { \
+    RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \
+           __FILE__, __LINE__, ERR_QUOTE(cond), ERR_QUOTE(ERROR(err))); \
+    _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
+    RAWLOG(3, ": " __VA_ARGS__); \
+    RAWLOG(3, "\n"); \
+    return ERROR(err); \
+  }
+
+/**
+ * Unconditionally return the specified error.
+ *
+ * In debug modes, prints additional information.
+ */
+#define RETURN_ERROR(err, ...) \
+  do { \
+    RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \
+           __FILE__, __LINE__, ERR_QUOTE(ERROR(err))); \
+    _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
+    RAWLOG(3, ": " __VA_ARGS__); \
+    RAWLOG(3, "\n"); \
+    return ERROR(err); \
+  } while(0);
+
+/**
+ * If the provided expression evaluates to an error code, returns that error code.
+ *
+ * In debug modes, prints additional information.
+ */
+#define FORWARD_IF_ERROR(err, ...) \
+  do { \
+    size_t const err_code = (err); \
+    if (ERR_isError(err_code)) { \
+      RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \
+             __FILE__, __LINE__, ERR_QUOTE(err), ERR_getErrorName(err_code)); \
+      _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
+      RAWLOG(3, ": " __VA_ARGS__); \
+      RAWLOG(3, "\n"); \
+      return err_code; \
+    } \
+  } while(0);
+
 #if defined (__cplusplus)
 }
 #endif
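These macros centralize error returns and debug logging. A hedged call-site sketch (ours, not from the diff; it assumes a zstd translation unit where error_private.h, debug.h's RAWLOG, and the dstSize_tooSmall error code are all visible):

/* Returns ERROR(dstSize_tooSmall) when the buffer is too small and, at debug
 * level 3 or above, logs the failed condition with file and line. */
static size_t checkedWrite(void* dst, size_t dstCapacity, size_t needed)
{
    (void)dst;
    RETURN_ERROR_IF(needed > dstCapacity, dstSize_tooSmall,
                    "need %u bytes, have %u", (unsigned)needed, (unsigned)dstCapacity);
    return needed;
}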
zstd/common/fse.h

@@ -336,8 +336,9 @@ size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
 /* FSE_buildCTable_wksp() :
  * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
  * `wkspSize` must be >= `FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)` of `unsigned`.
+ * See FSE_buildCTable_wksp() for breakdown of workspace usage.
  */
-#define FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog) (maxSymbolValue + 2 + (1ull << (tableLog - 2)))
+#define FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog) (((maxSymbolValue + 2) + (1ull << (tableLog)))/2 + sizeof(U64)/sizeof(U32) /* additional 8 bytes for potential table overwrite */)
 #define FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) (sizeof(unsigned) * FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog))
 size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
 
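The workspace bound changes shape as well as size. A worked example under the common bounds maxSymbolValue = 255, tableLog = 12 (our arithmetic, not from the diff):

/* old: 255 + 2 + (1ull << (12 - 2))           = 1281 unsigned slots = 5124 bytes
 * new: ((255 + 2) + (1ull << 12)) / 2 + 2     = 2178 unsigned slots = 8712 bytes
 * The trailing +2 is sizeof(U64)/sizeof(U32), i.e. the 8 spare bytes the
 * comment reserves for a potential table overwrite. */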
zstd/common/fse_decompress.c

@@ -365,7 +365,7 @@ static size_t FSE_decompress_wksp_body_default(void* dst, size_t dstCapacity, co
 }
 
 #if DYNAMIC_BMI2
-TARGET_ATTRIBUTE("bmi2") static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
+BMI2_TARGET_ATTRIBUTE static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
 {
     return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 1);
 }
zstd/common/huf.h

@@ -89,9 +89,9 @@ HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity,
 
 /** HUF_compress4X_wksp() :
  *  Same as HUF_compress2(), but uses externally allocated `workSpace`.
- *  `workspace` must have minimum alignment of 4, and be at least as large as HUF_WORKSPACE_SIZE */
-#define HUF_WORKSPACE_SIZE ((6 << 10) + 256)
-#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))
+ *  `workspace` must be at least as large as HUF_WORKSPACE_SIZE */
+#define HUF_WORKSPACE_SIZE ((8 << 10) + 512 /* sorting scratch space */)
+#define HUF_WORKSPACE_SIZE_U64 (HUF_WORKSPACE_SIZE / sizeof(U64))
 HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
                                            const void* src, size_t srcSize,
                                            unsigned maxSymbolValue, unsigned tableLog,
@@ -116,11 +116,11 @@ HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
 
 
 /* *** Constants *** */
-#define HUF_TABLELOG_MAX      12      /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
+#define HUF_TABLELOG_MAX      12      /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */
 #define HUF_TABLELOG_DEFAULT  11      /* default tableLog value when none specified */
 #define HUF_SYMBOLVALUE_MAX  255
 
-#define HUF_TABLELOG_ABSOLUTEMAX  15  /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
+#define HUF_TABLELOG_ABSOLUTEMAX  12  /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
 #if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
 #  error "HUF_TABLELOG_MAX is too large !"
 #endif
@@ -136,15 +136,11 @@ HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
 
 /* static allocation of HUF's Compression Table */
 /* this is a private definition, just exposed for allocation and strict aliasing purpose. never EVER access its members directly */
-struct HUF_CElt_s {
-  U16  val;
-  BYTE nbBits;
-};   /* typedef'd to HUF_CElt */
-typedef struct HUF_CElt_s HUF_CElt;   /* consider it an incomplete type */
-#define HUF_CTABLE_SIZE_U32(maxSymbolValue)   ((maxSymbolValue)+1)   /* Use tables of U32, for proper alignment */
-#define HUF_CTABLE_SIZE(maxSymbolValue)       (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32))
+typedef size_t HUF_CElt;   /* consider it an incomplete type */
+#define HUF_CTABLE_SIZE_ST(maxSymbolValue)   ((maxSymbolValue)+2)   /* Use tables of size_t, for proper alignment */
+#define HUF_CTABLE_SIZE(maxSymbolValue)      (HUF_CTABLE_SIZE_ST(maxSymbolValue) * sizeof(size_t))
 #define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
-    HUF_CElt name[HUF_CTABLE_SIZE_U32(maxSymbolValue)] /* no final ; */
+    HUF_CElt name[HUF_CTABLE_SIZE_ST(maxSymbolValue)] /* no final ; */
 
 /* static allocation of HUF's DTable */
 typedef U32 HUF_DTable;
@@ -194,6 +190,7 @@ size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSym
 size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
 size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize);
 size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
+size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2);
 size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
 int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
 
@@ -206,12 +203,13 @@ typedef enum {
 *  Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
 *  If it uses hufTable it does not modify hufTable or repeat.
 *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
-*  If preferRepeat then the old table will always be used if valid. */
+*  If preferRepeat then the old table will always be used if valid.
+*  If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
 size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
                        const void* src, size_t srcSize,
                        unsigned maxSymbolValue, unsigned tableLog,
                        void* workSpace, size_t wkspSize,    /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
-                       HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
+                       HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible);
 
 /** HUF_buildCTable_wksp() :
  *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
@@ -249,11 +247,10 @@ size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize,
 *  Loading a CTable saved with HUF_writeCTable() */
 size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights);
 
-/** HUF_getNbBits() :
+/** HUF_getNbBitsFromCTable() :
 *  Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX
-*  Note 1 : is not inlined, as HUF_CElt definition is private
-*  Note 2 : const void* used, so that it can provide a statically allocated table as argument (which uses type U32) */
-U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue);
+*  Note 1 : is not inlined, as HUF_CElt definition is private */
+U32 HUF_getNbBitsFromCTable(const HUF_CElt* symbolTable, U32 symbolValue);
 
 /*
  * HUF_decompress() does the following:
@@ -305,18 +302,20 @@ size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* c
 /* ====================== */
 
 size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
-size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);  /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
+size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);  /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U64 U64 */
 size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
+size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2);
 /** HUF_compress1X_repeat() :
 *  Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
 *  If it uses hufTable it does not modify hufTable or repeat.
 *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
-*  If preferRepeat then the old table will always be used if valid. */
+*  If preferRepeat then the old table will always be used if valid.
+*  If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
 size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
                        const void* src, size_t srcSize,
                        unsigned maxSymbolValue, unsigned tableLog,
                        void* workSpace, size_t wkspSize,   /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
-                       HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
+                       HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible);
 
 size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
 #ifndef HUF_FORCE_DECOMPRESS_X1
@@ -354,6 +353,9 @@ size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t ds
 #ifndef HUF_FORCE_DECOMPRESS_X2
 size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2);
 #endif
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2);
+#endif
 
 #endif /* HUF_STATIC_LINKING_ONLY */
 
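The CTable element goes from a {U16 val; BYTE nbBits;} struct to an opaque size_t, which also renames the allocation macros (HUF_CTABLE_SIZE_U32 becomes HUF_CTABLE_SIZE_ST) and the accessor (HUF_getNbBits becomes HUF_getNbBitsFromCTable). A hedged usage sketch (ours, not from the diff; it assumes the long-standing HUF_buildCTable() signature shown in the hunk header above):

#define HUF_STATIC_LINKING_ONLY
#include "huf.h"

/* Build a table from symbol counts and read one symbol's code length.
 * HUF_CREATE_STATIC_CTABLE expands to HUF_CElt ct[HUF_CTABLE_SIZE_ST(255)],
 * i.e. 257 size_t slots under the new layout. */
static U32 symbolCost(const unsigned* count, unsigned maxSymbolValue, U32 symbol)
{
    HUF_CREATE_STATIC_CTABLE(ct, HUF_SYMBOLVALUE_MAX);
    size_t const err = HUF_buildCTable(ct, count, maxSymbolValue, HUF_TABLELOG_DEFAULT);
    if (HUF_isError(err)) return 0;
    return HUF_getNbBitsFromCTable(ct, symbol);  /* renamed from HUF_getNbBits() */
}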
zstd/common/mem.h

@@ -51,6 +51,8 @@ extern "C" {
 #  include <stdint.h> /* intptr_t */
 # endif
   typedef   uint8_t BYTE;
+  typedef   uint8_t U8;
+  typedef    int8_t S8;
   typedef  uint16_t U16;
   typedef   int16_t S16;
   typedef  uint32_t U32;
@@ -63,6 +65,8 @@ extern "C" {
 #   error "this implementation requires char to be exactly 8-bit type"
 #endif
   typedef unsigned char   BYTE;
+  typedef unsigned char   U8;
+  typedef   signed char   S8;
 #if USHRT_MAX != 65535
 #   error "this implementation requires short to be exactly 16-bit type"
 #endif
@@ -153,8 +157,22 @@ MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }
 
 MEM_STATIC unsigned MEM_isLittleEndian(void)
 {
+#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+    return 1;
+#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+    return 0;
+#elif defined(__clang__) && __LITTLE_ENDIAN__
+    return 1;
+#elif defined(__clang__) && __BIG_ENDIAN__
+    return 0;
+#elif defined(_MSC_VER) && (_M_AMD64 || _M_IX86)
+    return 1;
+#elif defined(__DMC__) && defined(_M_IX86)
+    return 1;
+#else
     const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
     return one.c[0];
+#endif
 }
 
 #if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
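With these compile-time paths, MEM_isLittleEndian() folds to the constant 1 or 0 on GCC, Clang, and MSVC, so endian-dependent branches can be eliminated entirely; the union probe survives only as the portable runtime fallback. A standalone restatement of that fallback (ours, not from the diff):

/* 1 on little-endian targets, 0 on big-endian targets; evaluated at runtime
 * only when no compiler-provided endianness macro is available. */
static unsigned isLittleEndian_fallback(void)
{
    const union { unsigned u; unsigned char c[4]; } one = { 1 };
    return one.c[0];
}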
131
zstd/common/portability_macros.h
Normal file
131
zstd/common/portability_macros.h
Normal file
@ -0,0 +1,131 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) Facebook, Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* This source code is licensed under both the BSD-style license (found in the
|
||||||
|
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
|
||||||
|
* in the COPYING file in the root directory of this source tree).
|
||||||
|
* You may select, at your option, one of the above-listed licenses.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef ZSTD_PORTABILITY_MACROS_H
|
||||||
|
#define ZSTD_PORTABILITY_MACROS_H
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This header file contains macro defintions to support portability.
|
||||||
|
* This header is shared between C and ASM code, so it MUST only
|
||||||
|
* contain macro definitions. It MUST not contain any C code.
|
||||||
|
*
|
||||||
|
* This header ONLY defines macros to detect platforms/feature support.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
|
||||||
|
/* compat. with non-clang compilers */
|
||||||
|
#ifndef __has_attribute
|
||||||
|
#define __has_attribute(x) 0
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* compat. with non-clang compilers */
|
||||||
|
#ifndef __has_builtin
|
||||||
|
# define __has_builtin(x) 0
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* compat. with non-clang compilers */
|
||||||
|
#ifndef __has_feature
|
||||||
|
# define __has_feature(x) 0
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* detects whether we are being compiled under msan */
|
||||||
|
#ifndef ZSTD_MEMORY_SANITIZER
|
||||||
|
# if __has_feature(memory_sanitizer)
|
||||||
|
# define ZSTD_MEMORY_SANITIZER 1
|
||||||
|
# else
|
||||||
|
# define ZSTD_MEMORY_SANITIZER 0
|
||||||
|
# endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* detects whether we are being compiled under asan */
|
||||||
|
#ifndef ZSTD_ADDRESS_SANITIZER
|
||||||
|
# if __has_feature(address_sanitizer)
|
||||||
|
# define ZSTD_ADDRESS_SANITIZER 1
|
||||||
|
# elif defined(__SANITIZE_ADDRESS__)
|
||||||
|
# define ZSTD_ADDRESS_SANITIZER 1
|
||||||
|
# else
|
||||||
|
# define ZSTD_ADDRESS_SANITIZER 0
|
||||||
|
# endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* detects whether we are being compiled under dfsan */
|
||||||
|
#ifndef ZSTD_DATAFLOW_SANITIZER
|
||||||
|
# if __has_feature(dataflow_sanitizer)
|
||||||
|
# define ZSTD_DATAFLOW_SANITIZER 1
|
||||||
|
# else
|
||||||
|
# define ZSTD_DATAFLOW_SANITIZER 0
|
||||||
|
# endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
/* Enable runtime BMI2 dispatch based on the CPU.
|
||||||
|
* Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
|
||||||
|
*/
|
||||||
|
#ifndef DYNAMIC_BMI2
|
||||||
|
#if ((defined(__clang__) && __has_attribute(__target__)) \
|
||||||
|
|| (defined(__GNUC__) \
|
||||||
|
&& (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
|
||||||
|
&& (defined(__x86_64__) || defined(_M_X64)) \
|
||||||
|
&& !defined(__BMI2__)
|
||||||
|
# define DYNAMIC_BMI2 1
|
||||||
|
#else
|
||||||
|
# define DYNAMIC_BMI2 0
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Only enable assembly for GNUC comptabile compilers,
|
||||||
|
* because other platforms may not support GAS assembly syntax.
|
||||||
|
*
|
||||||
|
* Only enable assembly for Linux / MacOS, other platforms may
|
||||||
|
* work, but they haven't been tested. This could likely be
|
||||||
|
* extended to BSD systems.
|
||||||
|
*
|
||||||
|
* Disable assembly when MSAN is enabled, because MSAN requires
|
||||||
|
* 100% of code to be instrumented to work.
|
||||||
|
*/
|
||||||
|
#if defined(__GNUC__)
#  if defined(__linux__) || defined(__linux) || defined(__APPLE__)
#    if ZSTD_MEMORY_SANITIZER
#      define ZSTD_ASM_SUPPORTED 0
#    elif ZSTD_DATAFLOW_SANITIZER
#      define ZSTD_ASM_SUPPORTED 0
#    else
#      define ZSTD_ASM_SUPPORTED 1
#    endif
#  else
#    define ZSTD_ASM_SUPPORTED 0
#  endif
#else
#  define ZSTD_ASM_SUPPORTED 0
#endif

/**
 * Determines whether we should enable assembly for x86-64
 * with BMI2.
 *
 * Enable if all of the following conditions hold:
 * - ASM hasn't been explicitly disabled by defining ZSTD_DISABLE_ASM
 * - Assembly is supported
 * - We are compiling for x86-64 and either:
 *   - DYNAMIC_BMI2 is enabled
 *   - BMI2 is supported at compile time
 */
#if !defined(ZSTD_DISABLE_ASM) && \
    ZSTD_ASM_SUPPORTED && \
    defined(__x86_64__) && \
    (DYNAMIC_BMI2 || defined(__BMI2__))
#  define ZSTD_ENABLE_ASM_X86_64_BMI2 1
#else
#  define ZSTD_ENABLE_ASM_X86_64_BMI2 0
#endif

#endif /* ZSTD_PORTABILITY_MACROS_H */
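As a rough illustration of how a translation unit might consume these detection macros, here is a minimal sketch (not code from this commit; decode_asm and decode_portable are hypothetical names):

/* Sketch: pick the assembly path only when the portability macros say it is
 * safe to assemble and link it. */
#include "portability_macros.h"

#if ZSTD_ENABLE_ASM_X86_64_BMI2
void decode_asm(void* dst, const void* src);   /* hypothetically provided by a .S file */
#endif

static void decode_portable(void* dst, const void* src)
{
    (void)dst; (void)src;   /* stand-in for the C fallback path */
}

static void decode(void* dst, const void* src)
{
#if ZSTD_ENABLE_ASM_X86_64_BMI2
    decode_asm(dst, src);       /* x86-64, GAS syntax available, no MSAN/DFSAN */
#else
    decode_portable(dst, src);  /* MSVC, other ISAs, or sanitized builds */
#endif
}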
@ -13,812 +13,12 @@
 */

+/*
+ * xxhash.c instantiates functions defined in xxhash.h
+ */
+
+#define XXH_STATIC_LINKING_ONLY   /* access advanced declarations */
+#define XXH_IMPLEMENTATION   /* access definitions */
+
-
-/* *************************************
-*  Tuning parameters
-***************************************/
-/*!XXH_FORCE_MEMORY_ACCESS :
- * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
- * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
- * The below switch allow to select different access method for improved performance.
- * Method 0 (default) : use `memcpy()`. Safe and portable.
- * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
- *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
- * Method 2 : direct access. This method doesn't depend on compiler but violate C standard.
- *            It can generate buggy code on targets which do not support unaligned memory accesses.
- *            But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
- * See http://stackoverflow.com/a/32095106/646947 for details.
- * Prefer these methods in priority order (0 > 1 > 2)
- */
-#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
-#  if (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
-    (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) || \
-    defined(__ICCARM__)
-#    define XXH_FORCE_MEMORY_ACCESS 1
-#  endif
-#endif
-
-/*!XXH_ACCEPT_NULL_INPUT_POINTER :
- * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer.
- * When this option is enabled, xxHash output for null input pointers will be the same as a null-length input.
- * By default, this option is disabled. To enable it, uncomment below define :
- */
-/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
-
-/*!XXH_FORCE_NATIVE_FORMAT :
- * By default, xxHash library provides endian-independent Hash values, based on little-endian convention.
- * Results are therefore identical for little-endian and big-endian CPU.
- * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format.
- * Should endian-independence be of no importance for your application, you may set the #define below to 1,
- * to improve speed for Big-endian CPU.
- * This option has no impact on Little_Endian CPU.
- */
-#ifndef XXH_FORCE_NATIVE_FORMAT   /* can be defined externally */
-#  define XXH_FORCE_NATIVE_FORMAT 0
-#endif
-
-/*!XXH_FORCE_ALIGN_CHECK :
- * This is a minor performance trick, only useful with lots of very small keys.
- * It means : check for aligned/unaligned input.
- * The check costs one initial branch per hash; set to 0 when the input data
- * is guaranteed to be aligned.
- */
-#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
-#  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
-#    define XXH_FORCE_ALIGN_CHECK 0
-#  else
-#    define XXH_FORCE_ALIGN_CHECK 1
-#  endif
-#endif
-
-/* *************************************
-*  Includes & Memory related functions
-***************************************/
-/* Modify the local functions below should you wish to use some other memory routines */
-/* for ZSTD_malloc(), ZSTD_free() */
-#define ZSTD_DEPS_NEED_MALLOC
-#include "zstd_deps.h"  /* size_t, ZSTD_malloc, ZSTD_free, ZSTD_memcpy */
-static void* XXH_malloc(size_t s) { return ZSTD_malloc(s); }
-static void  XXH_free  (void* p)  { ZSTD_free(p); }
-static void* XXH_memcpy(void* dest, const void* src, size_t size) { return ZSTD_memcpy(dest,src,size); }
-
-#ifndef XXH_STATIC_LINKING_ONLY
-#  define XXH_STATIC_LINKING_ONLY
-#endif
 #include "xxhash.h"
-
-/* *************************************
-*  Compiler Specific Options
-***************************************/
-#include "compiler.h"
-
-/* *************************************
-*  Basic Types
-***************************************/
-#include "mem.h"  /* BYTE, U32, U64, size_t */
-
-#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
-
-/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
-static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
-static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }
-
-#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
-
-/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
-/* currently only defined for gcc and icc */
-typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign;
-
-static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
-static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
-
-#else
-
-/* portable and safe solution. Generally efficient.
- * see : http://stackoverflow.com/a/32095106/646947
- */
-
-static U32 XXH_read32(const void* memPtr)
-{
-    U32 val;
-    ZSTD_memcpy(&val, memPtr, sizeof(val));
-    return val;
-}
-
-static U64 XXH_read64(const void* memPtr)
-{
-    U64 val;
-    ZSTD_memcpy(&val, memPtr, sizeof(val));
-    return val;
-}
-
-#endif   /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
-
-/* ****************************************
-*  Compiler-specific Functions and Macros
-******************************************/
-#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
-
-/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
-#if defined(_MSC_VER)
-#  define XXH_rotl32(x,r) _rotl(x,r)
-#  define XXH_rotl64(x,r) _rotl64(x,r)
-#else
-#if defined(__ICCARM__)
-#  include <intrinsics.h>
-#  define XXH_rotl32(x,r) __ROR(x,(32 - r))
-#else
-#  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
-#endif
-#  define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
-#endif
-
-#if defined(_MSC_VER)     /* Visual Studio */
-#  define XXH_swap32 _byteswap_ulong
-#  define XXH_swap64 _byteswap_uint64
-#elif GCC_VERSION >= 403
-#  define XXH_swap32 __builtin_bswap32
-#  define XXH_swap64 __builtin_bswap64
-#else
-static U32 XXH_swap32 (U32 x)
-{
-    return  ((x << 24) & 0xff000000 ) |
-            ((x <<  8) & 0x00ff0000 ) |
-            ((x >>  8) & 0x0000ff00 ) |
-            ((x >> 24) & 0x000000ff );
-}
-static U64 XXH_swap64 (U64 x)
-{
-    return  ((x << 56) & 0xff00000000000000ULL) |
-            ((x << 40) & 0x00ff000000000000ULL) |
-            ((x << 24) & 0x0000ff0000000000ULL) |
-            ((x << 8)  & 0x000000ff00000000ULL) |
-            ((x >> 8)  & 0x00000000ff000000ULL) |
-            ((x >> 24) & 0x0000000000ff0000ULL) |
-            ((x >> 40) & 0x000000000000ff00ULL) |
-            ((x >> 56) & 0x00000000000000ffULL);
-}
-#endif
-
-
-/* *************************************
-*  Architecture Macros
-***************************************/
-typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
-
-/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
-#ifndef XXH_CPU_LITTLE_ENDIAN
-static const int g_one = 1;
-#  define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&g_one))
-#endif
-
-/* ***************************
-*  Memory reads
-*****************************/
-typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
-
-FORCE_INLINE_TEMPLATE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
-{
-    if (align==XXH_unaligned)
-        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
-    else
-        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
-}
-
-FORCE_INLINE_TEMPLATE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
-{
-    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
-}
-
-static U32 XXH_readBE32(const void* ptr)
-{
-    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
-}
-
-FORCE_INLINE_TEMPLATE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
-{
-    if (align==XXH_unaligned)
-        return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
-    else
-        return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
-}
-
-FORCE_INLINE_TEMPLATE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
-{
-    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
-}
-
-static U64 XXH_readBE64(const void* ptr)
-{
-    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
-}
-
-
-/* *************************************
-*  Macros
-***************************************/
-#define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(int)(!!(c)) }; }    /* use only *after* variable declarations */
-
-
-/* *************************************
-*  Constants
-***************************************/
-static const U32 PRIME32_1 = 2654435761U;
-static const U32 PRIME32_2 = 2246822519U;
-static const U32 PRIME32_3 = 3266489917U;
-static const U32 PRIME32_4 =  668265263U;
-static const U32 PRIME32_5 =  374761393U;
-
-static const U64 PRIME64_1 = 11400714785074694791ULL;
-static const U64 PRIME64_2 = 14029467366897019727ULL;
-static const U64 PRIME64_3 =  1609587929392839161ULL;
-static const U64 PRIME64_4 =  9650029242287828579ULL;
-static const U64 PRIME64_5 =  2870177450012600261ULL;
-
-XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
-
-
-/* **************************
-*  Utils
-****************************/
-XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState)
-{
-    ZSTD_memcpy(dstState, srcState, sizeof(*dstState));
-}
-
-XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState)
-{
-    ZSTD_memcpy(dstState, srcState, sizeof(*dstState));
-}
-
-
-/* ***************************
-*  Simple Hash Functions
-*****************************/
-
-static U32 XXH32_round(U32 seed, U32 input)
-{
-    seed += input * PRIME32_2;
-    seed  = XXH_rotl32(seed, 13);
-    seed *= PRIME32_1;
-    return seed;
-}
-
-FORCE_INLINE_TEMPLATE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
-{
-    const BYTE* p = (const BYTE*)input;
-    const BYTE* bEnd = p + len;
-    U32 h32;
-#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
-
-#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
-    if (p==NULL) {
-        len=0;
-        bEnd=p=(const BYTE*)(size_t)16;
-    }
-#endif
-
-    if (len>=16) {
-        const BYTE* const limit = bEnd - 16;
-        U32 v1 = seed + PRIME32_1 + PRIME32_2;
-        U32 v2 = seed + PRIME32_2;
-        U32 v3 = seed + 0;
-        U32 v4 = seed - PRIME32_1;
-
-        do {
-            v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
-            v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
-            v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
-            v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
-        } while (p<=limit);
-
-        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
-    } else {
-        h32  = seed + PRIME32_5;
-    }
-
-    h32 += (U32) len;
-
-    while (p+4<=bEnd) {
-        h32 += XXH_get32bits(p) * PRIME32_3;
-        h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;
-        p+=4;
-    }
-
-    while (p<bEnd) {
-        h32 += (*p) * PRIME32_5;
-        h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
-        p++;
-    }
-
-    h32 ^= h32 >> 15;
-    h32 *= PRIME32_2;
-    h32 ^= h32 >> 13;
-    h32 *= PRIME32_3;
-    h32 ^= h32 >> 16;
-
-    return h32;
-}
-
-
-XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
-{
-#if 0
-    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
-    XXH32_CREATESTATE_STATIC(state);
-    XXH32_reset(state, seed);
-    XXH32_update(state, input, len);
-    return XXH32_digest(state);
-#else
-    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
-    if (XXH_FORCE_ALIGN_CHECK) {
-        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
-            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-                return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
-            else
-                return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
-    }   }
-
-    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
-    else
-        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
-#endif
-}
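The one-shot entry points removed here survive unchanged in the single-header implementation that replaces this file; a minimal usage sketch (the include path assumes zstd's bundled copy of xxhash.h):

#define XXH_STATIC_LINKING_ONLY   /* advanced declarations */
#define XXH_IMPLEMENTATION        /* compile the implementation in this unit */
#include "xxhash.h"
#include <stdio.h>

int main(void)
{
    const char msg[] = "sample input";
    unsigned const h32           = XXH32(msg, sizeof(msg)-1, 0 /* seed */);
    unsigned long long const h64 = XXH64(msg, sizeof(msg)-1, 0);
    printf("XXH32=%08x  XXH64=%016llx\n", h32, h64);
    return 0;
}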
-
-static U64 XXH64_round(U64 acc, U64 input)
-{
-    acc += input * PRIME64_2;
-    acc  = XXH_rotl64(acc, 31);
-    acc *= PRIME64_1;
-    return acc;
-}
-
-static U64 XXH64_mergeRound(U64 acc, U64 val)
-{
-    val  = XXH64_round(0, val);
-    acc ^= val;
-    acc  = acc * PRIME64_1 + PRIME64_4;
-    return acc;
-}
-
-FORCE_INLINE_TEMPLATE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
-{
-    const BYTE* p = (const BYTE*)input;
-    const BYTE* const bEnd = p + len;
-    U64 h64;
-#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
-
-#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
-    if (p==NULL) {
-        len=0;
-        bEnd=p=(const BYTE*)(size_t)32;
-    }
-#endif
-
-    if (len>=32) {
-        const BYTE* const limit = bEnd - 32;
-        U64 v1 = seed + PRIME64_1 + PRIME64_2;
-        U64 v2 = seed + PRIME64_2;
-        U64 v3 = seed + 0;
-        U64 v4 = seed - PRIME64_1;
-
-        do {
-            v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
-            v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
-            v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
-            v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
-        } while (p<=limit);
-
-        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
-        h64 = XXH64_mergeRound(h64, v1);
-        h64 = XXH64_mergeRound(h64, v2);
-        h64 = XXH64_mergeRound(h64, v3);
-        h64 = XXH64_mergeRound(h64, v4);
-
-    } else {
-        h64  = seed + PRIME64_5;
-    }
-
-    h64 += (U64) len;
-
-    while (p+8<=bEnd) {
-        U64 const k1 = XXH64_round(0, XXH_get64bits(p));
-        h64 ^= k1;
-        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
-        p+=8;
-    }
-
-    if (p+4<=bEnd) {
-        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
-        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
-        p+=4;
-    }
-
-    while (p<bEnd) {
-        h64 ^= (*p) * PRIME64_5;
-        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
-        p++;
-    }
-
-    h64 ^= h64 >> 33;
-    h64 *= PRIME64_2;
-    h64 ^= h64 >> 29;
-    h64 *= PRIME64_3;
-    h64 ^= h64 >> 32;
-
-    return h64;
-}
-
-
-XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
-{
-#if 0
-    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
-    XXH64_CREATESTATE_STATIC(state);
-    XXH64_reset(state, seed);
-    XXH64_update(state, input, len);
-    return XXH64_digest(state);
-#else
-    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
-    if (XXH_FORCE_ALIGN_CHECK) {
-        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
-            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-                return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
-            else
-                return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
-    }   }
-
-    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
-    else
-        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
-#endif
-}
-
-
-/* **************************************************
-*  Advanced Hash Functions
-****************************************************/
-
-XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
-{
-    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
-}
-XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
-{
-    XXH_free(statePtr);
-    return XXH_OK;
-}
-
-XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
-{
-    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
-}
-XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
-{
-    XXH_free(statePtr);
-    return XXH_OK;
-}
-
-
-/*** Hash feed ***/
-
-XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
-{
-    XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
-    ZSTD_memset(&state, 0, sizeof(state)-4);   /* do not write into reserved, for future removal */
-    state.v1 = seed + PRIME32_1 + PRIME32_2;
-    state.v2 = seed + PRIME32_2;
-    state.v3 = seed + 0;
-    state.v4 = seed - PRIME32_1;
-    ZSTD_memcpy(statePtr, &state, sizeof(state));
-    return XXH_OK;
-}
-
-
-XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
-{
-    XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
-    ZSTD_memset(&state, 0, sizeof(state)-8);   /* do not write into reserved, for future removal */
-    state.v1 = seed + PRIME64_1 + PRIME64_2;
-    state.v2 = seed + PRIME64_2;
-    state.v3 = seed + 0;
-    state.v4 = seed - PRIME64_1;
-    ZSTD_memcpy(statePtr, &state, sizeof(state));
-    return XXH_OK;
-}
-
-
-FORCE_INLINE_TEMPLATE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
-{
-    const BYTE* p = (const BYTE*)input;
-    const BYTE* const bEnd = p + len;
-
-#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
-    if (input==NULL) return XXH_ERROR;
-#endif
-
-    state->total_len_32 += (unsigned)len;
-    state->large_len |= (len>=16) | (state->total_len_32>=16);
-
-    if (state->memsize + len < 16)  {   /* fill in tmp buffer */
-        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
-        state->memsize += (unsigned)len;
-        return XXH_OK;
-    }
-
-    if (state->memsize) {   /* some data left from previous update */
-        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
-        {   const U32* p32 = state->mem32;
-            state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
-            state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
-            state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
-            state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++;
-        }
-        p += 16-state->memsize;
-        state->memsize = 0;
-    }
-
-    if (p <= bEnd-16) {
-        const BYTE* const limit = bEnd - 16;
-        U32 v1 = state->v1;
-        U32 v2 = state->v2;
-        U32 v3 = state->v3;
-        U32 v4 = state->v4;
-
-        do {
-            v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
-            v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
-            v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
-            v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
-        } while (p<=limit);
-
-        state->v1 = v1;
-        state->v2 = v2;
-        state->v3 = v3;
-        state->v4 = v4;
-    }
-
-    if (p < bEnd) {
-        XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
-        state->memsize = (unsigned)(bEnd-p);
-    }
-
-    return XXH_OK;
-}
-
-XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
-{
-    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
-    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-        return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
-    else
-        return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
-}
-
-
-
-FORCE_INLINE_TEMPLATE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
-{
-    const BYTE * p = (const BYTE*)state->mem32;
-    const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
-    U32 h32;
-
-    if (state->large_len) {
-        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
-    } else {
-        h32 = state->v3 /* == seed */ + PRIME32_5;
-    }
-
-    h32 += state->total_len_32;
-
-    while (p+4<=bEnd) {
-        h32 += XXH_readLE32(p, endian) * PRIME32_3;
-        h32  = XXH_rotl32(h32, 17) * PRIME32_4;
-        p+=4;
-    }
-
-    while (p<bEnd) {
-        h32 += (*p) * PRIME32_5;
-        h32  = XXH_rotl32(h32, 11) * PRIME32_1;
-        p++;
-    }
-
-    h32 ^= h32 >> 15;
-    h32 *= PRIME32_2;
-    h32 ^= h32 >> 13;
-    h32 *= PRIME32_3;
-    h32 ^= h32 >> 16;
-
-    return h32;
-}
-
-
-XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
-{
-    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
-    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-        return XXH32_digest_endian(state_in, XXH_littleEndian);
-    else
-        return XXH32_digest_endian(state_in, XXH_bigEndian);
-}
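The create/reset/update/digest/free sequence above is the streaming counterpart of the one-shot call; a small sketch of the pattern (same API shape after the move into xxhash.h):

#define XXH_STATIC_LINKING_ONLY
#define XXH_IMPLEMENTATION
#include "xxhash.h"

static unsigned hash_two_chunks(const void* a, size_t an, const void* b, size_t bn)
{
    XXH32_state_t* const st = XXH32_createState();
    unsigned h = 0;
    if (st != NULL) {
        XXH32_reset(st, 0 /* seed */);
        XXH32_update(st, a, an);   /* feed data incrementally... */
        XXH32_update(st, b, bn);   /* ...in as many calls as needed */
        h = XXH32_digest(st);      /* equals one-shot XXH32 over a||b */
        XXH32_freeState(st);
    }
    return h;
}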
-
-/* **** XXH64 **** */
-
-FORCE_INLINE_TEMPLATE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
-{
-    const BYTE* p = (const BYTE*)input;
-    const BYTE* const bEnd = p + len;
-
-#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
-    if (input==NULL) return XXH_ERROR;
-#endif
-
-    state->total_len += len;
-
-    if (state->memsize + len < 32) {  /* fill in tmp buffer */
-        if (input != NULL) {
-            XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
-        }
-        state->memsize += (U32)len;
-        return XXH_OK;
-    }
-
-    if (state->memsize) {   /* tmp buffer is full */
-        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
-        state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
-        state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
-        state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
-        state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
-        p += 32-state->memsize;
-        state->memsize = 0;
-    }
-
-    if (p+32 <= bEnd) {
-        const BYTE* const limit = bEnd - 32;
-        U64 v1 = state->v1;
-        U64 v2 = state->v2;
-        U64 v3 = state->v3;
-        U64 v4 = state->v4;
-
-        do {
-            v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
-            v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
-            v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
-            v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
-        } while (p<=limit);
-
-        state->v1 = v1;
-        state->v2 = v2;
-        state->v3 = v3;
-        state->v4 = v4;
-    }
-
-    if (p < bEnd) {
-        XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
-        state->memsize = (unsigned)(bEnd-p);
-    }
-
-    return XXH_OK;
-}
-
-XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
-{
-    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
-    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-        return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
-    else
-        return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
-}
-
-
-
-FORCE_INLINE_TEMPLATE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
-{
-    const BYTE * p = (const BYTE*)state->mem64;
-    const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
-    U64 h64;
-
-    if (state->total_len >= 32) {
-        U64 const v1 = state->v1;
-        U64 const v2 = state->v2;
-        U64 const v3 = state->v3;
-        U64 const v4 = state->v4;
-
-        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
-        h64 = XXH64_mergeRound(h64, v1);
-        h64 = XXH64_mergeRound(h64, v2);
-        h64 = XXH64_mergeRound(h64, v3);
-        h64 = XXH64_mergeRound(h64, v4);
-    } else {
-        h64  = state->v3 + PRIME64_5;
-    }
-
-    h64 += (U64) state->total_len;
-
-    while (p+8<=bEnd) {
-        U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
-        h64 ^= k1;
-        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
-        p+=8;
-    }
-
-    if (p+4<=bEnd) {
-        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
-        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
-        p+=4;
-    }
-
-    while (p<bEnd) {
-        h64 ^= (*p) * PRIME64_5;
-        h64  = XXH_rotl64(h64, 11) * PRIME64_1;
-        p++;
-    }
-
-    h64 ^= h64 >> 33;
-    h64 *= PRIME64_2;
-    h64 ^= h64 >> 29;
-    h64 *= PRIME64_3;
-    h64 ^= h64 >> 32;
-
-    return h64;
-}
-
-
-XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
-{
-    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
-    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-        return XXH64_digest_endian(state_in, XXH_littleEndian);
-    else
-        return XXH64_digest_endian(state_in, XXH_bigEndian);
-}
-
-
-/* **************************
-*  Canonical representation
-****************************/
-
-/*! Default XXH result types are basic unsigned 32 and 64 bits.
-*   The canonical representation follows human-readable write convention, aka big-endian (large digits first).
-*   These functions allow transformation of hash result into and from its canonical format.
-*   This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs.
-*/
-
-XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
-{
-    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
-    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
-    ZSTD_memcpy(dst, &hash, sizeof(*dst));
-}
-
-XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
-{
-    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
-    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
-    ZSTD_memcpy(dst, &hash, sizeof(*dst));
-}
-
-XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
-{
-    return XXH_readBE32(src);
-}
-
-XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
-{
-    return XXH_readBE64(src);
-}
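The big-endian canonical form is what makes a stored hash comparable across machines of either endianness; a short round-trip sketch (assumes zstd's bundled xxhash.h):

#define XXH_STATIC_LINKING_ONLY
#define XXH_IMPLEMENTATION
#include "xxhash.h"
#include <assert.h>

static void demo_canonical(const void* data, size_t size)
{
    XXH64_hash_t const h = XXH64(data, size, 0);

    XXH64_canonical_t wire;                 /* 8 bytes, big-endian on the wire */
    XXH64_canonicalFromHash(&wire, h);
    /* ...write wire.digest to a file or buffer, read it back anywhere... */
    assert(XXH64_hashFromCanonical(&wire) == h);   /* round-trips exactly */
}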
zstd/common/xxhash.h: 5725 changed lines (file diff suppressed because it is too large)
@ -19,10 +19,8 @@
 /*-*************************************
 *  Dependencies
 ***************************************/
-#if !defined(ZSTD_NO_INTRINSICS) && defined(__ARM_NEON)
-#include <arm_neon.h>
-#endif
 #include "compiler.h"
+#include "cpu.h"
 #include "mem.h"
 #include "debug.h"                 /* assert, DEBUGLOG, RAWLOG, g_debuglevel */
 #include "error_private.h"
@ -60,81 +58,7 @@ extern "C" {
 #undef MAX
 #define MIN(a,b) ((a)<(b) ? (a) : (b))
 #define MAX(a,b) ((a)>(b) ? (a) : (b))
+#define BOUNDED(min,val,max) (MAX(min,MIN(val,max)))
-
-/**
- * Ignore: this is an internal helper.
- *
- * This is a helper function to help force C99-correctness during compilation.
- * Under strict compilation modes, variadic macro arguments can't be empty.
- * However, variadic function arguments can be. Using a function therefore lets
- * us statically check that at least one (string) argument was passed,
- * independent of the compilation flags.
- */
-static INLINE_KEYWORD UNUSED_ATTR
-void _force_has_format_string(const char *format, ...) {
-  (void)format;
-}
-
-/**
- * Ignore: this is an internal helper.
- *
- * We want to force this function invocation to be syntactically correct, but
- * we don't want to force runtime evaluation of its arguments.
- */
-#define _FORCE_HAS_FORMAT_STRING(...) \
-  if (0) { \
-    _force_has_format_string(__VA_ARGS__); \
-  }
-
-/**
- * Return the specified error if the condition evaluates to true.
- *
- * In debug modes, prints additional information.
- * In order to do that (particularly, printing the conditional that failed),
- * this can't just wrap RETURN_ERROR().
- */
-#define RETURN_ERROR_IF(cond, err, ...) \
-  if (cond) { \
-    RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \
-           __FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \
-    _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
-    RAWLOG(3, ": " __VA_ARGS__); \
-    RAWLOG(3, "\n"); \
-    return ERROR(err); \
-  }
-
-/**
- * Unconditionally return the specified error.
- *
- * In debug modes, prints additional information.
- */
-#define RETURN_ERROR(err, ...) \
-  do { \
-    RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \
-           __FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \
-    _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
-    RAWLOG(3, ": " __VA_ARGS__); \
-    RAWLOG(3, "\n"); \
-    return ERROR(err); \
-  } while(0);
-
-/**
- * If the provided expression evaluates to an error code, returns that error code.
- *
- * In debug modes, prints additional information.
- */
-#define FORWARD_IF_ERROR(err, ...) \
-  do { \
-    size_t const err_code = (err); \
-    if (ERR_isError(err_code)) { \
-      RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \
-             __FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \
-      _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
-      RAWLOG(3, ": " __VA_ARGS__); \
-      RAWLOG(3, "\n"); \
-      return err_code; \
-    } \
-  } while(0);
-
-
 /*-*************************************
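These error macros are removed from zstd_internal.h here but keep the same usage shape elsewhere in the tree; a hedged sketch of how zstd-style code typically calls them (hypothetical helper; assumes the macros and error codes from error_private.h are in scope):

#include <string.h>   /* memcpy */

static size_t copy_checked(void* dst, size_t dstCapacity,
                           const void* src, size_t srcSize)
{
    RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall,
                    "need %u bytes, have only %u",
                    (unsigned)srcSize, (unsigned)dstCapacity);
    memcpy(dst, src, srcSize);
    return srcSize;   /* success: a size_t that is not an error code */
}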
@ -195,7 +119,7 @@ typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingTy
 /* Each table cannot take more than #symbols * FSELog bits */
 #define ZSTD_MAX_FSE_HEADERS_SIZE (((MaxML + 1) * MLFSELog + (MaxLL + 1) * LLFSELog + (MaxOff + 1) * OffFSELog + 7) / 8)

-static UNUSED_ATTR const U32 LL_bits[MaxLL+1] = {
+static UNUSED_ATTR const U8 LL_bits[MaxLL+1] = {
      0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0,
      1, 1, 1, 1, 2, 2, 3, 3,
@ -212,7 +136,7 @@ static UNUSED_ATTR const S16 LL_defaultNorm[MaxLL+1] = {
 #define LL_DEFAULTNORMLOG 6  /* for static allocation */
 static UNUSED_ATTR const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;

-static UNUSED_ATTR const U32 ML_bits[MaxML+1] = {
+static UNUSED_ATTR const U8 ML_bits[MaxML+1] = {
      0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0,
@ -247,19 +171,30 @@ static UNUSED_ATTR const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
 *  Shared functions to include for inlining
 *********************************************/
 static void ZSTD_copy8(void* dst, const void* src) {
-#if !defined(ZSTD_NO_INTRINSICS) && defined(__ARM_NEON)
+#if defined(ZSTD_ARCH_ARM_NEON)
     vst1_u8((uint8_t*)dst, vld1_u8((const uint8_t*)src));
 #else
     ZSTD_memcpy(dst, src, 8);
 #endif
 }
 #define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }

+/* Need to use memmove here since the literal buffer can now be located within
+   the dst buffer. In circumstances where the op "catches up" to where the
+   literal buffer is, there can be partial overlaps in this call on the final
+   copy if the literal is being shifted by less than 16 bytes. */
 static void ZSTD_copy16(void* dst, const void* src) {
-#if !defined(ZSTD_NO_INTRINSICS) && defined(__ARM_NEON)
+#if defined(ZSTD_ARCH_ARM_NEON)
     vst1q_u8((uint8_t*)dst, vld1q_u8((const uint8_t*)src));
+#elif defined(ZSTD_ARCH_X86_SSE2)
+    _mm_storeu_si128((__m128i*)dst, _mm_loadu_si128((const __m128i*)src));
+#elif defined(__clang__)
+    ZSTD_memmove(dst, src, 16);
 #else
-    ZSTD_memcpy(dst, src, 16);
+    /* ZSTD_memmove is not inlined properly by gcc */
+    BYTE copy16_buf[16];
+    ZSTD_memcpy(copy16_buf, src, 16);
+    ZSTD_memcpy(dst, copy16_buf, 16);
 #endif
 }
 #define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }
@ -288,8 +223,6 @@ void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e
     BYTE* op = (BYTE*)dst;
     BYTE* const oend = op + length;

-    assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff <= -WILDCOPY_VECLEN));
-
     if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
         /* Handle short offset copies. */
         do {
@ -436,8 +369,14 @@ MEM_STATIC U32 ZSTD_highbit32(U32 val)   /* compress, dictBuilder, decodeCorpus
 #   if STATIC_BMI2 == 1
     return _lzcnt_u32(val)^31;
 #   else
-    unsigned long r=0;
-    return _BitScanReverse(&r, val) ? (unsigned)r : 0;
+    if (val != 0) {
+        unsigned long r;
+        _BitScanReverse(&r, val);
+        return (unsigned)r;
+    } else {
+        /* Should not reach this code path */
+        __assume(0);
+    }
 #   endif
 # elif defined(__GNUC__) && (__GNUC__ >= 3)   /* GCC Intrinsic */
     return __builtin_clz (val) ^ 31;
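For reference, every branch of this function computes the index of the highest set bit (so the `__assume(0)` contract requires a nonzero input); a plain-C reference of the same semantics, for illustration only:

#include <assert.h>

/* Reference for ZSTD_highbit32 semantics: highbit32(1)==0, highbit32(256)==8,
 * and for any nonzero v, (1u << highbit) <= v < (2u << highbit). */
static unsigned highbit32_ref(unsigned v)
{
    unsigned n = 0;
    assert(v != 0);            /* mirrors the nonzero precondition above */
    while (v >>= 1) n++;       /* count how many halvings reach 1 */
    return n;
}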
@ -456,6 +395,63 @@ MEM_STATIC U32 ZSTD_highbit32(U32 val)   /* compress, dictBuilder, decodeCorpus
     }
 }

+/**
+ * Counts the number of trailing zeros of a `size_t`.
+ * Most compilers should support CTZ as a builtin. A backup
+ * implementation is provided if the builtin isn't supported, but
+ * it may not be terribly efficient.
+ */
+MEM_STATIC unsigned ZSTD_countTrailingZeros(size_t val)
+{
+    if (MEM_64bits()) {
+#       if defined(_MSC_VER) && defined(_WIN64)
+#           if STATIC_BMI2
+                return _tzcnt_u64(val);
+#           else
+                if (val != 0) {
+                    unsigned long r;
+                    _BitScanForward64(&r, (U64)val);
+                    return (unsigned)r;
+                } else {
+                    /* Should not reach this code path */
+                    __assume(0);
+                }
+#           endif
+#       elif defined(__GNUC__) && (__GNUC__ >= 4)
+            return __builtin_ctzll((U64)val);
+#       else
+            static const int DeBruijnBytePos[64] = {  0,  1,  2,  7,  3, 13,  8, 19,
+                                                      4, 25, 14, 28,  9, 34, 20, 56,
+                                                      5, 17, 26, 54, 15, 41, 29, 43,
+                                                     10, 31, 38, 35, 21, 45, 49, 57,
+                                                     63,  6, 12, 18, 24, 27, 33, 55,
+                                                     16, 53, 40, 42, 30, 37, 44, 48,
+                                                     62, 11, 23, 32, 52, 39, 36, 47,
+                                                     61, 22, 51, 46, 60, 50, 59, 58 };
+            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+#       endif
+    } else { /* 32 bits */
+#       if defined(_MSC_VER)
+            if (val != 0) {
+                unsigned long r;
+                _BitScanForward(&r, (U32)val);
+                return (unsigned)r;
+            } else {
+                /* Should not reach this code path */
+                __assume(0);
+            }
+#       elif defined(__GNUC__) && (__GNUC__ >= 3)
+            return __builtin_ctz((U32)val);
+#       else
+            static const int DeBruijnBytePos[32] = {  0,  1, 28,  2, 29, 14, 24,  3,
+                                                     30, 22, 20, 15, 25, 17,  4,  8,
+                                                     31, 27, 13, 23, 21, 19, 16,  7,
+                                                     26, 12, 18,  6, 11,  5, 10,  9 };
+            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+#       endif
+    }
+}
+
 /* ZSTD_invalidateRepCodes() :
  * ensures next compression will not use repcodes from previous block.
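The De Bruijn fallback works because `val & -val` isolates the lowest set bit (a power of two), and multiplying by a De Bruijn constant slides a unique pattern into the top bits, which then indexes the lookup table. A standalone check of the 32-bit variant against a naive loop (illustration only, not zstd's own test):

#include <assert.h>
typedef unsigned int U32;
typedef int S32;

static unsigned ctz32_debruijn(U32 val)
{
    static const int DeBruijnBytePos[32] = {  0,  1, 28,  2, 29, 14, 24,  3,
                                             30, 22, 20, 15, 25, 17,  4,  8,
                                             31, 27, 13, 23, 21, 19, 16,  7,
                                             26, 12, 18,  6, 11,  5, 10,  9 };
    /* val & -val keeps only the lowest set bit; the multiply makes the
     * top 5 bits unique per power of two. */
    return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
}

int main(void)
{
    U32 v;
    for (v = 1; v < (1u<<20); v++) {
        unsigned n = 0;
        while (!((v >> n) & 1)) n++;        /* naive trailing-zero count */
        assert(ctz32_debruijn(v) == n);
    }
    return 0;
}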
@ -482,6 +478,14 @@ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
 size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
                              const void* src, size_t srcSize);

+/**
+ * @returns true iff the CPU supports dynamic BMI2 dispatch.
+ */
+MEM_STATIC int ZSTD_cpuSupportsBmi2(void)
+{
+    ZSTD_cpuid_t cpuid = ZSTD_cpuid();
+    return ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid);
+}
+
 #if defined (__cplusplus)
 }
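A hedged sketch of the runtime-dispatch pattern this helper enables when DYNAMIC_BMI2 is set (hypothetical worker names; TARGET_ATTRIBUTE is assumed from zstd's compiler.h):

static size_t decode_default(const void* src, size_t n)
{
    (void)src; return n;   /* stand-in for the baseline code path */
}
#if DYNAMIC_BMI2
TARGET_ATTRIBUTE("bmi2") static size_t decode_bmi2(const void* src, size_t n)
{
    (void)src; return n;   /* same logic, compiled with BMI2 enabled */
}
#endif

static size_t decode_dispatch(const void* src, size_t n)
{
#if DYNAMIC_BMI2
    if (ZSTD_cpuSupportsBmi2())      /* one CPUID probe, typically done once */
        return decode_bmi2(src, n);
#endif
    return decode_default(src, n);
}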
@ -17,10 +17,19 @@ extern "C" {

 #include <stddef.h>

-/* weak symbol support */
-#if !defined(ZSTD_HAVE_WEAK_SYMBOLS) && defined(__GNUC__) && \
+/* weak symbol support
+ * For now, enable conservatively:
+ *   - Only GNUC
+ *   - Only ELF
+ *   - Only x86-64 and i386
+ * Also, explicitly disable on platforms known not to work so they aren't
+ * forgotten in the future.
+ */
+#if !defined(ZSTD_HAVE_WEAK_SYMBOLS) && \
+    defined(__GNUC__) && defined(__ELF__) && \
+    (defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86)) && \
     !defined(__APPLE__) && !defined(_WIN32) && !defined(__MINGW32__) && \
-    !defined(__CYGWIN__)
+    !defined(__CYGWIN__) && !defined(_AIX)
 #  define ZSTD_HAVE_WEAK_SYMBOLS 1
 #else
 #  define ZSTD_HAVE_WEAK_SYMBOLS 0
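What the flag buys, in a hedged sketch (illustrative names, not this header's own macros): a library can ship a weak no-op default that an application overrides simply by defining the same symbol.

#if ZSTD_HAVE_WEAK_SYMBOLS
#  define TRACE_WEAK __attribute__((__weak__))
#else
#  define TRACE_WEAK   /* no weak support: exactly one definition allowed */
#endif

TRACE_WEAK void on_trace_event(int id)
{
    (void)id;   /* default: tracing disabled, does nothing */
}
/* An application that wants tracing defines its own non-weak on_trace_event();
 * at link time it silently replaces this weak default. */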
zstd/compress/clevels.h: 134 added lines (new file)
@ -0,0 +1,134 @@
/*
 * Copyright (c) Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CLEVELS_H
#define ZSTD_CLEVELS_H

#define ZSTD_STATIC_LINKING_ONLY  /* ZSTD_compressionParameters  */
#include "../zstd.h"

/*-=====  Pre-defined compression levels  =====-*/

#define ZSTD_MAX_CLEVEL     22

#ifdef __GNUC__
__attribute__((__unused__))
#endif

static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
{   /* "default" - for any srcSize > 256 KB */
    /* W,  C,  H,  S,  L, TL, strat */
    { 19, 12, 13,  1,  6,  1, ZSTD_fast    },  /* base for negative levels */
    { 19, 13, 14,  1,  7,  0, ZSTD_fast    },  /* level  1 */
    { 20, 15, 16,  1,  6,  0, ZSTD_fast    },  /* level  2 */
    { 21, 16, 17,  1,  5,  0, ZSTD_dfast   },  /* level  3 */
    { 21, 18, 18,  1,  5,  0, ZSTD_dfast   },  /* level  4 */
    { 21, 18, 19,  3,  5,  2, ZSTD_greedy  },  /* level  5 */
    { 21, 18, 19,  3,  5,  4, ZSTD_lazy    },  /* level  6 */
    { 21, 19, 20,  4,  5,  8, ZSTD_lazy    },  /* level  7 */
    { 21, 19, 20,  4,  5, 16, ZSTD_lazy2   },  /* level  8 */
    { 22, 20, 21,  4,  5, 16, ZSTD_lazy2   },  /* level  9 */
    { 22, 21, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 10 */
    { 22, 21, 22,  6,  5, 16, ZSTD_lazy2   },  /* level 11 */
    { 22, 22, 23,  6,  5, 32, ZSTD_lazy2   },  /* level 12 */
    { 22, 22, 22,  4,  5, 32, ZSTD_btlazy2 },  /* level 13 */
    { 22, 22, 23,  5,  5, 32, ZSTD_btlazy2 },  /* level 14 */
    { 22, 23, 23,  6,  5, 32, ZSTD_btlazy2 },  /* level 15 */
    { 22, 22, 22,  5,  5, 48, ZSTD_btopt   },  /* level 16 */
    { 23, 23, 22,  5,  4, 64, ZSTD_btopt   },  /* level 17 */
    { 23, 23, 22,  6,  3, 64, ZSTD_btultra },  /* level 18 */
    { 23, 24, 22,  7,  3,256, ZSTD_btultra2},  /* level 19 */
    { 25, 25, 23,  7,  3,256, ZSTD_btultra2},  /* level 20 */
    { 26, 26, 24,  7,  3,512, ZSTD_btultra2},  /* level 21 */
    { 27, 27, 25,  9,  3,999, ZSTD_btultra2},  /* level 22 */
},
{   /* for srcSize <= 256 KB */
    /* W,  C,  H,  S,  L,  T, strat */
    { 18, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
    { 18, 13, 14,  1,  6,  0, ZSTD_fast    },  /* level  1 */
    { 18, 14, 14,  1,  5,  0, ZSTD_dfast   },  /* level  2 */
    { 18, 16, 16,  1,  4,  0, ZSTD_dfast   },  /* level  3 */
    { 18, 16, 17,  3,  5,  2, ZSTD_greedy  },  /* level  4.*/
    { 18, 17, 18,  5,  5,  2, ZSTD_greedy  },  /* level  5.*/
    { 18, 18, 19,  3,  5,  4, ZSTD_lazy    },  /* level  6.*/
    { 18, 18, 19,  4,  4,  4, ZSTD_lazy    },  /* level  7 */
    { 18, 18, 19,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
    { 18, 18, 19,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
    { 18, 18, 19,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
    { 18, 18, 19,  5,  4, 12, ZSTD_btlazy2 },  /* level 11.*/
    { 18, 19, 19,  7,  4, 12, ZSTD_btlazy2 },  /* level 12.*/
    { 18, 18, 19,  4,  4, 16, ZSTD_btopt   },  /* level 13 */
    { 18, 18, 19,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
    { 18, 18, 19,  6,  3,128, ZSTD_btopt   },  /* level 15.*/
    { 18, 19, 19,  6,  3,128, ZSTD_btultra },  /* level 16.*/
    { 18, 19, 19,  8,  3,256, ZSTD_btultra },  /* level 17.*/
    { 18, 19, 19,  6,  3,128, ZSTD_btultra2},  /* level 18.*/
    { 18, 19, 19,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
    { 18, 19, 19, 10,  3,512, ZSTD_btultra2},  /* level 20.*/
    { 18, 19, 19, 12,  3,512, ZSTD_btultra2},  /* level 21.*/
    { 18, 19, 19, 13,  3,999, ZSTD_btultra2},  /* level 22.*/
},
{   /* for srcSize <= 128 KB */
    /* W,  C,  H,  S,  L,  T, strat */
    { 17, 12, 12,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
    { 17, 12, 13,  1,  6,  0, ZSTD_fast    },  /* level  1 */
    { 17, 13, 15,  1,  5,  0, ZSTD_fast    },  /* level  2 */
    { 17, 15, 16,  2,  5,  0, ZSTD_dfast   },  /* level  3 */
    { 17, 17, 17,  2,  4,  0, ZSTD_dfast   },  /* level  4 */
    { 17, 16, 17,  3,  4,  2, ZSTD_greedy  },  /* level  5 */
    { 17, 16, 17,  3,  4,  4, ZSTD_lazy    },  /* level  6 */
    { 17, 16, 17,  3,  4,  8, ZSTD_lazy2   },  /* level  7 */
    { 17, 16, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
    { 17, 16, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
    { 17, 16, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
    { 17, 17, 17,  5,  4,  8, ZSTD_btlazy2 },  /* level 11 */
    { 17, 18, 17,  7,  4, 12, ZSTD_btlazy2 },  /* level 12 */
    { 17, 18, 17,  3,  4, 12, ZSTD_btopt   },  /* level 13.*/
    { 17, 18, 17,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
    { 17, 18, 17,  6,  3,256, ZSTD_btopt   },  /* level 15.*/
    { 17, 18, 17,  6,  3,128, ZSTD_btultra },  /* level 16.*/
    { 17, 18, 17,  8,  3,256, ZSTD_btultra },  /* level 17.*/
    { 17, 18, 17, 10,  3,512, ZSTD_btultra },  /* level 18.*/
    { 17, 18, 17,  5,  3,256, ZSTD_btultra2},  /* level 19.*/
    { 17, 18, 17,  7,  3,512, ZSTD_btultra2},  /* level 20.*/
    { 17, 18, 17,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
    { 17, 18, 17, 11,  3,999, ZSTD_btultra2},  /* level 22.*/
},
{   /* for srcSize <= 16 KB */
    /* W,  C,  H,  S,  L,  T, strat */
    { 14, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
    { 14, 14, 15,  1,  5,  0, ZSTD_fast    },  /* level  1 */
    { 14, 14, 15,  1,  4,  0, ZSTD_fast    },  /* level  2 */
    { 14, 14, 15,  2,  4,  0, ZSTD_dfast   },  /* level  3 */
    { 14, 14, 14,  4,  4,  2, ZSTD_greedy  },  /* level  4 */
    { 14, 14, 14,  3,  4,  4, ZSTD_lazy    },  /* level  5.*/
    { 14, 14, 14,  4,  4,  8, ZSTD_lazy2   },  /* level  6 */
    { 14, 14, 14,  6,  4,  8, ZSTD_lazy2   },  /* level  7 */
    { 14, 14, 14,  8,  4,  8, ZSTD_lazy2   },  /* level  8.*/
    { 14, 15, 14,  5,  4,  8, ZSTD_btlazy2 },  /* level  9.*/
    { 14, 15, 14,  9,  4,  8, ZSTD_btlazy2 },  /* level 10.*/
    { 14, 15, 14,  3,  4, 12, ZSTD_btopt   },  /* level 11.*/
    { 14, 15, 14,  4,  3, 24, ZSTD_btopt   },  /* level 12.*/
    { 14, 15, 14,  5,  3, 32, ZSTD_btultra },  /* level 13.*/
    { 14, 15, 15,  6,  3, 64, ZSTD_btultra },  /* level 14.*/
    { 14, 15, 15,  7,  3,256, ZSTD_btultra },  /* level 15.*/
    { 14, 15, 15,  5,  3, 48, ZSTD_btultra2},  /* level 16.*/
    { 14, 15, 15,  6,  3,128, ZSTD_btultra2},  /* level 17.*/
    { 14, 15, 15,  7,  3,256, ZSTD_btultra2},  /* level 18.*/
    { 14, 15, 15,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
    { 14, 15, 15,  8,  3,512, ZSTD_btultra2},  /* level 20.*/
    { 14, 15, 15,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
    { 14, 15, 15, 10,  3,999, ZSTD_btultra2},  /* level 22.*/
},
};

#endif /* ZSTD_CLEVELS_H */
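These tables back the level-to-parameter mapping exposed through zstd's advanced API; a brief usage sketch (ZSTD_getCParams is part of the ZSTD_STATIC_LINKING_ONLY API, so this is a consumer's view of the same data):

#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_compressionParameters, ZSTD_getCParams */
#include <zstd.h>
#include <stdio.h>

int main(void)
{
    /* Ask which parameters level 19 resolves to for a ~100 KB input;
     * the answer is drawn from tables like the ones above. */
    ZSTD_compressionParameters const cp = ZSTD_getCParams(19, 100*1024, 0 /* no dict */);
    printf("wlog=%u clog=%u hlog=%u slog=%u mml=%u tlen=%u strat=%d\n",
           cp.windowLog, cp.chainLog, cp.hashLog, cp.searchLog,
           cp.minMatch, cp.targetLength, (int)cp.strategy);
    return 0;
}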
@ -75,13 +75,14 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
     void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
     FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
     U32 const step = FSE_TABLESTEP(tableSize);
+    U32 const maxSV1 = maxSymbolValue+1;

-    U32* cumul = (U32*)workSpace;
-    FSE_FUNCTION_TYPE* tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSymbolValue + 2));
+    U16* cumul = (U16*)workSpace;   /* size = maxSV1 */
+    FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSV1+1));  /* size = tableSize */

     U32 highThreshold = tableSize-1;

-    if ((size_t)workSpace & 3) return ERROR(GENERIC); /* Must be 4 byte aligned */
+    assert(((size_t)workSpace & 1) == 0);  /* Must be 2 bytes-aligned */
     if (FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) > wkspSize) return ERROR(tableLog_tooLarge);
     /* CTable header */
     tableU16[-2] = (U16) tableLog;
@ -98,20 +99,61 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
     /* symbol start positions */
     {   U32 u;
         cumul[0] = 0;
-        for (u=1; u <= maxSymbolValue+1; u++) {
+        for (u=1; u <= maxSV1; u++) {
             if (normalizedCounter[u-1]==-1) {  /* Low proba symbol */
                 cumul[u] = cumul[u-1] + 1;
                 tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
             } else {
-                cumul[u] = cumul[u-1] + normalizedCounter[u-1];
+                assert(normalizedCounter[u-1] >= 0);
+                cumul[u] = cumul[u-1] + (U16)normalizedCounter[u-1];
+                assert(cumul[u] >= cumul[u-1]);  /* no overflow */
         }   }
-        cumul[maxSymbolValue+1] = tableSize+1;
+        cumul[maxSV1] = (U16)(tableSize+1);
     }

     /* Spread symbols */
-    {   U32 position = 0;
+    if (highThreshold == tableSize - 1) {
+        /* Case for no low prob count symbols. Lay down 8 bytes at a time
+         * to reduce branch misses since we are operating on a small block
+         */
+        BYTE* const spread = tableSymbol + tableSize; /* size = tableSize + 8 (may write beyond tableSize) */
+        {   U64 const add = 0x0101010101010101ull;
+            size_t pos = 0;
+            U64 sv = 0;
+            U32 s;
+            for (s=0; s<maxSV1; ++s, sv += add) {
+                int i;
+                int const n = normalizedCounter[s];
+                MEM_write64(spread + pos, sv);
+                for (i = 8; i < n; i += 8) {
+                    MEM_write64(spread + pos + i, sv);
+                }
+                assert(n>=0);
+                pos += (size_t)n;
+            }
+        }
+        /* Spread symbols across the table. Lack of lowprob symbols means that
+         * we don't need variable sized inner loop, so we can unroll the loop and
+         * reduce branch misses.
+         */
+        {   size_t position = 0;
+            size_t s;
+            size_t const unroll = 2; /* Experimentally determined optimal unroll */
+            assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
+            for (s = 0; s < (size_t)tableSize; s += unroll) {
+                size_t u;
+                for (u = 0; u < unroll; ++u) {
+                    size_t const uPosition = (position + (u * step)) & tableMask;
+                    tableSymbol[uPosition] = spread[s + u];
+                }
+                position = (position + (unroll * step)) & tableMask;
+            }
+            assert(position == 0); /* Must have initialized all positions */
+        }
+    } else {
+        U32 position = 0;
         U32 symbol;
-        for (symbol=0; symbol<=maxSymbolValue; symbol++) {
+        for (symbol=0; symbol<maxSV1; symbol++) {
             int nbOccurrences;
             int const freq = normalizedCounter[symbol];
             for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {
@@ -120,7 +162,6 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
                 while (position > highThreshold)
                     position = (position + step) & tableMask;   /* Low proba area */
         }   }
-
         assert(position==0);  /* Must have initialized all positions */
     }
 
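/* [Editor's note] Illustrative sketch of the "lay down 8 bytes at a time" fast
 * path added in the hunks above (standalone C, not the commit's code;
 * MEM_write64 is modeled with memcpy, and all counts are assumed >= 0, which
 * is exactly the condition guarding the fast path). The symbol's byte value is
 * broadcast into all 8 lanes of a 64-bit word, runs are emitted as 8-byte
 * stores, and the destination carries 8 bytes of slack so short runs may
 * harmlessly overshoot into the next run's territory. */
#include <stdint.h>
#include <string.h>

static void spread_runs(uint8_t* spread /* capacity: total run length + 8 */,
                        const int16_t* normCount, unsigned maxSV1)
{
    uint64_t sv = 0;                             /* current symbol, broadcast to 8 bytes */
    const uint64_t add = 0x0101010101010101ULL;  /* +1 in every byte lane                */
    size_t pos = 0;
    unsigned s;
    for (s = 0; s < maxSV1; ++s, sv += add) {
        int i;
        int const n = normCount[s];              /* run length for symbol s              */
        memcpy(spread + pos, &sv, 8);            /* unconditional first 8-byte store     */
        for (i = 8; i < n; i += 8)
            memcpy(spread + pos + i, &sv, 8);    /* extra stores only for long runs      */
        pos += (size_t)n;                        /* next run starts exactly at offset n  */
    }
}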
@@ -144,16 +185,17 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
             case -1:
             case  1:
                 symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);
-                symbolTT[s].deltaFindState = total - 1;
+                assert(total <= INT_MAX);
+                symbolTT[s].deltaFindState = (int)(total - 1);
                 total ++;
                 break;
             default :
-                {
-                    U32 const maxBitsOut = tableLog - BIT_highbit32 (normalizedCounter[s]-1);
-                    U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
+                assert(normalizedCounter[s] > 1);
+                {   U32 const maxBitsOut = tableLog - BIT_highbit32 ((U32)normalizedCounter[s]-1);
+                    U32 const minStatePlus = (U32)normalizedCounter[s] << maxBitsOut;
                     symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
-                    symbolTT[s].deltaFindState = total - normalizedCounter[s];
-                    total +=  normalizedCounter[s];
+                    symbolTT[s].deltaFindState = (int)(total - (unsigned)normalizedCounter[s]);
+                    total +=  (unsigned)normalizedCounter[s];
     }   }   }   }
 
 #if 0  /* debug : symbol costs */
@@ -164,32 +206,26 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
                 symbol, normalizedCounter[symbol],
                 FSE_getMaxNbBits(symbolTT, symbol),
                 (double)FSE_bitCost(symbolTT, tableLog, symbol, 8) / 256);
-    }
-    }
+    }   }
 #endif
 
     return 0;
 }
 
-#ifndef ZSTD_NO_UNUSED_FUNCTIONS
-size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
-{
-    FSE_FUNCTION_TYPE tableSymbol[FSE_MAX_TABLESIZE];   /* memset() is not necessary, even if static analyzer complain about it */
-    return FSE_buildCTable_wksp(ct, normalizedCounter, maxSymbolValue, tableLog, tableSymbol, sizeof(tableSymbol));
-}
-#endif
-
-
-
 #ifndef FSE_COMMONDEFS_ONLY
 
 /*-**************************************************************
 *  FSE NCount encoding
 ****************************************************************/
 size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
 {
-    size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog) >> 3) + 3;
+    size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog
+                                   + 4 /* bitCount initialized at 4 */
+                                   + 2 /* first two symbols may use one additional bit each */) / 8)
+                                 + 1 /* round up to whole nb bytes */
+                                 + 2 /* additional two bytes for bitstream flush */;
     return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND;  /* maxSymbolValue==0 ? use default */
 }
 
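/* [Editor's note] Worked example for the FSE_NCountWriteBound rewrite above
 * (illustrative, not part of the commit). For maxSymbolValue = 255 and
 * tableLog = 9:
 *     bits  = 256*9 + 4 + 2 = 2310
 *     bytes = 2310/8 + 1 + 2 = 288 + 3 = 291
 * The old expression (((255+1)*9) >> 3) + 3 also yields 291 here; the new form
 * spells out where each spare byte comes from (the 4-bit bitCount seed, the
 * possibly wider first two symbols, rounding up, and the 2-byte flush) instead
 * of folding everything into an opaque "+3". */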
diff --git a/zstd/compress/huf_compress.c b/zstd/compress/huf_compress.c
@@ -53,6 +53,28 @@ unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxS
 /* *******************************************************
 *  HUF : Huffman block compression
 *********************************************************/
+#define HUF_WORKSPACE_MAX_ALIGNMENT 8
+
+static void* HUF_alignUpWorkspace(void* workspace, size_t* workspaceSizePtr, size_t align)
+{
+    size_t const mask = align - 1;
+    size_t const rem = (size_t)workspace & mask;
+    size_t const add = (align - rem) & mask;
+    BYTE* const aligned = (BYTE*)workspace + add;
+    assert((align & (align - 1)) == 0); /* pow 2 */
+    assert(align <= HUF_WORKSPACE_MAX_ALIGNMENT);
+    if (*workspaceSizePtr >= add) {
+        assert(add < align);
+        assert(((size_t)aligned & mask) == 0);
+        *workspaceSizePtr -= add;
+        return aligned;
+    } else {
+        *workspaceSizePtr = 0;
+        return NULL;
+    }
+}
+
+
 /* HUF_compressWeights() :
  * Same as FSE_compress(), but dedicated to huff0's weights compression.
  * The use case needs much less stack memory.
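/* [Editor's note] Standalone sketch of the align-up arithmetic used by
 * HUF_alignUpWorkspace above (illustrative, not the commit's code). For a
 * power-of-two align, (align - (addr & (align-1))) & (align-1) is the number
 * of bytes needed to round addr up to the next multiple of align, and is 0
 * when addr is already aligned. */
#include <stddef.h>
#include <stdint.h>

static void* align_up(void* p, size_t* avail, size_t align /* power of 2 */)
{
    size_t const mask = align - 1;
    size_t const add  = (align - ((uintptr_t)p & mask)) & mask;
    if (*avail < add) { *avail = 0; return NULL; }  /* not enough room to align */
    *avail -= add;                                  /* spend the padding bytes  */
    return (char*)p + add;
}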
@@ -75,7 +97,7 @@ static size_t HUF_compressWeights(void* dst, size_t dstSize, const void* weightT
 
     unsigned maxSymbolValue = HUF_TABLELOG_MAX;
     U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
-    HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)workspace;
+    HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));
 
     if (workspaceSize < sizeof(HUF_CompressWeightsWksp)) return ERROR(GENERIC);
 
@@ -106,6 +128,40 @@ static size_t HUF_compressWeights(void* dst, size_t dstSize, const void* weightT
     return (size_t)(op-ostart);
 }
 
+static size_t HUF_getNbBits(HUF_CElt elt)
+{
+    return elt & 0xFF;
+}
+
+static size_t HUF_getNbBitsFast(HUF_CElt elt)
+{
+    return elt;
+}
+
+static size_t HUF_getValue(HUF_CElt elt)
+{
+    return elt & ~0xFF;
+}
+
+static size_t HUF_getValueFast(HUF_CElt elt)
+{
+    return elt;
+}
+
+static void HUF_setNbBits(HUF_CElt* elt, size_t nbBits)
+{
+    assert(nbBits <= HUF_TABLELOG_ABSOLUTEMAX);
+    *elt = nbBits;
+}
+
+static void HUF_setValue(HUF_CElt* elt, size_t value)
+{
+    size_t const nbBits = HUF_getNbBits(*elt);
+    if (nbBits > 0) {
+        assert((value >> nbBits) == 0);
+        *elt |= value << (sizeof(HUF_CElt) * 8 - nbBits);
+    }
+}
+
 typedef struct {
     HUF_CompressWeightsWksp wksp;
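/* [Editor's note] Round-trip sketch of the packed HUF_CElt layout introduced
 * above (illustrative, assuming a 64-bit size_t; not the commit's code):
 * nbBits lives in the low byte and the code value is left-justified into the
 * top nbBits bits of the same word, so one load feeds the encoder. */
#include <assert.h>
#include <stddef.h>

typedef size_t CElt;

static CElt pack(size_t nbBits, size_t value)
{
    CElt elt = nbBits;                                /* bits [0,8)  = nbBits */
    if (nbBits > 0)
        elt |= value << (sizeof(CElt) * 8 - nbBits);  /* top nbBits  = value  */
    return elt;
}

static void roundtrip(void)
{
    CElt const e = pack(5, 0x13);                     /* 5-bit code 0b10011   */
    assert((e & 0xFF) == 5);                          /* nbBits recovered     */
    assert((e >> (sizeof(CElt) * 8 - 5)) == 0x13);    /* value recovered      */
}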
@@ -117,9 +173,10 @@ size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
                             const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog,
                             void* workspace, size_t workspaceSize)
 {
+    HUF_CElt const* const ct = CTable + 1;
     BYTE* op = (BYTE*)dst;
     U32 n;
-    HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)workspace;
+    HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));
 
     /* check conditions */
     if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC);
@@ -130,9 +187,10 @@ size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
     for (n=1; n<huffLog+1; n++)
         wksp->bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
     for (n=0; n<maxSymbolValue; n++)
-        wksp->huffWeight[n] = wksp->bitsToWeight[CTable[n].nbBits];
+        wksp->huffWeight[n] = wksp->bitsToWeight[HUF_getNbBits(ct[n])];
 
     /* attempt weights compression by FSE */
+    if (maxDstSize < 1) return ERROR(dstSize_tooSmall);
     { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) );
       if ((hSize>1) & (hSize < maxSymbolValue/2)) {   /* FSE compressed */
           op[0] = (BYTE)hSize;
@@ -166,6 +224,7 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void
     U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];   /* large enough for values from 0 to 16 */
     U32 tableLog = 0;
     U32 nbSymbols = 0;
+    HUF_CElt* const ct = CTable + 1;
 
     /* get symbol weights */
     CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
@@ -175,6 +234,8 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void
     if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
     if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);
 
+    CTable[0] = tableLog;
+
     /* Prepare base value per rank */
     {   U32 n, nextRankStart = 0;
         for (n=1; n<=tableLog; n++) {
|
|||||||
/* fill nbBits */
|
/* fill nbBits */
|
||||||
{ U32 n; for (n=0; n<nbSymbols; n++) {
|
{ U32 n; for (n=0; n<nbSymbols; n++) {
|
||||||
const U32 w = huffWeight[n];
|
const U32 w = huffWeight[n];
|
||||||
CTable[n].nbBits = (BYTE)(tableLog + 1 - w) & -(w != 0);
|
HUF_setNbBits(ct + n, (BYTE)(tableLog + 1 - w) & -(w != 0));
|
||||||
} }
|
} }
|
||||||
|
|
||||||
/* fill val */
|
/* fill val */
|
||||||
{ U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */
|
{ U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */
|
||||||
U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
|
U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
|
||||||
{ U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; }
|
{ U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[HUF_getNbBits(ct[n])]++; }
|
||||||
/* determine stating value per rank */
|
/* determine stating value per rank */
|
||||||
valPerRank[tableLog+1] = 0; /* for w==0 */
|
valPerRank[tableLog+1] = 0; /* for w==0 */
|
||||||
{ U16 min = 0;
|
{ U16 min = 0;
|
||||||
@ -202,18 +263,18 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void
|
|||||||
min >>= 1;
|
min >>= 1;
|
||||||
} }
|
} }
|
||||||
/* assign value within rank, symbol order */
|
/* assign value within rank, symbol order */
|
||||||
{ U32 n; for (n=0; n<nbSymbols; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; }
|
{ U32 n; for (n=0; n<nbSymbols; n++) HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); }
|
||||||
}
|
}
|
||||||
|
|
||||||
*maxSymbolValuePtr = nbSymbols - 1;
|
*maxSymbolValuePtr = nbSymbols - 1;
|
||||||
return readSize;
|
return readSize;
|
||||||
}
|
}
|
||||||
|
|
||||||
U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue)
|
U32 HUF_getNbBitsFromCTable(HUF_CElt const* CTable, U32 symbolValue)
|
||||||
{
|
{
|
||||||
const HUF_CElt* table = (const HUF_CElt*)symbolTable;
|
const HUF_CElt* ct = CTable + 1;
|
||||||
assert(symbolValue <= HUF_SYMBOLVALUE_MAX);
|
assert(symbolValue <= HUF_SYMBOLVALUE_MAX);
|
||||||
return table[symbolValue].nbBits;
|
return (U32)HUF_getNbBits(ct[symbolValue]);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -367,22 +428,118 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
 }
 
 typedef struct {
-    U32 base;
-    U32 curr;
+    U16 base;
+    U16 curr;
 } rankPos;
 
 typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
 
-#define RANK_POSITION_TABLE_SIZE 32
+/* Number of buckets available for HUF_sort() */
+#define RANK_POSITION_TABLE_SIZE 192
 
 typedef struct {
   huffNodeTable huffNodeTbl;
   rankPos rankPosition[RANK_POSITION_TABLE_SIZE];
 } HUF_buildCTable_wksp_tables;
 
+/* RANK_POSITION_DISTINCT_COUNT_CUTOFF == Cutoff point in HUF_sort() buckets for which we use log2 bucketing.
+ * Strategy is to use as many buckets as possible for representing distinct
+ * counts while using the remainder to represent all "large" counts.
+ *
+ * To satisfy this requirement for 192 buckets, we can do the following:
+ * Let buckets 0-166 represent distinct counts of [0, 166]
+ * Let buckets 166 to 192 represent all remaining counts up to RANK_POSITION_MAX_COUNT_LOG using log2 bucketing.
+ */
+#define RANK_POSITION_MAX_COUNT_LOG 32
+#define RANK_POSITION_LOG_BUCKETS_BEGIN (RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */
+#define RANK_POSITION_DISTINCT_COUNT_CUTOFF RANK_POSITION_LOG_BUCKETS_BEGIN + BIT_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */
+
+/* Return the appropriate bucket index for a given count. See definition of
+ * RANK_POSITION_DISTINCT_COUNT_CUTOFF for explanation of bucketing strategy.
+ */
+static U32 HUF_getIndex(U32 const count) {
+    return (count < RANK_POSITION_DISTINCT_COUNT_CUTOFF)
+        ? count
+        : BIT_highbit32(count) + RANK_POSITION_LOG_BUCKETS_BEGIN;
+}
+
+/* Helper swap function for HUF_quickSortPartition() */
+static void HUF_swapNodes(nodeElt* a, nodeElt* b) {
+    nodeElt tmp = *a;
+    *a = *b;
+    *b = tmp;
+}
+
+/* Returns 0 if the huffNode array is not sorted by descending count */
+MEM_STATIC int HUF_isSorted(nodeElt huffNode[], U32 const maxSymbolValue1) {
+    U32 i;
+    for (i = 1; i < maxSymbolValue1; ++i) {
+        if (huffNode[i].count > huffNode[i-1].count) {
+            return 0;
+        }
+    }
+    return 1;
+}
+
+/* Insertion sort by descending order */
+HINT_INLINE void HUF_insertionSort(nodeElt huffNode[], int const low, int const high) {
+    int i;
+    int const size = high-low+1;
+    huffNode += low;
+    for (i = 1; i < size; ++i) {
+        nodeElt const key = huffNode[i];
+        int j = i - 1;
+        while (j >= 0 && huffNode[j].count < key.count) {
+            huffNode[j + 1] = huffNode[j];
+            j--;
+        }
+        huffNode[j + 1] = key;
+    }
+}
+
+/* Pivot helper function for quicksort. */
+static int HUF_quickSortPartition(nodeElt arr[], int const low, int const high) {
+    /* Simply select rightmost element as pivot. "Better" selectors like
+     * median-of-three don't experimentally appear to have any benefit.
+     */
+    U32 const pivot = arr[high].count;
+    int i = low - 1;
+    int j = low;
+    for ( ; j < high; j++) {
+        if (arr[j].count > pivot) {
+            i++;
+            HUF_swapNodes(&arr[i], &arr[j]);
+        }
+    }
+    HUF_swapNodes(&arr[i + 1], &arr[high]);
+    return i + 1;
+}
+
+/* Classic quicksort by descending with partially iterative calls
+ * to reduce worst case callstack size.
+ */
+static void HUF_simpleQuickSort(nodeElt arr[], int low, int high) {
+    int const kInsertionSortThreshold = 8;
+    if (high - low < kInsertionSortThreshold) {
+        HUF_insertionSort(arr, low, high);
+        return;
+    }
+    while (low < high) {
+        int const idx = HUF_quickSortPartition(arr, low, high);
+        if (idx - low < high - idx) {
+            HUF_simpleQuickSort(arr, low, idx - 1);
+            low = idx + 1;
+        } else {
+            HUF_simpleQuickSort(arr, idx + 1, high);
+            high = idx - 1;
+        }
+    }
+}
+
 /**
  * HUF_sort():
  * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order.
+ * This is a typical bucket sorting strategy that uses either quicksort or insertion sort to sort each bucket.
  *
  * @param[out] huffNode       Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled.
  *                            Must have (maxSymbolValue + 1) entries.
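/* [Editor's note] Illustrative check of the HUF_sort() bucket mapping above
 * (a sketch under the constants as defined: distinct buckets below the cutoff
 * 166, log2 buckets based at 158; highbit(x) is the index of the highest set
 * bit, like BIT_highbit32). Only buckets at or above the cutoff can hold more
 * than one distinct count, so only they need the quicksort pass. */
#include <assert.h>

static unsigned highbit(unsigned x) { unsigned n = 0; while (x >>= 1) n++; return n; }

static unsigned bucket_of(unsigned count)
{
    return count < 166 ? count : 158 + highbit(count);
}

static void bucket_examples(void)
{
    assert(bucket_of(150) == 150);        /* small counts: one bucket per count     */
    assert(bucket_of(1000) == 167);       /* highbit(1000) == 9                     */
    assert(bucket_of(1u << 20) == 178);   /* all counts in [2^20, 2^21) share 178   */
}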
@@ -390,44 +547,52 @@ typedef struct {
  * @param[in]  maxSymbolValue Maximum symbol value.
  * @param      rankPosition   This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries.
  */
-static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue, rankPos* rankPosition)
-{
-    int n;
-    int const maxSymbolValue1 = (int)maxSymbolValue + 1;
+static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSymbolValue, rankPos rankPosition[]) {
+    U32 n;
+    U32 const maxSymbolValue1 = maxSymbolValue+1;
 
     /* Compute base and set curr to base.
-     * For symbol s let lowerRank = BIT_highbit32(count[n]+1) and rank = lowerRank + 1.
-     * Then 2^lowerRank <= count[n]+1 <= 2^rank.
+     * For symbol s let lowerRank = HUF_getIndex(count[n]) and rank = lowerRank + 1.
+     * See HUF_getIndex to see bucketing strategy.
      * We attribute each symbol to lowerRank's base value, because we want to know where
      * each rank begins in the output, so for rank R we want to count ranks R+1 and above.
      */
     ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE);
     for (n = 0; n < maxSymbolValue1; ++n) {
-        U32 lowerRank = BIT_highbit32(count[n] + 1);
+        U32 lowerRank = HUF_getIndex(count[n]);
+        assert(lowerRank < RANK_POSITION_TABLE_SIZE - 1);
         rankPosition[lowerRank].base++;
     }
+
     assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0);
+    /* Set up the rankPosition table */
     for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) {
         rankPosition[n-1].base += rankPosition[n].base;
         rankPosition[n-1].curr = rankPosition[n-1].base;
     }
-    /* Sort */
+
+    /* Insert each symbol into their appropriate bucket, setting up rankPosition table. */
     for (n = 0; n < maxSymbolValue1; ++n) {
         U32 const c = count[n];
-        U32 const r = BIT_highbit32(c+1) + 1;
-        U32 pos = rankPosition[r].curr++;
-        /* Insert into the correct position in the rank.
-         * We have at most 256 symbols, so this insertion should be fine.
-         */
-        while ((pos > rankPosition[r].base) && (c > huffNode[pos-1].count)) {
-            huffNode[pos] = huffNode[pos-1];
-            pos--;
-        }
+        U32 const r = HUF_getIndex(c) + 1;
+        U32 const pos = rankPosition[r].curr++;
+        assert(pos < maxSymbolValue1);
         huffNode[pos].count = c;
         huffNode[pos].byte  = (BYTE)n;
     }
+
+    /* Sort each bucket. */
+    for (n = RANK_POSITION_DISTINCT_COUNT_CUTOFF; n < RANK_POSITION_TABLE_SIZE - 1; ++n) {
+        U32 const bucketSize = rankPosition[n].curr-rankPosition[n].base;
+        U32 const bucketStartIdx = rankPosition[n].base;
+        if (bucketSize > 1) {
+            assert(bucketStartIdx < maxSymbolValue1);
+            HUF_simpleQuickSort(huffNode + bucketStartIdx, 0, bucketSize-1);
+        }
+    }
+
+    assert(HUF_isSorted(huffNode, maxSymbolValue1));
 }
 
 /** HUF_buildCTable_wksp() :
 *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
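/* [Editor's note] Minimal sketch of the counting-sort placement HUF_sort now
 * relies on (standalone C under simplified assumptions, not the commit's
 * code): count bucket sizes, suffix-sum them so each bucket knows where it
 * begins in the output (higher buckets, i.e. larger counts, come first), then
 * drop every element at its bucket's cursor. Inputs are assumed to use bucket
 * indices in [0, NBUCKETS-2], mirroring the assert on the top bucket above. */
#include <stddef.h>

#define NBUCKETS 8

static void bucket_place(const unsigned idx[], size_t n, unsigned out[])
{
    unsigned base[NBUCKETS + 1] = {0};
    unsigned curr[NBUCKETS + 1] = {0};
    size_t i;
    int b;
    for (i = 0; i < n; ++i) base[idx[i]]++;   /* bucket sizes                        */
    for (b = NBUCKETS - 1; b > 0; --b) {      /* suffix sums: buckets above b first  */
        base[b-1] += base[b];
        curr[b-1] = base[b-1];
    }
    /* element i with bucket idx[i] lands at curr[idx[i]+1]++, mirroring
     * rankPosition[r].curr++ with r = lowerRank + 1 in HUF_sort() */
    for (i = 0; i < n; ++i) out[curr[idx[i] + 1]++] = (unsigned)i;
}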
@@ -490,6 +655,7 @@ static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue)
  */
 static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits)
 {
+    HUF_CElt* const ct = CTable + 1;
     /* fill result into ctable (val, nbBits) */
     int n;
     U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
@@ -505,20 +671,20 @@ static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, i
             min >>= 1;
     }   }
     for (n=0; n<alphabetSize; n++)
-        CTable[huffNode[n].byte].nbBits = huffNode[n].nbBits;   /* push nbBits per symbol, symbol order */
+        HUF_setNbBits(ct + huffNode[n].byte, huffNode[n].nbBits);   /* push nbBits per symbol, symbol order */
     for (n=0; n<alphabetSize; n++)
-        CTable[n].val = valPerRank[CTable[n].nbBits]++;   /* assign value within rank, symbol order */
+        HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++);   /* assign value within rank, symbol order */
+    CTable[0] = maxNbBits;
 }
 
-size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
+size_t HUF_buildCTable_wksp (HUF_CElt* CTable, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
 {
-    HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)workSpace;
+    HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(U32));
     nodeElt* const huffNode0 = wksp_tables->huffNodeTbl;
     nodeElt* const huffNode = huffNode0+1;
     int nonNullRank;
 
     /* safety checks */
-    if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
     if (wkspSize < sizeof(HUF_buildCTable_wksp_tables))
         return ERROR(workSpace_tooSmall);
     if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
@@ -536,96 +702,334 @@ size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbo
     maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits);
     if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC);   /* check fit into table */
 
-    HUF_buildCTableFromTree(tree, huffNode, nonNullRank, maxSymbolValue, maxNbBits);
+    HUF_buildCTableFromTree(CTable, huffNode, nonNullRank, maxSymbolValue, maxNbBits);
 
     return maxNbBits;
 }
 
 size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
 {
+    HUF_CElt const* ct = CTable + 1;
     size_t nbBits = 0;
     int s;
     for (s = 0; s <= (int)maxSymbolValue; ++s) {
-        nbBits += CTable[s].nbBits * count[s];
+        nbBits += HUF_getNbBits(ct[s]) * count[s];
     }
     return nbBits >> 3;
 }
 
 int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
+    HUF_CElt const* ct = CTable + 1;
     int bad = 0;
     int s;
     for (s = 0; s <= (int)maxSymbolValue; ++s) {
-        bad |= (count[s] != 0) & (CTable[s].nbBits == 0);
+        bad |= (count[s] != 0) & (HUF_getNbBits(ct[s]) == 0);
     }
     return !bad;
 }
 
 size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
 
-FORCE_INLINE_TEMPLATE void
-HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable)
-{
-    BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
-}
-
-#define HUF_FLUSHBITS(s)  BIT_flushBits(s)
+/** HUF_CStream_t:
+ *  Huffman uses its own BIT_CStream_t implementation.
+ *  There are three major differences from BIT_CStream_t:
+ *  1. HUF_addBits() takes a HUF_CElt (size_t) which is
+ *     the pair (nbBits, value) in the format:
+ *     format:
+ *       - Bits [0, 4)            = nbBits
+ *       - Bits [4, 64 - nbBits)  = 0
+ *       - Bits [64 - nbBits, 64) = value
+ *  2. The bitContainer is built from the upper bits and
+ *     right shifted. E.g. to add a new value of N bits
+ *     you right shift the bitContainer by N, then or in
+ *     the new value into the N upper bits.
+ *  3. The bitstream has two bit containers. You can add
+ *     bits to the second container and merge them into
+ *     the first container.
+ */
+
+#define HUF_BITS_IN_CONTAINER (sizeof(size_t) * 8)
+
+typedef struct {
+    size_t bitContainer[2];
+    size_t bitPos[2];
+
+    BYTE* startPtr;
+    BYTE* ptr;
+    BYTE* endPtr;
+} HUF_CStream_t;
+
+/**! HUF_initCStream():
+ * Initializes the bitstream.
+ * @returns 0 or an error code.
+ */
+static size_t HUF_initCStream(HUF_CStream_t* bitC,
+                              void* startPtr, size_t dstCapacity)
+{
+    ZSTD_memset(bitC, 0, sizeof(*bitC));
+    bitC->startPtr = (BYTE*)startPtr;
+    bitC->ptr = bitC->startPtr;
+    bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer[0]);
+    if (dstCapacity <= sizeof(bitC->bitContainer[0])) return ERROR(dstSize_tooSmall);
+    return 0;
+}
+
+/*! HUF_addBits():
+ * Adds the symbol stored in HUF_CElt elt to the bitstream.
+ *
+ * @param elt   The element we're adding. This is a (nbBits, value) pair.
+ *              See the HUF_CStream_t docs for the format.
+ * @param idx   Insert into the bitstream at this idx.
+ * @param kFast This is a template parameter. If the bitstream is guaranteed
+ *              to have at least 4 unused bits after this call it may be 1,
+ *              otherwise it must be 0. HUF_addBits() is faster when fast is set.
+ */
+FORCE_INLINE_TEMPLATE void HUF_addBits(HUF_CStream_t* bitC, HUF_CElt elt, int idx, int kFast)
+{
+    assert(idx <= 1);
+    assert(HUF_getNbBits(elt) <= HUF_TABLELOG_ABSOLUTEMAX);
+    /* This is efficient on x86-64 with BMI2 because shrx
+     * only reads the low 6 bits of the register. The compiler
+     * knows this and elides the mask. When fast is set,
+     * every operation can use the same value loaded from elt.
+     */
+    bitC->bitContainer[idx] >>= HUF_getNbBits(elt);
+    bitC->bitContainer[idx] |= kFast ? HUF_getValueFast(elt) : HUF_getValue(elt);
+    /* We only read the low 8 bits of bitC->bitPos[idx] so it
+     * doesn't matter that the high bits have noise from the value.
+     */
+    bitC->bitPos[idx] += HUF_getNbBitsFast(elt);
+    assert((bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER);
+    /* The last 4-bits of elt are dirty if fast is set,
+     * so we must not be overwriting bits that have already been
+     * inserted into the bit container.
+     */
+#if DEBUGLEVEL >= 1
+    {
+        size_t const nbBits = HUF_getNbBits(elt);
+        size_t const dirtyBits = nbBits == 0 ? 0 : BIT_highbit32((U32)nbBits) + 1;
+        (void)dirtyBits;
+        /* Middle bits are 0. */
+        assert(((elt >> dirtyBits) << (dirtyBits + nbBits)) == 0);
+        /* We didn't overwrite any bits in the bit container. */
+        assert(!kFast || (bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER);
+        (void)dirtyBits;
+    }
+#endif
+}
+
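/* [Editor's note] Standalone demo of the reversed bit container maintained by
 * HUF_addBits above (illustrative sketch assuming a 64-bit word, not the
 * commit's code). New codes enter at the top of the word, so older bits sink
 * toward the flush boundary without ever being reshifted individually. */
#include <assert.h>
#include <stdint.h>

static void container_demo(void)
{
    uint64_t bits = 0;
    unsigned pos = 0;
    /* add a 3-bit code 0b101: shift container right, OR value into the top 3 bits */
    bits >>= 3; bits |= (uint64_t)0x5 << (64 - 3); pos += 3;
    /* add a 2-bit code 0b01 on top of it */
    bits >>= 2; bits |= (uint64_t)0x1 << (64 - 2); pos += 2;
    /* top 5 bits now read 0b01101: the newest code sits above the older one */
    assert((bits >> (64 - pos)) == 0x0D);
    (void)bits;
}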
-#define HUF_FLUSHBITS_1(stream) \
-    if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream)
+FORCE_INLINE_TEMPLATE void HUF_zeroIndex1(HUF_CStream_t* bitC)
+{
+    bitC->bitContainer[1] = 0;
+    bitC->bitPos[1] = 0;
+}
+
+/*! HUF_mergeIndex1() :
+ * Merges the bit container @ index 1 into the bit container @ index 0
+ * and zeros the bit container @ index 1.
+ */
+FORCE_INLINE_TEMPLATE void HUF_mergeIndex1(HUF_CStream_t* bitC)
+{
+    assert((bitC->bitPos[1] & 0xFF) < HUF_BITS_IN_CONTAINER);
+    bitC->bitContainer[0] >>= (bitC->bitPos[1] & 0xFF);
+    bitC->bitContainer[0] |= bitC->bitContainer[1];
+    bitC->bitPos[0] += bitC->bitPos[1];
+    assert((bitC->bitPos[0] & 0xFF) <= HUF_BITS_IN_CONTAINER);
+}
+
+/*! HUF_flushBits() :
+ * Flushes the bits in the bit container @ index 0.
+ *
+ * @post bitPos will be < 8.
+ * @param kFast If kFast is set then we must know a-priori that
+ *              the bit container will not overflow.
+ */
+FORCE_INLINE_TEMPLATE void HUF_flushBits(HUF_CStream_t* bitC, int kFast)
+{
+    /* The upper bits of bitPos are noisy, so we must mask by 0xFF. */
+    size_t const nbBits = bitC->bitPos[0] & 0xFF;
+    size_t const nbBytes = nbBits >> 3;
+    /* The top nbBits bits of bitContainer are the ones we need. */
+    size_t const bitContainer = bitC->bitContainer[0] >> (HUF_BITS_IN_CONTAINER - nbBits);
+    /* Mask bitPos to account for the bytes we consumed. */
+    bitC->bitPos[0] &= 7;
+    assert(nbBits > 0);
+    assert(nbBits <= sizeof(bitC->bitContainer[0]) * 8);
+    assert(bitC->ptr <= bitC->endPtr);
+    MEM_writeLEST(bitC->ptr, bitContainer);
+    bitC->ptr += nbBytes;
+    assert(!kFast || bitC->ptr <= bitC->endPtr);
+    if (!kFast && bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
+    /* bitContainer doesn't need to be modified because the leftover
+     * bits are already the top bitPos bits. And we don't care about
+     * noise in the lower values.
+     */
+}
+
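/* [Editor's note] Worked example for HUF_flushBits above (illustrative, not
 * part of the commit). If bitPos[0] & 0xFF is 21, then nbBytes = 21 >> 3 = 2:
 * the container is shifted so its 21 valid top bits become right-justified,
 * MEM_writeLEST stores them little-endian, and advancing ptr by 2 makes only
 * the 16 least-recently added bits durable. bitPos[0] &= 7 leaves 5 carry
 * bits; they are the newest bits and still sit at the top of the container,
 * so no further shifting is needed before the next HUF_addBits(). */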
+/*! HUF_endMark()
+ * @returns The Huffman stream end mark: A 1-bit value = 1.
+ */
+static HUF_CElt HUF_endMark(void)
+{
+    HUF_CElt endMark;
+    HUF_setNbBits(&endMark, 1);
+    HUF_setValue(&endMark, 1);
+    return endMark;
+}
+
+/*! HUF_closeCStream() :
+ *  @return Size of CStream, in bytes,
+ *          or 0 if it could not fit into dstBuffer */
+static size_t HUF_closeCStream(HUF_CStream_t* bitC)
+{
+    HUF_addBits(bitC, HUF_endMark(), /* idx */ 0, /* kFast */ 0);
+    HUF_flushBits(bitC, /* kFast */ 0);
+    {
+        size_t const nbBits = bitC->bitPos[0] & 0xFF;
+        if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
+        return (bitC->ptr - bitC->startPtr) + (nbBits > 0);
+    }
+}
+
+FORCE_INLINE_TEMPLATE void
+HUF_encodeSymbol(HUF_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable, int idx, int fast)
+{
+    HUF_addBits(bitCPtr, CTable[symbol], idx, fast);
+}
+
+FORCE_INLINE_TEMPLATE void
+HUF_compress1X_usingCTable_internal_body_loop(HUF_CStream_t* bitC,
+                                   const BYTE* ip, size_t srcSize,
+                                   const HUF_CElt* ct,
+                                   int kUnroll, int kFastFlush, int kLastFast)
+{
+    /* Join to kUnroll */
+    int n = (int)srcSize;
+    int rem = n % kUnroll;
+    if (rem > 0) {
+        for (; rem > 0; --rem) {
+            HUF_encodeSymbol(bitC, ip[--n], ct, 0, /* fast */ 0);
+        }
+        HUF_flushBits(bitC, kFastFlush);
+    }
+    assert(n % kUnroll == 0);
+
+    /* Join to 2 * kUnroll */
+    if (n % (2 * kUnroll)) {
+        int u;
+        for (u = 1; u < kUnroll; ++u) {
+            HUF_encodeSymbol(bitC, ip[n - u], ct, 0, 1);
+        }
+        HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, 0, kLastFast);
+        HUF_flushBits(bitC, kFastFlush);
+        n -= kUnroll;
+    }
+    assert(n % (2 * kUnroll) == 0);
+
+    for (; n>0; n-= 2 * kUnroll) {
+        /* Encode kUnroll symbols into the bitstream @ index 0. */
+        int u;
+        for (u = 1; u < kUnroll; ++u) {
+            HUF_encodeSymbol(bitC, ip[n - u], ct, /* idx */ 0, /* fast */ 1);
+        }
+        HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, /* idx */ 0, /* fast */ kLastFast);
+        HUF_flushBits(bitC, kFastFlush);
+        /* Encode kUnroll symbols into the bitstream @ index 1.
+         * This allows us to start filling the bit container
+         * without any data dependencies.
+         */
+        HUF_zeroIndex1(bitC);
+        for (u = 1; u < kUnroll; ++u) {
+            HUF_encodeSymbol(bitC, ip[n - kUnroll - u], ct, /* idx */ 1, /* fast */ 1);
+        }
+        HUF_encodeSymbol(bitC, ip[n - kUnroll - kUnroll], ct, /* idx */ 1, /* fast */ kLastFast);
+        /* Merge bitstream @ index 1 into the bitstream @ index 0 */
+        HUF_mergeIndex1(bitC);
+        HUF_flushBits(bitC, kFastFlush);
+    }
+    assert(n == 0);
+}
+
+/**
+ * Returns a tight upper bound on the output space needed by Huffman
+ * with 8 bytes buffer to handle over-writes. If the output is at least
+ * this large we don't need to do bounds checks during Huffman encoding.
+ */
+static size_t HUF_tightCompressBound(size_t srcSize, size_t tableLog)
+{
+    return ((srcSize * tableLog) >> 3) + 8;
+}
+
-#define HUF_FLUSHBITS_2(stream) \
-    if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream)
 
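/* [Editor's note] Worked example for HUF_tightCompressBound above
 * (illustrative): with srcSize = 4096 and tableLog = 11 the bound is
 * (4096*11) >> 3 + 8 = 5632 + 8 = 5640 bytes, i.e. every symbol charged the
 * full tableLog bits plus 8 bytes of over-write slack. When dst is at least
 * this large the encoder below can use the unchecked kFastFlush loops;
 * otherwise it falls back to the bounds-checked variant. */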
 FORCE_INLINE_TEMPLATE size_t
 HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
                                    const void* src, size_t srcSize,
                                    const HUF_CElt* CTable)
 {
+    U32 const tableLog = (U32)CTable[0];
+    HUF_CElt const* ct = CTable + 1;
     const BYTE* ip = (const BYTE*) src;
     BYTE* const ostart = (BYTE*)dst;
     BYTE* const oend = ostart + dstSize;
     BYTE* op = ostart;
-    size_t n;
-    BIT_CStream_t bitC;
+    HUF_CStream_t bitC;
 
     /* init */
     if (dstSize < 8) return 0;   /* not enough space to compress */
-    { size_t const initErr = BIT_initCStream(&bitC, op, (size_t)(oend-op));
+    { size_t const initErr = HUF_initCStream(&bitC, op, (size_t)(oend-op));
       if (HUF_isError(initErr)) return 0; }
 
-    n = srcSize & ~3;  /* join to mod 4 */
-    switch (srcSize & 3)
-    {
-        case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable);
-                 HUF_FLUSHBITS_2(&bitC);
-                 /* fall-through */
-        case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable);
-                 HUF_FLUSHBITS_1(&bitC);
-                 /* fall-through */
-        case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable);
-                 HUF_FLUSHBITS(&bitC);
-                 /* fall-through */
-        case 0 : /* fall-through */
-        default: break;
-    }
-
-    for (; n>0; n-=4) {  /* note : n&3==0 at this stage */
-        HUF_encodeSymbol(&bitC, ip[n- 1], CTable);
-        HUF_FLUSHBITS_1(&bitC);
-        HUF_encodeSymbol(&bitC, ip[n- 2], CTable);
-        HUF_FLUSHBITS_2(&bitC);
-        HUF_encodeSymbol(&bitC, ip[n- 3], CTable);
-        HUF_FLUSHBITS_1(&bitC);
-        HUF_encodeSymbol(&bitC, ip[n- 4], CTable);
-        HUF_FLUSHBITS(&bitC);
-    }
+    if (dstSize < HUF_tightCompressBound(srcSize, (size_t)tableLog) || tableLog > 11)
+        HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ MEM_32bits() ? 2 : 4, /* kFast */ 0, /* kLastFast */ 0);
+    else {
+        if (MEM_32bits()) {
+            switch (tableLog) {
+                case 11:
+                    HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 0);
+                    break;
+                case 10: ZSTD_FALLTHROUGH;
+                case 9: ZSTD_FALLTHROUGH;
+                case 8:
+                    HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 1);
+                    break;
+                case 7: ZSTD_FALLTHROUGH;
+                default:
+                    HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 3, /* kFastFlush */ 1, /* kLastFast */ 1);
+                    break;
+            }
+        } else {
+            switch (tableLog) {
+                case 11:
+                    HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 0);
+                    break;
+                case 10:
+                    HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 1);
+                    break;
+                case 9:
+                    HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 6, /* kFastFlush */ 1, /* kLastFast */ 0);
+                    break;
+                case 8:
+                    HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 7, /* kFastFlush */ 1, /* kLastFast */ 0);
+                    break;
+                case 7:
+                    HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 8, /* kFastFlush */ 1, /* kLastFast */ 0);
+                    break;
+                case 6: ZSTD_FALLTHROUGH;
+                default:
+                    HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 9, /* kFastFlush */ 1, /* kLastFast */ 1);
+                    break;
+            }
+        }
+    }
+    assert(bitC.ptr <= bitC.endPtr);
 
-    return BIT_closeCStream(&bitC);
+    return HUF_closeCStream(&bitC);
 }
 
 #if DYNAMIC_BMI2
 
-static TARGET_ATTRIBUTE("bmi2") size_t
+static BMI2_TARGET_ATTRIBUTE size_t
 HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize,
                                    const void* src, size_t srcSize,
                                    const HUF_CElt* CTable)
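/* [Editor's note] On the kUnroll values in the switch above (an inference,
 * not stated by the commit): each unchecked flush must fit kUnroll codes of
 * at most tableLog bits into one 64-bit container that may already hold up to
 * 7 carry bits, so kUnroll is roughly 57 / tableLog:
 *     tableLog 11 -> 5 (55 bits)     tableLog 9 -> 6 (54 bits)
 *     tableLog  8 -> 7 (56 bits)     tableLog 7 -> 8 (56 bits)
 * which matches the cases selected for 64-bit targets. */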
@@ -667,9 +1071,13 @@ HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
 
 size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
 {
-    return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
+    return HUF_compress1X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
+}
+
+size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2)
+{
+    return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2);
 }
 
 static size_t
 HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
@@ -689,8 +1097,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
 
     assert(op <= oend);
     {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
-        if (cSize==0) return 0;
-        assert(cSize <= 65535);
+        if (cSize == 0 || cSize > 65535) return 0;
        MEM_writeLE16(ostart, (U16)cSize);
        op += cSize;
     }
@@ -698,8 +1105,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
     ip += segmentSize;
     assert(op <= oend);
     {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
-        if (cSize==0) return 0;
-        assert(cSize <= 65535);
+        if (cSize == 0 || cSize > 65535) return 0;
        MEM_writeLE16(ostart+2, (U16)cSize);
        op += cSize;
     }
@@ -707,8 +1113,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
     ip += segmentSize;
     assert(op <= oend);
     {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
-        if (cSize==0) return 0;
-        assert(cSize <= 65535);
+        if (cSize == 0 || cSize > 65535) return 0;
        MEM_writeLE16(ostart+4, (U16)cSize);
        op += cSize;
     }
@@ -717,7 +1122,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
     assert(op <= oend);
     assert(ip <= iend);
     {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) );
-        if (cSize==0) return 0;
+        if (cSize == 0 || cSize > 65535) return 0;
        op += cSize;
     }
 
@@ -726,7 +1131,12 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
 
 size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
 {
-    return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
+    return HUF_compress4X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
+}
+
+size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2)
+{
+    return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2);
 }
 
 typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;
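/* [Editor's note] Context for the new `cSize > 65535` checks in the hunks
 * above (illustrative sketch, not the commit's code): the 4-stream format
 * begins with a 6-byte jump table of three little-endian U16 stream sizes
 * (the 4th stream's size is implied), so a single stream larger than 65535
 * bytes simply cannot be represented; the code now bails out with 0 (emit
 * uncompressed) instead of merely asserting in debug builds. */
#include <stdint.h>
#include <string.h>

static void write_jump_table(uint8_t* ostart, uint16_t s1, uint16_t s2, uint16_t s3)
{
    uint8_t le[6] = { (uint8_t)s1, (uint8_t)(s1 >> 8),
                      (uint8_t)s2, (uint8_t)(s2 >> 8),
                      (uint8_t)s3, (uint8_t)(s3 >> 8) };
    memcpy(ostart, le, 6);   /* mirrors MEM_writeLE16 at ostart, ostart+2, ostart+4 */
}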
@@ -750,35 +1160,38 @@ static size_t HUF_compressCTable_internal(
 
 typedef struct {
     unsigned count[HUF_SYMBOLVALUE_MAX + 1];
-    HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1];
+    HUF_CElt CTable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)];
     union {
         HUF_buildCTable_wksp_tables buildCTable_wksp;
         HUF_WriteCTableWksp writeCTable_wksp;
+        U32 hist_wksp[HIST_WKSP_SIZE_U32];
     } wksps;
 } HUF_compress_tables_t;
 
+#define SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE 4096
+#define SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10  /* Must be >= 2 */
+
 /* HUF_compress_internal() :
 * `workSpace_align4` must be aligned on 4-bytes boundaries,
-* and occupies the same space as a table of HUF_WORKSPACE_SIZE_U32 unsigned */
+* and occupies the same space as a table of HUF_WORKSPACE_SIZE_U64 unsigned */
 static size_t
 HUF_compress_internal (void* dst, size_t dstSize,
                  const void* src, size_t srcSize,
                        unsigned maxSymbolValue, unsigned huffLog,
                        HUF_nbStreams_e nbStreams,
-                       void* workSpace_align4, size_t wkspSize,
+                       void* workSpace, size_t wkspSize,
                        HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
-                       const int bmi2)
+                       const int bmi2, unsigned suspectUncompressible)
 {
-    HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace_align4;
+    HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(size_t));
     BYTE* const ostart = (BYTE*)dst;
     BYTE* const oend = ostart + dstSize;
     BYTE* op = ostart;
 
-    HUF_STATIC_ASSERT(sizeof(*table) <= HUF_WORKSPACE_SIZE);
-    assert(((size_t)workSpace_align4 & 3) == 0);   /* must be aligned on 4-bytes boundaries */
+    HUF_STATIC_ASSERT(sizeof(*table) + HUF_WORKSPACE_MAX_ALIGNMENT <= HUF_WORKSPACE_SIZE);
 
     /* checks & inits */
-    if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall);
+    if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall);
     if (!srcSize) return 0;  /* Uncompressed */
     if (!dstSize) return 0;  /* cannot fit anything within dst budget */
     if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);   /* current block size limit */
@@ -794,8 +1207,23 @@ HUF_compress_internal (void* dst, size_t dstSize,
                                      nbStreams, oldHufTable, bmi2);
     }
 
+    /* If uncompressible data is suspected, do a smaller sampling first */
+    DEBUG_STATIC_ASSERT(SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO >= 2);
+    if (suspectUncompressible && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) {
+        size_t largestTotal = 0;
+        {   unsigned maxSymbolValueBegin = maxSymbolValue;
+            CHECK_V_F(largestBegin, HIST_count_simple (table->count, &maxSymbolValueBegin, (const BYTE*)src, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) );
+            largestTotal += largestBegin;
+        }
+        {   unsigned maxSymbolValueEnd = maxSymbolValue;
+            CHECK_V_F(largestEnd, HIST_count_simple (table->count, &maxSymbolValueEnd, (const BYTE*)src + srcSize - SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) );
+            largestTotal += largestEnd;
+        }
+        if (largestTotal <= ((2 * SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) >> 7)+4) return 0;   /* heuristic : probably not compressible enough */
+    }
+
     /* Scan input and build symbol stats */
-    {   CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace_align4, wkspSize) );
+    {   CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->wksps.hist_wksp, sizeof(table->wksps.hist_wksp)) );
         if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; }   /* single symbol, rle */
         if (largest <= (srcSize >> 7)+4) return 0;   /* heuristic : probably not compressible enough */
     }
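/* [Editor's note] Shape of the new incompressibility pre-check above as a
 * standalone sketch (illustrative, not the commit's code; HIST_count_simple
 * returns the count of the most frequent byte in the sampled region).
 * Histogram 4 KiB at each end of the block, and if even the most frequent
 * byte is barely above the uniform expectation, skip the full Huffman
 * attempt. */
#include <stddef.h>

static int probably_incompressible(size_t largestBegin, size_t largestEnd)
{
    size_t const sampleSize = 4096;                        /* bytes sampled per end */
    size_t const largestTotal = largestBegin + largestEnd;
    /* same threshold family as the block-level check: <= size/128 + 4 */
    return largestTotal <= ((2 * sampleSize) >> 7) + 4;    /* 64 + 4 = 68           */
}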
@@ -820,9 +1248,12 @@ HUF_compress_internal (void* dst, size_t dstSize,
                             &table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp));
         CHECK_F(maxBits);
         huffLog = (U32)maxBits;
+    }
     /* Zero unused symbols in CTable, so we can check it for validity */
-    ZSTD_memset(table->CTable + (maxSymbolValue + 1), 0,
-                sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt)));
+    {
+        size_t const ctableSize = HUF_CTABLE_SIZE_ST(maxSymbolValue);
+        size_t const unusedSize = sizeof(table->CTable) - ctableSize * sizeof(HUF_CElt);
+        ZSTD_memset(table->CTable + ctableSize, 0, unusedSize);
     }
 
     /* Write table description header */
@@ -859,19 +1290,20 @@ size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
     return HUF_compress_internal(dst, dstSize, src, srcSize,
                                  maxSymbolValue, huffLog, HUF_singleStream,
                                  workSpace, wkspSize,
-                                 NULL, NULL, 0, 0 /*bmi2*/);
+                                 NULL, NULL, 0, 0 /*bmi2*/, 0);
 }

 size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
                       const void* src, size_t srcSize,
                       unsigned maxSymbolValue, unsigned huffLog,
                       void* workSpace, size_t wkspSize,
-                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
+                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat,
+                      int bmi2, unsigned suspectUncompressible)
 {
     return HUF_compress_internal(dst, dstSize, src, srcSize,
                                  maxSymbolValue, huffLog, HUF_singleStream,
                                  workSpace, wkspSize, hufTable,
-                                 repeat, preferRepeat, bmi2);
+                                 repeat, preferRepeat, bmi2, suspectUncompressible);
 }

 /* HUF_compress4X_repeat():
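Every caller of the _repeat entry points gains one trailing argument. A hedged usage sketch of the new signature; the wrapper function and buffer names are illustrative, the U64-based workspace sizing comes from the HUF_compress1X/HUF_compress2 hunks later in this diff, and zstd's internal lib/common headers are assumed to be on the include path:

#include "huf.h"   /* zstd internal header: HUF_compress1X_repeat, HUF_WORKSPACE_SIZE_U64 */
#include "mem.h"   /* zstd internal header: U64 */

/* Compress `src` as a single huffman stream, passing the new
 * suspectUncompressible flag as the last argument. Sketch only. */
static size_t compress_literals_1x(void* dst, size_t dstSize,
                                   const void* src, size_t srcSize,
                                   HUF_CElt* hufTable, HUF_repeat* repeat)
{
    U64 workSpace[HUF_WORKSPACE_SIZE_U64];   /* note the new U64-based sizing */
    return HUF_compress1X_repeat(dst, dstSize, src, srcSize,
                                 HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT,
                                 workSpace, sizeof(workSpace),
                                 hufTable, repeat,
                                 0 /* preferRepeat */, 0 /* bmi2 */,
                                 1 /* suspectUncompressible */);
}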
@@ -885,22 +1317,23 @@ size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
     return HUF_compress_internal(dst, dstSize, src, srcSize,
                                  maxSymbolValue, huffLog, HUF_fourStreams,
                                  workSpace, wkspSize,
-                                 NULL, NULL, 0, 0 /*bmi2*/);
+                                 NULL, NULL, 0, 0 /*bmi2*/, 0);
 }

 /* HUF_compress4X_repeat():
  * compress input using 4 streams.
+ * consider skipping quickly
  * re-use an existing huffman compression table */
 size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
                       const void* src, size_t srcSize,
                       unsigned maxSymbolValue, unsigned huffLog,
                       void* workSpace, size_t wkspSize,
-                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
+                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible)
 {
     return HUF_compress_internal(dst, dstSize, src, srcSize,
                                  maxSymbolValue, huffLog, HUF_fourStreams,
                                  workSpace, wkspSize,
-                                 hufTable, repeat, preferRepeat, bmi2);
+                                 hufTable, repeat, preferRepeat, bmi2, suspectUncompressible);
 }

 #ifndef ZSTD_NO_UNUSED_FUNCTIONS
@@ -918,7 +1351,7 @@ size_t HUF_compress1X (void* dst, size_t dstSize,
                  const void* src, size_t srcSize,
                  unsigned maxSymbolValue, unsigned huffLog)
 {
-    unsigned workSpace[HUF_WORKSPACE_SIZE_U32];
+    U64 workSpace[HUF_WORKSPACE_SIZE_U64];
     return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
 }

@@ -926,7 +1359,7 @@ size_t HUF_compress2 (void* dst, size_t dstSize,
                  const void* src, size_t srcSize,
                  unsigned maxSymbolValue, unsigned huffLog)
 {
-    unsigned workSpace[HUF_WORKSPACE_SIZE_U32];
+    U64 workSpace[HUF_WORKSPACE_SIZE_U64];
     return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
 }
File diff suppressed because it is too large
@@ -63,7 +63,7 @@ typedef struct {
 } ZSTD_localDict;

 typedef struct {
-    HUF_CElt CTable[HUF_CTABLE_SIZE_U32(255)];
+    HUF_CElt CTable[HUF_CTABLE_SIZE_ST(255)];
     HUF_repeat repeatMode;
 } ZSTD_hufCTables_t;
@@ -179,7 +179,7 @@ typedef struct {
     U32 offCodeSumBasePrice;    /* to compare to log2(offreq) */
     ZSTD_OptPrice_e priceType;   /* prices can be determined dynamically, or follow a pre-defined cost structure */
     const ZSTD_entropyCTables_t* symbolCosts;  /* pre-calculated dictionary statistics */
-    ZSTD_literalCompressionMode_e literalCompressionMode;
+    ZSTD_paramSwitch_e literalCompressionMode;
 } optState_t;

 typedef struct {
@@ -199,6 +199,8 @@ typedef struct {
      */
 } ZSTD_window_t;

+#define ZSTD_WINDOW_START_INDEX 2
+
 typedef struct ZSTD_matchState_t ZSTD_matchState_t;

 #define ZSTD_ROW_HASH_CACHE_SIZE 8       /* Size of prefetching hash cache for row-based matchfinder */
@@ -264,7 +266,7 @@ typedef struct {
 } ldmState_t;

 typedef struct {
-    U32 enableLdm;          /* 1 if enable long distance matching */
+    ZSTD_paramSwitch_e enableLdm; /* ZSTD_ps_enable to enable LDM. ZSTD_ps_auto by default */
     U32 hashLog;            /* Log size of hashTable */
     U32 bucketSizeLog;      /* Log bucket size for collision resolution, at most 8 */
     U32 minMatchLength;     /* Minimum match length */
@@ -295,7 +297,7 @@ struct ZSTD_CCtx_params_s {
      * There is no guarantee that hint is close to actual source size */

     ZSTD_dictAttachPref_e attachDictPref;
-    ZSTD_literalCompressionMode_e literalCompressionMode;
+    ZSTD_paramSwitch_e literalCompressionMode;

     /* Multithreading: used to pass parameters to mtctx */
     int nbWorkers;
@@ -318,10 +320,10 @@ struct ZSTD_CCtx_params_s {
     int validateSequences;

     /* Block splitting */
-    int splitBlocks;
+    ZSTD_paramSwitch_e useBlockSplitter;

     /* Param for deciding whether to use row-based matchfinder */
-    ZSTD_useRowMatchFinderMode_e useRowMatchFinder;
+    ZSTD_paramSwitch_e useRowMatchFinder;

     /* Always load a dictionary in ext-dict mode (not prefix mode)? */
     int deterministicRefPrefix;
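For context: several formerly ad-hoc fields (int splitBlocks, ZSTD_useRowMatchFinderMode_e, ZSTD_literalCompressionMode_e) collapse into a single tri-state ZSTD_paramSwitch_e (auto/enable/disable). The usual pattern for consuming such a field is to resolve "auto" into a concrete choice once, early. A minimal standalone sketch of that pattern; the enum values mirror the ZSTD_ps_* constants seen later in this diff, and the resolver function is illustrative, not part of zstd:

/* Standalone sketch: do not compile alongside zstd.h, which defines the real enum. */
typedef enum { ZSTD_ps_auto = 0, ZSTD_ps_enable = 1, ZSTD_ps_disable = 2 } ZSTD_paramSwitch_e;

/* Resolve a tri-state switch into a hard on/off decision.
 * `defaultOn` encodes what "auto" should mean for this particular parameter
 * (e.g. enable the row matchfinder only for certain strategies/levels). */
static int resolveSwitch(ZSTD_paramSwitch_e ps, int defaultOn)
{
    if (ps == ZSTD_ps_enable)  return 1;
    if (ps == ZSTD_ps_disable) return 0;
    return defaultOn;   /* ZSTD_ps_auto */
}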
@@ -343,6 +345,22 @@ typedef enum {
     ZSTDb_buffered
 } ZSTD_buffered_policy_e;

+/**
+ * Struct that contains all elements of block splitter that should be allocated
+ * in a wksp.
+ */
+#define ZSTD_MAX_NB_BLOCK_SPLITS 196
+typedef struct {
+    seqStore_t fullSeqStoreChunk;
+    seqStore_t firstHalfSeqStore;
+    seqStore_t secondHalfSeqStore;
+    seqStore_t currSeqStore;
+    seqStore_t nextSeqStore;
+
+    U32 partitions[ZSTD_MAX_NB_BLOCK_SPLITS];
+    ZSTD_entropyCTablesMetadata_t entropyMetadata;
+} ZSTD_blockSplitCtx;
+
 struct ZSTD_CCtx_s {
     ZSTD_compressionStage_e stage;
     int cParamsChanged;                  /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */
@@ -374,7 +392,7 @@ struct ZSTD_CCtx_s {
     ZSTD_blockState_t blockState;
     U32* entropyWorkspace;  /* entropy workspace of ENTROPY_WORKSPACE_SIZE bytes */

-    /* Wether we are streaming or not */
+    /* Whether we are streaming or not */
     ZSTD_buffered_policy_e bufferedPolicy;

     /* streaming */
@@ -408,6 +426,9 @@ struct ZSTD_CCtx_s {
 #if ZSTD_TRACE
     ZSTD_TraceCtx traceCtx;
 #endif
+
+    /* Workspace for block splitter */
+    ZSTD_blockSplitCtx blockSplitCtx;
 };

 typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;
@@ -442,7 +463,7 @@ typedef enum {
 typedef size_t (*ZSTD_blockCompressor) (
         ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize);
-ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_useRowMatchFinderMode_e rowMatchfinderMode, ZSTD_dictMode_e dictMode);
+ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e rowMatchfinderMode, ZSTD_dictMode_e dictMode);


 MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
@@ -549,17 +570,17 @@ MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
     return (srcSize >> minlog) + 2;
 }

-MEM_STATIC int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams)
+MEM_STATIC int ZSTD_literalsCompressionIsDisabled(const ZSTD_CCtx_params* cctxParams)
 {
     switch (cctxParams->literalCompressionMode) {
-    case ZSTD_lcm_huffman:
+    case ZSTD_ps_enable:
         return 0;
-    case ZSTD_lcm_uncompressed:
+    case ZSTD_ps_disable:
         return 1;
     default:
         assert(0 /* impossible: pre-validated */);
-        /* fall-through */
-    case ZSTD_lcm_auto:
+        ZSTD_FALLTHROUGH;
+    case ZSTD_ps_auto:
         return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
     }
 }
@@ -651,8 +672,14 @@ static unsigned ZSTD_NbCommonBytes (size_t val)
 #       if STATIC_BMI2
             return _tzcnt_u64(val) >> 3;
 #       else
-            unsigned long r = 0;
-            return _BitScanForward64( &r, (U64)val ) ? (unsigned)(r >> 3) : 0;
+            if (val != 0) {
+                unsigned long r;
+                _BitScanForward64(&r, (U64)val);
+                return (unsigned)(r >> 3);
+            } else {
+                /* Should not reach this code path */
+                __assume(0);
+            }
 #       endif
 #   elif defined(__GNUC__) && (__GNUC__ >= 4)
         return (__builtin_ctzll((U64)val) >> 3);
@@ -669,8 +696,14 @@ static unsigned ZSTD_NbCommonBytes (size_t val)
 #       endif
     } else { /* 32 bits */
 #       if defined(_MSC_VER)
-            unsigned long r=0;
-            return _BitScanForward( &r, (U32)val ) ? (unsigned)(r >> 3) : 0;
+            if (val != 0) {
+                unsigned long r;
+                _BitScanForward(&r, (U32)val);
+                return (unsigned)(r >> 3);
+            } else {
+                /* Should not reach this code path */
+                __assume(0);
+            }
 #       elif defined(__GNUC__) && (__GNUC__ >= 3)
             return (__builtin_ctz((U32)val) >> 3);
 #       else
@@ -687,8 +720,14 @@ static unsigned ZSTD_NbCommonBytes (size_t val)
 #       if STATIC_BMI2
             return _lzcnt_u64(val) >> 3;
 #       else
-            unsigned long r = 0;
-            return _BitScanReverse64(&r, (U64)val) ? (unsigned)(r >> 3) : 0;
+            if (val != 0) {
+                unsigned long r;
+                _BitScanReverse64(&r, (U64)val);
+                return (unsigned)(r >> 3);
+            } else {
+                /* Should not reach this code path */
+                __assume(0);
+            }
 #       endif
 #   elif defined(__GNUC__) && (__GNUC__ >= 4)
         return (__builtin_clzll(val) >> 3);
@@ -702,8 +741,14 @@ static unsigned ZSTD_NbCommonBytes (size_t val)
 #       endif
     } else { /* 32 bits */
 #       if defined(_MSC_VER)
-            unsigned long r = 0;
-            return _BitScanReverse( &r, (unsigned long)val ) ? (unsigned)(r >> 3) : 0;
+            if (val != 0) {
+                unsigned long r;
+                _BitScanReverse(&r, (unsigned long)val);
+                return (unsigned)(r >> 3);
+            } else {
+                /* Should not reach this code path */
+                __assume(0);
+            }
 #       elif defined(__GNUC__) && (__GNUC__ >= 3)
             return (__builtin_clz((U32)val) >> 3);
 #       else
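All four MSVC branches of ZSTD_NbCommonBytes get the same treatment: the old code initialized `r = 0` and used the intrinsic's boolean return to guard against `val == 0`, even though callers never pass zero; the new code makes that contract explicit and tells the optimizer the zero branch is unreachable via `__assume(0)`. A portable sketch of the underlying operation for the little-endian/trailing-zero case; it uses the GCC/Clang builtin from the surrounding context lines, and the function name is illustrative:

#include <assert.h>
#include <stdint.h>

/* Number of equal low-order bytes implied by the XOR-difference `val`
 * between two 64-bit loads (little-endian). The caller guarantees
 * val != 0, i.e. the two 8-byte words differ somewhere -- the same
 * contract the new MSVC branches encode with __assume(0). */
static unsigned nbCommonBytes64_le(uint64_t val)
{
    assert(val != 0);                              /* contract made explicit in the diff */
    return (unsigned)(__builtin_ctzll(val) >> 3);  /* trailing zero bits -> whole bytes */
}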
@@ -884,9 +929,9 @@ MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window)

 MEM_STATIC U32 ZSTD_window_isEmpty(ZSTD_window_t const window)
 {
-    return window.dictLimit == 1 &&
-           window.lowLimit == 1 &&
-           (window.nextSrc - window.base) == 1;
+    return window.dictLimit == ZSTD_WINDOW_START_INDEX &&
+           window.lowLimit == ZSTD_WINDOW_START_INDEX &&
+           (window.nextSrc - window.base) == ZSTD_WINDOW_START_INDEX;
 }

 /**
@@ -937,7 +982,9 @@ MEM_STATIC U32 ZSTD_window_canOverflowCorrect(ZSTD_window_t const window,
 {
     U32 const cycleSize = 1u << cycleLog;
     U32 const curr = (U32)((BYTE const*)src - window.base);
-    U32 const minIndexToOverflowCorrect = cycleSize + MAX(maxDist, cycleSize);
+    U32 const minIndexToOverflowCorrect = cycleSize
+                                        + MAX(maxDist, cycleSize)
+                                        + ZSTD_WINDOW_START_INDEX;

     /* Adjust the min index to backoff the overflow correction frequency,
      * so we don't waste too much CPU in overflow correction. If this
@@ -1012,10 +1059,14 @@ MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
     U32 const cycleSize = 1u << cycleLog;
     U32 const cycleMask = cycleSize - 1;
     U32 const curr = (U32)((BYTE const*)src - window->base);
-    U32 const currentCycle0 = curr & cycleMask;
-    /* Exclude zero so that newCurrent - maxDist >= 1. */
-    U32 const currentCycle1 = currentCycle0 == 0 ? cycleSize : currentCycle0;
-    U32 const newCurrent = currentCycle1 + MAX(maxDist, cycleSize);
+    U32 const currentCycle = curr & cycleMask;
+    /* Ensure newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX. */
+    U32 const currentCycleCorrection = currentCycle < ZSTD_WINDOW_START_INDEX
+                                     ? MAX(cycleSize, ZSTD_WINDOW_START_INDEX)
+                                     : 0;
+    U32 const newCurrent = currentCycle
+                         + currentCycleCorrection
+                         + MAX(maxDist, cycleSize);
     U32 const correction = curr - newCurrent;
     /* maxDist must be a power of two so that:
      *   (newCurrent & cycleMask) == (curr & cycleMask)
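The correction only works because `maxDist` is a power of two, so `MAX(maxDist, cycleSize)` is a multiple of `cycleSize` and subtracting `correction` preserves all indices modulo the cycle. A small self-check of the two invariants the surrounding comments rely on; the expressions mirror the diff, the constant and function names are illustrative:

#include <assert.h>
#include <stdint.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define WINDOW_START_INDEX 2u   /* mirrors ZSTD_WINDOW_START_INDEX from this diff */

/* Recompute newCurrent the way ZSTD_window_correctOverflow() now does,
 * and assert the properties used downstream. Requires maxDist to be a
 * power of two, as the code's own comment states. */
static void check_correction(uint32_t curr, uint32_t cycleLog, uint32_t maxDist)
{
    uint32_t const cycleSize = 1u << cycleLog;
    uint32_t const cycleMask = cycleSize - 1;
    uint32_t const currentCycle = curr & cycleMask;
    uint32_t const corr = currentCycle < WINDOW_START_INDEX
                        ? MAX(cycleSize, WINDOW_START_INDEX) : 0;
    uint32_t const newCurrent = currentCycle + corr + MAX(maxDist, cycleSize);

    assert((newCurrent & cycleMask) == (curr & cycleMask)); /* indices stay congruent mod cycleSize */
    assert(newCurrent - maxDist >= WINDOW_START_INDEX);     /* room for the reserved start indices  */
}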
@@ -1031,14 +1082,20 @@ MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,

     window->base += correction;
     window->dictBase += correction;
-    if (window->lowLimit <= correction) window->lowLimit = 1;
-    else window->lowLimit -= correction;
-    if (window->dictLimit <= correction) window->dictLimit = 1;
-    else window->dictLimit -= correction;
+    if (window->lowLimit < correction + ZSTD_WINDOW_START_INDEX) {
+        window->lowLimit = ZSTD_WINDOW_START_INDEX;
+    } else {
+        window->lowLimit -= correction;
+    }
+    if (window->dictLimit < correction + ZSTD_WINDOW_START_INDEX) {
+        window->dictLimit = ZSTD_WINDOW_START_INDEX;
+    } else {
+        window->dictLimit -= correction;
+    }

     /* Ensure we can still reference the full window. */
     assert(newCurrent >= maxDist);
-    assert(newCurrent - maxDist >= 1);
+    assert(newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX);
     /* Ensure that lowLimit and dictLimit didn't underflow. */
     assert(window->lowLimit <= newCurrent);
     assert(window->dictLimit <= newCurrent);
@@ -1151,9 +1208,10 @@ MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
     ZSTD_memset(window, 0, sizeof(*window));
     window->base = (BYTE const*)" ";
     window->dictBase = (BYTE const*)" ";
-    window->dictLimit = 1;    /* start from 1, so that 1st position is valid */
-    window->lowLimit = 1;     /* it ensures first and later CCtx usages compress the same */
-    window->nextSrc = window->base + 1;   /* see issue #1241 */
+    ZSTD_STATIC_ASSERT(ZSTD_DUBT_UNSORTED_MARK < ZSTD_WINDOW_START_INDEX); /* Start above ZSTD_DUBT_UNSORTED_MARK */
+    window->dictLimit = ZSTD_WINDOW_START_INDEX;    /* start from >0, so that 1st position is valid */
+    window->lowLimit = ZSTD_WINDOW_START_INDEX;     /* it ensures first and later CCtx usages compress the same */
+    window->nextSrc = window->base + ZSTD_WINDOW_START_INDEX;   /* see issue #1241 */
     window->nbOverflowCorrections = 0;
 }
@@ -73,7 +73,8 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
                               void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize,
                               void* entropyWorkspace, size_t entropyWorkspaceSize,
-                              const int bmi2)
+                              const int bmi2,
+                              unsigned suspectUncompressible)
 {
     size_t const minGain = ZSTD_minGain(srcSize, strategy);
     size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
@@ -105,11 +106,11 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
             HUF_compress1X_repeat(
                 ostart+lhSize, dstCapacity-lhSize, src, srcSize,
                 HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
-                (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2) :
+                (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2, suspectUncompressible) :
             HUF_compress4X_repeat(
                 ostart+lhSize, dstCapacity-lhSize, src, srcSize,
                 HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
-                (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
+                (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2, suspectUncompressible);
     if (repeat != HUF_repeat_none) {
         /* reused the existing table */
         DEBUGLOG(5, "Reusing previous huffman table");
@@ -18,12 +18,14 @@ size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src,

 size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize);

+/* If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
 size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
                               ZSTD_hufCTables_t* nextHuf,
                               ZSTD_strategy strategy, int disableLiteralCompression,
                               void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize,
                               void* entropyWorkspace, size_t entropyWorkspaceSize,
-                              const int bmi2);
+                              const int bmi2,
+                              unsigned suspectUncompressible);

 #endif /* ZSTD_COMPRESS_LITERALS_H */
@@ -275,10 +275,11 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
     assert(nbSeq_1 > 1);
     assert(entropyWorkspaceSize >= sizeof(ZSTD_BuildCTableWksp));
     (void)entropyWorkspaceSize;
-    FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "");
-    {   size_t const NCountSize = FSE_writeNCount(op, oend - op, wksp->norm, max, tableLog);   /* overflow protected */
+    FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "FSE_normalizeCount failed");
+    assert(oend >= op);
+    {   size_t const NCountSize = FSE_writeNCount(op, (size_t)(oend - op), wksp->norm, max, tableLog);   /* overflow protected */
         FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed");
-        FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), "");
+        FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), "FSE_buildCTable_wksp failed");
         return NCountSize;
     }
 }
@@ -398,7 +399,7 @@ ZSTD_encodeSequences_default(

 #if DYNAMIC_BMI2

-static TARGET_ATTRIBUTE("bmi2") size_t
+static BMI2_TARGET_ATTRIBUTE size_t
 ZSTD_encodeSequences_bmi2(
     void* dst, size_t dstCapacity,
     FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
@@ -132,6 +132,7 @@ static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef*
     const seqDef* sp = sstart;
     size_t matchLengthSum = 0;
     size_t litLengthSum = 0;
+    (void)(litLengthSum); /* suppress unused variable warning on some environments */
     while (send-sp > 0) {
         ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp);
         litLengthSum += seqLen.litLength;
@@ -324,7 +325,7 @@ static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t lit
 static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type,
                         const BYTE* codeTable, unsigned maxCode,
                         size_t nbSeq, const FSE_CTable* fseCTable,
-                        const U32* additionalBits,
+                        const U8* additionalBits,
                         short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
                         void* workspace, size_t wkspSize)
 {
@@ -219,7 +219,7 @@ MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
 MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
     /* For alignment, the wksp will always allocate an additional n_1=[1, 64] bytes
      * to align the beginning of tables section, as well as another n_2=[0, 63] bytes
-     * to align the beginning of the aligned secion.
+     * to align the beginning of the aligned section.
      *
      * n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and
      * aligneds being sized in multiples of 64 bytes.
@@ -422,8 +422,8 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     DEBUGLOG(5,
         "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
         alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
-    assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
-    assert((bytes & (sizeof(void*)-1)) == 0);
+    assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
+    assert(bytes % ZSTD_ALIGNOF(void*) == 0);
     ZSTD_cwksp_assert_internal_consistency(ws);
     /* we must be in the first phase, no advance is possible */
     if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
@@ -48,10 +48,216 @@ void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
 }


 FORCE_INLINE_TEMPLATE
-size_t ZSTD_compressBlock_doubleFast_generic(
+size_t ZSTD_compressBlock_doubleFast_noDict_generic(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize, U32 const mls /* template */)
+{
+    ZSTD_compressionParameters const* cParams = &ms->cParams;
+    U32* const hashLong = ms->hashTable;
+    const U32 hBitsL = cParams->hashLog;
+    U32* const hashSmall = ms->chainTable;
+    const U32 hBitsS = cParams->chainLog;
+    const BYTE* const base = ms->window.base;
+    const BYTE* const istart = (const BYTE*)src;
+    const BYTE* anchor = istart;
+    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
+    /* presumes that, if there is a dictionary, it must be using Attach mode */
+    const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
+    const BYTE* const prefixLowest = base + prefixLowestIndex;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* const ilimit = iend - HASH_READ_SIZE;
+    U32 offset_1=rep[0], offset_2=rep[1];
+    U32 offsetSaved = 0;
+
+    size_t mLength;
+    U32 offset;
+    U32 curr;
+
+    /* how many positions to search before increasing step size */
+    const size_t kStepIncr = 1 << kSearchStrength;
+    /* the position at which to increment the step size if no match is found */
+    const BYTE* nextStep;
+    size_t step; /* the current step size */
+
+    size_t hl0; /* the long hash at ip */
+    size_t hl1; /* the long hash at ip1 */
+
+    U32 idxl0; /* the long match index for ip */
+    U32 idxl1; /* the long match index for ip1 */
+
+    const BYTE* matchl0; /* the long match for ip */
+    const BYTE* matchs0; /* the short match for ip */
+    const BYTE* matchl1; /* the long match for ip1 */
+
+    const BYTE* ip = istart; /* the current position */
+    const BYTE* ip1; /* the next position */
+
+    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_noDict_generic");
+
+    /* init */
+    ip += ((ip - prefixLowest) == 0);
+    {
+        U32 const current = (U32)(ip - base);
+        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog);
+        U32 const maxRep = current - windowLow;
+        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
+        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
+    }
+
+    /* Outer Loop: one iteration per match found and stored */
+    while (1) {
+        step = 1;
+        nextStep = ip + kStepIncr;
+        ip1 = ip + step;
+
+        if (ip1 > ilimit) {
+            goto _cleanup;
+        }
+
+        hl0 = ZSTD_hashPtr(ip, hBitsL, 8);
+        idxl0 = hashLong[hl0];
+        matchl0 = base + idxl0;
+
+        /* Inner Loop: one iteration per search / position */
+        do {
+            const size_t hs0 = ZSTD_hashPtr(ip, hBitsS, mls);
+            const U32 idxs0 = hashSmall[hs0];
+            curr = (U32)(ip-base);
+            matchs0 = base + idxs0;
+
+            hashLong[hl0] = hashSmall[hs0] = curr;   /* update hash tables */
+
+            /* check noDict repcode */
+            if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
+                mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
+                ip++;
+                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
+                goto _match_stored;
+            }
+
+            hl1 = ZSTD_hashPtr(ip1, hBitsL, 8);
+
+            if (idxl0 > prefixLowestIndex) {
+                /* check prefix long match */
+                if (MEM_read64(matchl0) == MEM_read64(ip)) {
+                    mLength = ZSTD_count(ip+8, matchl0+8, iend) + 8;
+                    offset = (U32)(ip-matchl0);
+                    while (((ip>anchor) & (matchl0>prefixLowest)) && (ip[-1] == matchl0[-1])) { ip--; matchl0--; mLength++; } /* catch up */
+                    goto _match_found;
+                }
+            }
+
+            idxl1 = hashLong[hl1];
+            matchl1 = base + idxl1;
+
+            if (idxs0 > prefixLowestIndex) {
+                /* check prefix short match */
+                if (MEM_read32(matchs0) == MEM_read32(ip)) {
+                    goto _search_next_long;
+                }
+            }
+
+            if (ip1 >= nextStep) {
+                PREFETCH_L1(ip1 + 64);
+                PREFETCH_L1(ip1 + 128);
+                step++;
+                nextStep += kStepIncr;
+            }
+            ip = ip1;
+            ip1 += step;
+
+            hl0 = hl1;
+            idxl0 = idxl1;
+            matchl0 = matchl1;
+#if defined(__aarch64__)
+            PREFETCH_L1(ip+256);
+#endif
+        } while (ip1 <= ilimit);
+
+_cleanup:
+        /* save reps for next block */
+        rep[0] = offset_1 ? offset_1 : offsetSaved;
+        rep[1] = offset_2 ? offset_2 : offsetSaved;
+
+        /* Return the last literals size */
+        return (size_t)(iend - anchor);
+
+_search_next_long:
+
+        /* check prefix long +1 match */
+        if (idxl1 > prefixLowestIndex) {
+            if (MEM_read64(matchl1) == MEM_read64(ip1)) {
+                ip = ip1;
+                mLength = ZSTD_count(ip+8, matchl1+8, iend) + 8;
+                offset = (U32)(ip-matchl1);
+                while (((ip>anchor) & (matchl1>prefixLowest)) && (ip[-1] == matchl1[-1])) { ip--; matchl1--; mLength++; } /* catch up */
+                goto _match_found;
+            }
+        }
+
+        /* if no long +1 match, explore the short match we found */
+        mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4;
+        offset = (U32)(ip - matchs0);
+        while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; } /* catch up */
+
+        /* fall-through */
+
+_match_found: /* requires ip, offset, mLength */
+        offset_2 = offset_1;
+        offset_1 = offset;
+
+        if (step < 4) {
+            /* It is unsafe to write this value back to the hashtable when ip1 is
+             * greater than or equal to the new ip we will have after we're done
+             * processing this match. Rather than perform that test directly
+             * (ip1 >= ip + mLength), which costs speed in practice, we do a simpler
+             * more predictable test. The minmatch even if we take a short match is
+             * 4 bytes, so as long as step, the distance between ip and ip1
+             * (initially) is less than 4, we know ip1 < new ip. */
+            hashLong[hl1] = (U32)(ip1 - base);
+        }
+
+        ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+
+_match_stored:
+        /* match found */
+        ip += mLength;
+        anchor = ip;
+
+        if (ip <= ilimit) {
+            /* Complementary insertion */
+            /* done after iLimit test, as candidates could be > iend-8 */
+            {   U32 const indexToInsert = curr+2;
+                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
+                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
+                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
+                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
+            }
+
+            /* check immediate repcode */
+            while ( (ip <= ilimit)
+                 && ( (offset_2>0)
+                    & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
+                /* store sequence */
+                size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+                U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;   /* swap offset_2 <=> offset_1 */
+                hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
+                hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
+                ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, rLength-MINMATCH);
+                ip += rLength;
+                anchor = ip;
+                continue;   /* faster when present ... (?) */
+            }
+        }
+    }
+}
+
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize,
-        U32 const mls /* template */, ZSTD_dictMode_e const dictMode)
+        U32 const mls /* template */)
 {
     ZSTD_compressionParameters const* cParams = &ms->cParams;
     U32* const hashLong = ms->hashTable;
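For readers new to this file: the "double fast" strategy keeps two hash tables over the same window, one keyed on 8-byte reads (hashLong) and one on mls-byte reads (hashSmall); a long-table hit yields an immediately longer match, while a short-table hit triggers a second long-table probe at the next position before the short candidate is accepted. A toy sketch of the two-table probe only; the hash functions, table sizes, and bounds handling are placeholders, not the real ZSTD_hashPtr or table layout:

#include <stdint.h>
#include <string.h>

#define HLOG_LONG  12
#define HLOG_SMALL 11

static uint32_t tableLong[1u << HLOG_LONG];   /* indexed by hash of 8 bytes   */
static uint32_t tableSmall[1u << HLOG_SMALL]; /* indexed by hash of 5 bytes   */

static uint64_t read64(const uint8_t* p) { uint64_t v; memcpy(&v, p, 8); return v; }

/* Toy multiplicative hashes standing in for ZSTD_hashPtr(). */
static uint32_t hash8(const uint8_t* p) { return (uint32_t)((read64(p) * 0x9E3779B185EBCA87ULL) >> (64 - HLOG_LONG)); }
static uint32_t hash5(const uint8_t* p) { return (uint32_t)(((read64(p) << 24) * 0x9E3779B185EBCA87ULL) >> (64 - HLOG_SMALL)); }

/* Probe both tables at position ip (index curr), preferring the long candidate.
 * Validation of the returned index against window bounds is omitted here;
 * the real code checks idx > prefixLowestIndex before dereferencing. */
static uint32_t probe(const uint8_t* base, const uint8_t* ip, uint32_t curr)
{
    uint32_t const idxL = tableLong[hash8(ip)];
    uint32_t const idxS = tableSmall[hash5(ip)];
    tableLong[hash8(ip)] = tableSmall[hash5(ip)] = curr;   /* update both, as the diff does */
    if (idxL && read64(base + idxL) == read64(ip)) return idxL;   /* 8-byte confirmation */
    return idxS;                                                  /* fall back to the short candidate */
}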
@@ -72,54 +278,30 @@ size_t ZSTD_compressBlock_doubleFast_generic(
     U32 offsetSaved = 0;

     const ZSTD_matchState_t* const dms = ms->dictMatchState;
-    const ZSTD_compressionParameters* const dictCParams =
-                                      dictMode == ZSTD_dictMatchState ?
-                                      &dms->cParams : NULL;
-    const U32* const dictHashLong  = dictMode == ZSTD_dictMatchState ?
-                                     dms->hashTable : NULL;
-    const U32* const dictHashSmall = dictMode == ZSTD_dictMatchState ?
-                                     dms->chainTable : NULL;
-    const U32 dictStartIndex       = dictMode == ZSTD_dictMatchState ?
-                                     dms->window.dictLimit : 0;
-    const BYTE* const dictBase     = dictMode == ZSTD_dictMatchState ?
-                                     dms->window.base : NULL;
-    const BYTE* const dictStart    = dictMode == ZSTD_dictMatchState ?
-                                     dictBase + dictStartIndex : NULL;
-    const BYTE* const dictEnd      = dictMode == ZSTD_dictMatchState ?
-                                     dms->window.nextSrc : NULL;
-    const U32 dictIndexDelta       = dictMode == ZSTD_dictMatchState ?
-                                     prefixLowestIndex - (U32)(dictEnd - dictBase) :
-                                     0;
-    const U32 dictHBitsL           = dictMode == ZSTD_dictMatchState ?
-                                     dictCParams->hashLog : hBitsL;
-    const U32 dictHBitsS           = dictMode == ZSTD_dictMatchState ?
-                                     dictCParams->chainLog : hBitsS;
+    const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
+    const U32* const dictHashLong  = dms->hashTable;
+    const U32* const dictHashSmall = dms->chainTable;
+    const U32 dictStartIndex       = dms->window.dictLimit;
+    const BYTE* const dictBase     = dms->window.base;
+    const BYTE* const dictStart    = dictBase + dictStartIndex;
+    const BYTE* const dictEnd      = dms->window.nextSrc;
+    const U32 dictIndexDelta       = prefixLowestIndex - (U32)(dictEnd - dictBase);
+    const U32 dictHBitsL           = dictCParams->hashLog;
+    const U32 dictHBitsS           = dictCParams->chainLog;
     const U32 dictAndPrefixLength  = (U32)((ip - prefixLowest) + (dictEnd - dictStart));

-    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_generic");
-
-    assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState);
+    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_dictMatchState_generic");

     /* if a dictionary is attached, it must be within window range */
-    if (dictMode == ZSTD_dictMatchState) {
-        assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex);
-    }
+    assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex);

     /* init */
     ip += (dictAndPrefixLength == 0);
-    if (dictMode == ZSTD_noDict) {
-        U32 const curr = (U32)(ip - base);
-        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
-        U32 const maxRep = curr - windowLow;
-        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
-        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
-    }
-    if (dictMode == ZSTD_dictMatchState) {
-        /* dictMatchState repCode checks don't currently handle repCode == 0
-         * disabling. */
-        assert(offset_1 <= dictAndPrefixLength);
-        assert(offset_2 <= dictAndPrefixLength);
-    }
+
+    /* dictMatchState repCode checks don't currently handle repCode == 0
+     * disabling. */
+    assert(offset_1 <= dictAndPrefixLength);
+    assert(offset_2 <= dictAndPrefixLength);

     /* Main Search Loop */
     while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
@@ -135,15 +317,13 @@ size_t ZSTD_compressBlock_doubleFast_generic(
         const BYTE* matchLong = base + matchIndexL;
         const BYTE* match = base + matchIndexS;
         const U32 repIndex = curr + 1 - offset_1;
-        const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
-                            && repIndex < prefixLowestIndex) ?
+        const BYTE* repMatch = (repIndex < prefixLowestIndex) ?
                                dictBase + (repIndex - dictIndexDelta) :
                                base + repIndex;
         hashLong[h2] = hashSmall[h] = curr;   /* update hash tables */

-        /* check dictMatchState repcode */
-        if (dictMode == ZSTD_dictMatchState
-            && ((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+        /* check repcode */
+        if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
             && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
             const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
             mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
@@ -152,15 +332,6 @@ size_t ZSTD_compressBlock_doubleFast_generic(
             goto _match_stored;
         }

-        /* check noDict repcode */
-        if ( dictMode == ZSTD_noDict
-          && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
-            mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
-            ip++;
-            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
-            goto _match_stored;
-        }
-
         if (matchIndexL > prefixLowestIndex) {
             /* check prefix long match */
             if (MEM_read64(matchLong) == MEM_read64(ip)) {
@@ -169,7 +340,7 @@ size_t ZSTD_compressBlock_doubleFast_generic(
                 while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
                 goto _match_found;
             }
-        } else if (dictMode == ZSTD_dictMatchState) {
+        } else {
             /* check dictMatchState long match */
             U32 const dictMatchIndexL = dictHashLong[dictHL];
             const BYTE* dictMatchL = dictBase + dictMatchIndexL;
@@ -187,7 +358,7 @@ size_t ZSTD_compressBlock_doubleFast_generic(
             if (MEM_read32(match) == MEM_read32(ip)) {
                 goto _search_next_long;
             }
-        } else if (dictMode == ZSTD_dictMatchState) {
+        } else {
             /* check dictMatchState short match */
             U32 const dictMatchIndexS = dictHashSmall[dictHS];
             match = dictBase + dictMatchIndexS;
@@ -220,7 +391,7 @@ _search_next_long:
                     while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
                     goto _match_found;
                 }
-            } else if (dictMode == ZSTD_dictMatchState) {
+            } else {
                 /* check dict long +1 match */
                 U32 const dictMatchIndexL3 = dictHashLong[dictHLNext];
                 const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3;
@@ -234,7 +405,7 @@ _search_next_long:
     }   }   }

         /* if no long +1 match, explore the short match we found */
-        if (dictMode == ZSTD_dictMatchState && matchIndexS < prefixLowestIndex) {
+        if (matchIndexS < prefixLowestIndex) {
             mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;
             offset = (U32)(curr - matchIndexS);
             while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
@@ -244,8 +415,6 @@ _search_next_long:
             while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
         }

-        /* fall-through */
-
 _match_found:
         offset_2 = offset_1;
         offset_1 = offset;
@@ -268,12 +437,10 @@ _match_stored:
         }

         /* check immediate repcode */
-        if (dictMode == ZSTD_dictMatchState) {
         while (ip <= ilimit) {
             U32 const current2 = (U32)(ip-base);
             U32 const repIndex2 = current2 - offset_2;
-            const BYTE* repMatch2 = dictMode == ZSTD_dictMatchState
-                && repIndex2 < prefixLowestIndex ?
+            const BYTE* repMatch2 = repIndex2 < prefixLowestIndex ?
                     dictBase + repIndex2 - dictIndexDelta :
                     base + repIndex2;
             if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
@@ -289,22 +456,8 @@ _match_stored:
                 continue;
             }
             break;
-    }   }
+        }
+    }

-        if (dictMode == ZSTD_noDict) {
-            while ( (ip <= ilimit)
-                 && ( (offset_2>0)
-                    & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
-                /* store sequence */
-                size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
-                U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;  /* swap offset_2 <=> offset_1 */
-                hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
-                hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
-                ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, rLength-MINMATCH);
-                ip += rLength;
-                anchor = ip;
-                continue;   /* faster when present ... (?) */
-    }   }   }
     }   /* while (ip < ilimit) */

     /* save reps for next block */
@@ -315,6 +468,24 @@ _match_stored:
     return (size_t)(iend - anchor);
 }

+#define ZSTD_GEN_DFAST_FN(dictMode, mls)                                                                 \
+    static size_t ZSTD_compressBlock_doubleFast_##dictMode##_##mls(                                      \
+            ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],                          \
+            void const* src, size_t srcSize)                                                             \
+    {                                                                                                    \
+        return ZSTD_compressBlock_doubleFast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls); \
+    }
+
+ZSTD_GEN_DFAST_FN(noDict, 4)
+ZSTD_GEN_DFAST_FN(noDict, 5)
+ZSTD_GEN_DFAST_FN(noDict, 6)
+ZSTD_GEN_DFAST_FN(noDict, 7)
+
+ZSTD_GEN_DFAST_FN(dictMatchState, 4)
+ZSTD_GEN_DFAST_FN(dictMatchState, 5)
+ZSTD_GEN_DFAST_FN(dictMatchState, 6)
+ZSTD_GEN_DFAST_FN(dictMatchState, 7)
+

 size_t ZSTD_compressBlock_doubleFast(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
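ZSTD_GEN_DFAST_FN uses token pasting to stamp out one specialized function per (dictMode, mls) pair, so `mls` becomes a compile-time constant inside the FORCE_INLINE_TEMPLATE body instead of a runtime parameter that inhibits optimization. The same pattern in isolation, with illustrative names unrelated to zstd:

/* Generic worker: `width` acts as a template parameter and becomes a
 * constant once the function is inlined into a specialization. */
static inline int sum_stride_generic(const int* a, int n, int width)
{
    int s = 0, i;
    for (i = 0; i < n; i += width) s += a[i];
    return s;
}

/* Stamp out a named specialization per constant width, like ZSTD_GEN_DFAST_FN. */
#define GEN_SUM_FN(width)                               \
    static int sum_stride_##width(const int* a, int n)  \
    {                                                   \
        return sum_stride_generic(a, n, width);         \
    }

GEN_SUM_FN(4)
GEN_SUM_FN(8)

/* Dispatch once on the runtime value, as ZSTD_compressBlock_doubleFast() does below. */
static int sum_stride(const int* a, int n, int width)
{
    switch (width) {
    default:
    case 4: return sum_stride_4(a, n);
    case 8: return sum_stride_8(a, n);
    }
}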
@@ -325,13 +496,13 @@ size_t ZSTD_compressBlock_doubleFast(
     {
     default: /* includes case 3 */
     case 4 :
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict);
+        return ZSTD_compressBlock_doubleFast_noDict_4(ms, seqStore, rep, src, srcSize);
     case 5 :
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict);
+        return ZSTD_compressBlock_doubleFast_noDict_5(ms, seqStore, rep, src, srcSize);
     case 6 :
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict);
+        return ZSTD_compressBlock_doubleFast_noDict_6(ms, seqStore, rep, src, srcSize);
     case 7 :
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict);
+        return ZSTD_compressBlock_doubleFast_noDict_7(ms, seqStore, rep, src, srcSize);
     }
 }
@@ -345,13 +516,13 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState(
     {
     default: /* includes case 3 */
     case 4 :
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState);
+        return ZSTD_compressBlock_doubleFast_dictMatchState_4(ms, seqStore, rep, src, srcSize);
     case 5 :
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState);
+        return ZSTD_compressBlock_doubleFast_dictMatchState_5(ms, seqStore, rep, src, srcSize);
     case 6 :
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState);
+        return ZSTD_compressBlock_doubleFast_dictMatchState_6(ms, seqStore, rep, src, srcSize);
     case 7 :
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState);
+        return ZSTD_compressBlock_doubleFast_dictMatchState_7(ms, seqStore, rep, src, srcSize);
     }
 }
@@ -387,7 +558,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(

     /* if extDict is invalidated due to maxDistance, switch to "regular" variant */
     if (prefixStartIndex == dictStartIndex)
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, mls, ZSTD_noDict);
+        return ZSTD_compressBlock_doubleFast(ms, seqStore, rep, src, srcSize);

     /* Search Loop */
     while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
@@ -409,7 +580,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
         hashSmall[hSmall] = hashLong[hLong] = curr;   /* update hash table */

         if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
-            & (offset_1 < curr+1 - dictStartIndex)) /* note: we are searching at curr+1 */
+            & (offset_1 <= curr+1 - dictStartIndex)) /* note: we are searching at curr+1 */
           && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
             const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
             mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
@@ -477,7 +648,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
             U32 const repIndex2 = current2 - offset_2;
             const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
             if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3)   /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */
-                & (offset_2 < current2 - dictStartIndex))
+                & (offset_2 <= current2 - dictStartIndex))
               && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                 const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                 size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
@ -500,6 +671,10 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
|
|||||||
return (size_t)(iend - anchor);
|
return (size_t)(iend - anchor);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ZSTD_GEN_DFAST_FN(extDict, 4)
|
||||||
|
ZSTD_GEN_DFAST_FN(extDict, 5)
|
||||||
|
ZSTD_GEN_DFAST_FN(extDict, 6)
|
||||||
|
ZSTD_GEN_DFAST_FN(extDict, 7)
|
||||||
|
|
||||||
size_t ZSTD_compressBlock_doubleFast_extDict(
|
size_t ZSTD_compressBlock_doubleFast_extDict(
|
||||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||||
@ -510,12 +685,12 @@ size_t ZSTD_compressBlock_doubleFast_extDict(
|
|||||||
{
|
{
|
||||||
default: /* includes case 3 */
|
default: /* includes case 3 */
|
||||||
case 4 :
|
case 4 :
|
||||||
return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
|
return ZSTD_compressBlock_doubleFast_extDict_4(ms, seqStore, rep, src, srcSize);
|
||||||
case 5 :
|
case 5 :
|
||||||
return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
|
return ZSTD_compressBlock_doubleFast_extDict_5(ms, seqStore, rep, src, srcSize);
|
||||||
case 6 :
|
case 6 :
|
||||||
return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
|
return ZSTD_compressBlock_doubleFast_extDict_6(ms, seqStore, rep, src, srcSize);
|
||||||
case 7 :
|
case 7 :
|
||||||
return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
|
return ZSTD_compressBlock_doubleFast_extDict_7(ms, seqStore, rep, src, srcSize);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
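Note: the ZSTD_GEN_DFAST_FN(extDict, N) lines above stamp out one copy of the generic extDict compressor per match-length setting, so `mls` becomes a compile-time constant inside each copy and the runtime dispatcher reduces to a switch over specialized functions. A minimal stand-alone sketch of the same macro-specialization pattern — all names here (sum_generic, GEN_SUM_FN, sum_dispatch) are illustrative, not part of zstd:

#include <stdio.h>

/* A generic routine: 'width' would normally stay a runtime parameter. */
static unsigned sum_generic(const unsigned* v, unsigned n, unsigned width)
{
    unsigned total = 0, i;
    for (i = 0; i < n; i += width) total += v[i];
    return total;
}

/* Stamp out one specialization per width: the constant lets the compiler
 * fold the stride and unroll, which is the point of ZSTD_GEN_DFAST_FN. */
#define GEN_SUM_FN(width)                                       \
    static unsigned sum_##width(const unsigned* v, unsigned n)  \
    {                                                           \
        return sum_generic(v, n, width);                        \
    }

GEN_SUM_FN(4)
GEN_SUM_FN(5)
GEN_SUM_FN(6)
GEN_SUM_FN(7)

/* Runtime dispatch, mirroring the switch in ZSTD_compressBlock_doubleFast_extDict(). */
static unsigned sum_dispatch(const unsigned* v, unsigned n, unsigned width)
{
    switch (width)
    {
    default: /* fall through to the smallest width */
    case 4 : return sum_4(v, n);
    case 5 : return sum_5(v, n);
    case 6 : return sum_6(v, n);
    case 7 : return sum_7(v, n);
    }
}

int main(void)
{
    unsigned v[16] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 };
    printf("%u\n", sum_dispatch(v, 16, 4));   /* 1 + 5 + 9 + 13 = 28 */
    return 0;
}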
@@ -43,145 +43,294 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
 }


+/**
+ * If you squint hard enough (and ignore repcodes), the search operation at any
+ * given position is broken into 4 stages:
+ *
+ * 1. Hash   (map position to hash value via input read)
+ * 2. Lookup (map hash val to index via hashtable read)
+ * 3. Load   (map index to value at that position via input read)
+ * 4. Compare
+ *
+ * Each of these steps involves a memory read at an address which is computed
+ * from the previous step. This means these steps must be sequenced and their
+ * latencies are cumulative.
+ *
+ * Rather than do 1->2->3->4 sequentially for a single position before moving
+ * onto the next, this implementation interleaves these operations across the
+ * next few positions:
+ *
+ * R = Repcode Read & Compare
+ * H = Hash
+ * T = Table Lookup
+ * M = Match Read & Compare
+ *
+ * Pos | Time -->
+ * ----+-------------------
+ * N   | ... M
+ * N+1 | ...   TM
+ * N+2 |    R H   T M
+ * N+3 |         H    TM
+ * N+4 |           R H   T M
+ * N+5 |              H   ...
+ * N+6 |          R ...
+ *
+ * This is very much analogous to the pipelining of execution in a CPU. And just
+ * like a CPU, we have to dump the pipeline when we find a match (i.e., take a
+ * branch).
+ *
+ * When this happens, we throw away our current state, and do the following prep
+ * to re-enter the loop:
+ *
+ * Pos | Time -->
+ * ----+-------------------
+ * N   | H T
+ * N+1 |   H
+ *
+ * This is also the work we do at the beginning to enter the loop initially.
+ */
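Note: the comment block added above describes classic software pipelining: start the hash for position i+1 before the table probe for position i resolves, so the dependent memory reads of consecutive positions overlap. A rough stand-alone illustration of the idea, with a toy hash and table layout standing in for MEM_read32/ZSTD_hashPtr — this is a sketch of the technique, not the zstd loop:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy 32-bit read and multiplicative hash; 'table' is assumed to be
 * 4096 zero-initialized uint32_t entries supplied by the caller. */
static uint32_t read32(const uint8_t* p) { uint32_t v; memcpy(&v, p, 4); return v; }
static size_t   hash12(uint32_t v)       { return (size_t)((v * 2654435761u) >> 20); }

/* Return the first position whose 4-byte value was seen before.  The hash
 * for position i+1 is computed while the table probe for position i is
 * still in flight, so the dependent reads overlap like the diagram above. */
static ptrdiff_t first_match(const uint8_t* src, size_t len, uint32_t* table)
{
    size_t i = 0;
    size_t h0 = hash12(read32(src));                   /* prime the pipeline */
    while (i + 5 < len) {
        size_t const h1 = hash12(read32(src + i + 1)); /* Hash, pos i+1 */
        uint32_t const idx = table[h0];                /* Lookup, pos i */
        table[h0] = (uint32_t)i + 1;                   /* 0 means "empty" */
        if (idx != 0 && read32(src + idx - 1) == read32(src + i))
            return (ptrdiff_t)i;                       /* Load+Compare hit: dump the pipeline */
        h0 = h1;                                       /* shift pipeline stages */
        i++;
    }
    return -1;
}

int main(void)
{
    static uint32_t table[4096];               /* zero-initialized */
    const uint8_t src[] = "abcdefabcdef";
    printf("%td\n", first_match(src, sizeof src - 1, table));   /* prints 6 */
    return 0;
}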
 FORCE_INLINE_TEMPLATE size_t
-ZSTD_compressBlock_fast_generic(
+ZSTD_compressBlock_fast_noDict_generic(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize,
-        U32 const mls)
+        U32 const mls, U32 const hasStep)
 {
     const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32* const hashTable = ms->hashTable;
     U32 const hlog = cParams->hashLog;
     /* support stepSize of 0 */
-    size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
+    size_t const stepSize = hasStep ? (cParams->targetLength + !(cParams->targetLength) + 1) : 2;
     const BYTE* const base = ms->window.base;
     const BYTE* const istart = (const BYTE*)src;
-    /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */
-    const BYTE* ip0 = istart;
-    const BYTE* ip1;
-    const BYTE* anchor = istart;
     const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
     const U32   prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
     const BYTE* const prefixStart = base + prefixStartIndex;
     const BYTE* const iend = istart + srcSize;
     const BYTE* const ilimit = iend - HASH_READ_SIZE;
-    U32 offset_1=rep[0], offset_2=rep[1];
+
+    const BYTE* anchor = istart;
+    const BYTE* ip0 = istart;
+    const BYTE* ip1;
+    const BYTE* ip2;
+    const BYTE* ip3;
+    U32 current0;
+
+    U32 rep_offset1 = rep[0];
+    U32 rep_offset2 = rep[1];
     U32 offsetSaved = 0;

-    /* init */
+    size_t hash0; /* hash for ip0 */
+    size_t hash1; /* hash for ip1 */
+    U32 idx; /* match idx for ip0 */
+    U32 mval; /* src value at match idx */
+
+    U32 offcode;
+    const BYTE* match0;
+    size_t mLength;
+
+    /* ip0 and ip1 are always adjacent. The targetLength skipping and
+     * uncompressibility acceleration is applied to every other position,
+     * matching the behavior of #1562. step therefore represents the gap
+     * between pairs of positions, from ip0 to ip2 or ip1 to ip3. */
+    size_t step;
+    const BYTE* nextStep;
+    const size_t kStepIncr = (1 << (kSearchStrength - 1));
+
     DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
     ip0 += (ip0 == prefixStart);
-    ip1 = ip0 + 1;
     {   U32 const curr = (U32)(ip0 - base);
         U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
         U32 const maxRep = curr - windowLow;
-        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
-        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
+        if (rep_offset2 > maxRep) offsetSaved = rep_offset2, rep_offset2 = 0;
+        if (rep_offset1 > maxRep) offsetSaved = rep_offset1, rep_offset1 = 0;
     }

-    /* Main Search Loop */
-#ifdef __INTEL_COMPILER
-    /* From intel 'The vector pragma indicates that the loop should be
-     * vectorized if it is legal to do so'. Can be used together with
-     * #pragma ivdep (but have opted to exclude that because intel
-     * warns against using it).*/
-#pragma vector always
-#endif
-    while (ip1 < ilimit) {   /* < instead of <=, because check at ip0+2 */
-        size_t mLength;
-        BYTE const* ip2 = ip0 + 2;
-        size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls);
-        U32 const val0 = MEM_read32(ip0);
-        size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls);
-        U32 const val1 = MEM_read32(ip1);
-        U32 const current0 = (U32)(ip0-base);
-        U32 const current1 = (U32)(ip1-base);
-        U32 const matchIndex0 = hashTable[h0];
-        U32 const matchIndex1 = hashTable[h1];
-        BYTE const* repMatch = ip2 - offset_1;
-        const BYTE* match0 = base + matchIndex0;
-        const BYTE* match1 = base + matchIndex1;
-        U32 offcode;
-
-#if defined(__aarch64__)
-        PREFETCH_L1(ip0+256);
-#endif
-
-        hashTable[h0] = current0;   /* update hash table */
-        hashTable[h1] = current1;   /* update hash table */
-
-        assert(ip0 + 1 == ip1);
+    /* start each op */
+_start: /* Requires: ip0 */
+
+    step = stepSize;
+    nextStep = ip0 + kStepIncr;
+
+    /* calculate positions, ip0 - anchor == 0, so we skip step calc */
+    ip1 = ip0 + 1;
+    ip2 = ip0 + step;
+    ip3 = ip2 + 1;
+
+    if (ip3 >= ilimit) {
+        goto _cleanup;
+    }

-        if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) {
-            mLength = (ip2[-1] == repMatch[-1]) ? 1 : 0;
-            ip0 = ip2 - mLength;
-            match0 = repMatch - mLength;
-            mLength += 4;
+    hash0 = ZSTD_hashPtr(ip0, hlog, mls);
+    hash1 = ZSTD_hashPtr(ip1, hlog, mls);
+
+    idx = hashTable[hash0];
+
+    do {
+        /* load repcode match for ip[2]*/
+        const U32 rval = MEM_read32(ip2 - rep_offset1);
+
+        /* write back hash table entry */
+        current0 = (U32)(ip0 - base);
+        hashTable[hash0] = current0;
+
+        /* check repcode at ip[2] */
+        if ((MEM_read32(ip2) == rval) & (rep_offset1 > 0)) {
+            ip0 = ip2;
+            match0 = ip0 - rep_offset1;
+            mLength = ip0[-1] == match0[-1];
+            ip0 -= mLength;
+            match0 -= mLength;
             offcode = 0;
+            mLength += 4;
             goto _match;
         }
-        if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) {
-            /* found a regular match */
+
+        /* load match for ip[0] */
+        if (idx >= prefixStartIndex) {
+            mval = MEM_read32(base + idx);
+        } else {
+            mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
+        }
+
+        /* check match at ip[0] */
+        if (MEM_read32(ip0) == mval) {
+            /* found a match! */
             goto _offset;
         }
-        if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) {
-            /* found a regular match after one literal */
+
+        /* lookup ip[1] */
+        idx = hashTable[hash1];
+
+        /* hash ip[2] */
+        hash0 = hash1;
+        hash1 = ZSTD_hashPtr(ip2, hlog, mls);
+
+        /* advance to next positions */
         ip0 = ip1;
-            match0 = match1;
+        ip1 = ip2;
+        ip2 = ip3;
+
+        /* write back hash table entry */
+        current0 = (U32)(ip0 - base);
+        hashTable[hash0] = current0;
+
+        /* load match for ip[0] */
+        if (idx >= prefixStartIndex) {
+            mval = MEM_read32(base + idx);
+        } else {
+            mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
+        }
+
+        /* check match at ip[0] */
+        if (MEM_read32(ip0) == mval) {
+            /* found a match! */
             goto _offset;
         }
-        {   size_t const step = ((size_t)(ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
-            assert(step >= 2);
-            ip0 += step;
-            ip1 += step;
-            continue;
+
+        /* lookup ip[1] */
+        idx = hashTable[hash1];
+
+        /* hash ip[2] */
+        hash0 = hash1;
+        hash1 = ZSTD_hashPtr(ip2, hlog, mls);
+
+        /* advance to next positions */
+        ip0 = ip1;
+        ip1 = ip2;
+        ip2 = ip0 + step;
+        ip3 = ip1 + step;
+
+        /* calculate step */
+        if (ip2 >= nextStep) {
+            step++;
+            PREFETCH_L1(ip1 + 64);
+            PREFETCH_L1(ip1 + 128);
+            nextStep += kStepIncr;
         }
-_offset: /* Requires: ip0, match0 */
-        /* Compute the offset code */
-        offset_2 = offset_1;
-        offset_1 = (U32)(ip0-match0);
-        offcode = offset_1 + ZSTD_REP_MOVE;
+    } while (ip3 < ilimit);
+
+_cleanup:
+    /* Note that there are probably still a couple positions we could search.
+     * However, it seems to be a meaningful performance hit to try to search
+     * them. So let's not. */
+
+    /* save reps for next block */
+    rep[0] = rep_offset1 ? rep_offset1 : offsetSaved;
+    rep[1] = rep_offset2 ? rep_offset2 : offsetSaved;
+
+    /* Return the last literals size */
+    return (size_t)(iend - anchor);
+
+_offset: /* Requires: ip0, idx */
+
+    /* Compute the offset code. */
+    match0 = base + idx;
+    rep_offset2 = rep_offset1;
+    rep_offset1 = (U32)(ip0-match0);
+    offcode = rep_offset1 + ZSTD_REP_MOVE;
     mLength = 4;
-        /* Count the backwards match length */
-        while (((ip0>anchor) & (match0>prefixStart))
-             && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */
+
+    /* Count the backwards match length. */
+    while (((ip0>anchor) & (match0>prefixStart)) && (ip0[-1] == match0[-1])) {
+        ip0--;
+        match0--;
+        mLength++;
+    }

 _match: /* Requires: ip0, match0, offcode */
-        /* Count the forward length */
+
+    /* Count the forward length. */
     mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend);

     ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength - MINMATCH);

-    /* match found */
     ip0 += mLength;
     anchor = ip0;
+
+    /* write next hash table entry */
+    if (ip1 < ip0) {
+        hashTable[hash1] = (U32)(ip1 - base);
+    }
+
+    /* Fill table and check for immediate repcode. */
     if (ip0 <= ilimit) {
         /* Fill Table */
         assert(base+current0+2 > istart);  /* check base overflow */
         hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
         hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);

-        if (offset_2 > 0) { /* offset_2==0 means offset_2 is invalidated */
-            while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) {
+        if (rep_offset2 > 0) { /* rep_offset2==0 means rep_offset2 is invalidated */
+            while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - rep_offset2)) ) {
                 /* store sequence */
-                size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
-                { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
+                size_t const rLength = ZSTD_count(ip0+4, ip0+4-rep_offset2, iend) + 4;
+                { U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */
                 hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
                 ip0 += rLength;
                 ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH);
                 anchor = ip0;
                 continue;   /* faster when present (confirmed on gcc-8) ... (?) */
         }   }   }
-        ip1 = ip0 + 1;
+        goto _start;
     }

-    /* save reps for next block */
-    rep[0] = offset_1 ? offset_1 : offsetSaved;
-    rep[1] = offset_2 ? offset_2 : offsetSaved;
-
-    /* Return the last literals size */
-    return (size_t)(iend - anchor);
+#define ZSTD_GEN_FAST_FN(dictMode, mls, step)                                                         \
+static size_t ZSTD_compressBlock_fast_##dictMode##_##mls##_##step(                                    \
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],                           \
+        void const* src, size_t srcSize)                                                              \
+{                                                                                                     \
+    return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls, step);  \
 }

+ZSTD_GEN_FAST_FN(noDict, 4, 1)
+ZSTD_GEN_FAST_FN(noDict, 5, 1)
+ZSTD_GEN_FAST_FN(noDict, 6, 1)
+ZSTD_GEN_FAST_FN(noDict, 7, 1)
+
+ZSTD_GEN_FAST_FN(noDict, 4, 0)
+ZSTD_GEN_FAST_FN(noDict, 5, 0)
+ZSTD_GEN_FAST_FN(noDict, 6, 0)
+ZSTD_GEN_FAST_FN(noDict, 7, 0)
+
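Note: in the rewritten loop, `stepSize` is 2 by default (`hasStep == 0`) and derived from `targetLength` in the accelerated `_1` variants, and `step` then grows by one every `kStepIncr` bytes without a match, so incompressible input is skimmed ever faster. A rough model of that cadence — the constants are copied from the diff (zstd's kSearchStrength is 8), but the loop below is a simplification, not the real control flow:

#include <stdio.h>

int main(void)
{
    size_t const kSearchStrength = 8;                            /* value used by zstd */
    size_t const kStepIncr = (size_t)1 << (kSearchStrength - 1); /* 128 */
    size_t step = 2;                                             /* hasStep==0 default */
    size_t ip = 0, nextStep = kStepIncr;
    unsigned visited = 0;

    while (ip < 4096) {        /* pretend no match ever hits */
        visited += 2;          /* each round probes ip0 and ip1 = ip0+1 */
        ip += step;            /* next pair starts at ip0 + step */
        if (ip >= nextStep) { step++; nextStep += kStepIncr; }
    }
    printf("positions probed in 4 KiB: %u (final step %zu)\n", visited, step);
    return 0;
}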
 size_t ZSTD_compressBlock_fast(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
@@ -189,24 +338,40 @@ size_t ZSTD_compressBlock_fast(
 {
     U32 const mls = ms->cParams.minMatch;
     assert(ms->dictMatchState == NULL);
+    if (ms->cParams.targetLength > 1) {
         switch(mls)
         {
         default: /* includes case 3 */
         case 4 :
-            return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4);
+            return ZSTD_compressBlock_fast_noDict_4_1(ms, seqStore, rep, src, srcSize);
         case 5 :
-            return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5);
+            return ZSTD_compressBlock_fast_noDict_5_1(ms, seqStore, rep, src, srcSize);
         case 6 :
-            return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6);
+            return ZSTD_compressBlock_fast_noDict_6_1(ms, seqStore, rep, src, srcSize);
         case 7 :
-            return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7);
+            return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize);
+        }
+    } else {
+        switch(mls)
+        {
+        default: /* includes case 3 */
+        case 4 :
+            return ZSTD_compressBlock_fast_noDict_4_0(ms, seqStore, rep, src, srcSize);
+        case 5 :
+            return ZSTD_compressBlock_fast_noDict_5_0(ms, seqStore, rep, src, srcSize);
+        case 6 :
+            return ZSTD_compressBlock_fast_noDict_6_0(ms, seqStore, rep, src, srcSize);
+        case 7 :
+            return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize);
+        }
+
     }
 }

 FORCE_INLINE_TEMPLATE
 size_t ZSTD_compressBlock_fast_dictMatchState_generic(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize, U32 const mls)
+        void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
 {
     const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32* const hashTable = ms->hashTable;
@@ -242,6 +407,8 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
     assert(endIndex - prefixStartIndex <= maxDistance);
     (void)maxDistance; (void)endIndex;   /* these variables are not used when assert() is disabled */

+    (void)hasStep; /* not currently specialized on whether it's accelerated */
+
     /* ensure there will be no underflow
      * when translating a dict index into a local index */
     assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
@@ -351,6 +518,12 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
     return (size_t)(iend - anchor);
 }

+
+ZSTD_GEN_FAST_FN(dictMatchState, 4, 0)
+ZSTD_GEN_FAST_FN(dictMatchState, 5, 0)
+ZSTD_GEN_FAST_FN(dictMatchState, 6, 0)
+ZSTD_GEN_FAST_FN(dictMatchState, 7, 0)
+
 size_t ZSTD_compressBlock_fast_dictMatchState(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize)
@@ -361,20 +534,20 @@ size_t ZSTD_compressBlock_fast_dictMatchState(
     {
     default: /* includes case 3 */
    case 4 :
-        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4);
+        return ZSTD_compressBlock_fast_dictMatchState_4_0(ms, seqStore, rep, src, srcSize);
    case 5 :
-        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5);
+        return ZSTD_compressBlock_fast_dictMatchState_5_0(ms, seqStore, rep, src, srcSize);
    case 6 :
-        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6);
+        return ZSTD_compressBlock_fast_dictMatchState_6_0(ms, seqStore, rep, src, srcSize);
    case 7 :
-        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7);
+        return ZSTD_compressBlock_fast_dictMatchState_7_0(ms, seqStore, rep, src, srcSize);
    }
 }


 static size_t ZSTD_compressBlock_fast_extDict_generic(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize, U32 const mls)
+        void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
 {
     const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32* const hashTable = ms->hashTable;
@@ -398,11 +571,13 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
     const BYTE* const ilimit = iend - 8;
     U32 offset_1=rep[0], offset_2=rep[1];

+    (void)hasStep; /* not currently specialized on whether it's accelerated */
+
     DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);

     /* switch to "regular" variant if extDict is invalidated due to maxDistance */
     if (prefixStartIndex == dictStartIndex)
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls);
+        return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize);

     /* Search Loop */
     while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
@@ -418,7 +593,7 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
         DEBUGLOG(7, "offset_1 = %u , curr = %u", offset_1, curr);

         if ( ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */
-             & (offset_1 < curr+1 - dictStartIndex) ) /* note: we are searching at curr+1 */
+             & (offset_1 <= curr+1 - dictStartIndex) ) /* note: we are searching at curr+1 */
           && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
             const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
             size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
@@ -453,7 +628,7 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
             U32 const current2 = (U32)(ip-base);
             U32 const repIndex2 = current2 - offset_2;
             const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
-            if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 < curr - dictStartIndex))  /* intentional overflow */
+            if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 <= curr - dictStartIndex))  /* intentional overflow */
              && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                 const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                 size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
@@ -475,6 +650,10 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
     return (size_t)(iend - anchor);
 }

+ZSTD_GEN_FAST_FN(extDict, 4, 0)
+ZSTD_GEN_FAST_FN(extDict, 5, 0)
+ZSTD_GEN_FAST_FN(extDict, 6, 0)
+ZSTD_GEN_FAST_FN(extDict, 7, 0)
+
 size_t ZSTD_compressBlock_fast_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
@@ -485,12 +664,12 @@ size_t ZSTD_compressBlock_fast_extDict(
     {
     default: /* includes case 3 */
    case 4 :
-        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
+        return ZSTD_compressBlock_fast_extDict_4_0(ms, seqStore, rep, src, srcSize);
    case 5 :
-        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
+        return ZSTD_compressBlock_fast_extDict_5_0(ms, seqStore, rep, src, srcSize);
    case 6 :
-        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
+        return ZSTD_compressBlock_fast_extDict_6_0(ms, seqStore, rep, src, srcSize);
    case 7 :
-        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
+        return ZSTD_compressBlock_fast_extDict_7_0(ms, seqStore, rep, src, srcSize);
    }
 }
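Note: several repcode guards in this file (and two in zstd_double_fast.c above) relax `<` to `<=`. As I read it, the boundary case admitted is a repcode offset that reaches exactly back to `dictStartIndex`, i.e. a match starting on the first dictionary byte, which is still addressable. A toy arithmetic check of that boundary — the concrete index values are invented for illustration:

#include <assert.h>

int main(void)
{
    /* Toy indices: the dictionary spans [dictStartIndex, prefixStartIndex). */
    unsigned const dictStartIndex = 100;
    unsigned const curr           = 250;                         /* searching at curr+1 */
    unsigned const offset_1       = curr + 1 - dictStartIndex;   /* == 151 */
    unsigned const repIndex       = curr + 1 - offset_1;         /* == dictStartIndex */

    /* The repcode target is the very first dictionary byte: still valid. */
    assert(repIndex >= dictStartIndex);
    assert(offset_1 <= curr + 1 - dictStartIndex);    /* new guard: accepted */
    assert(!(offset_1 < curr + 1 - dictStartIndex));  /* old guard: rejected */
    return 0;
}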
File diff suppressed because it is too large
@@ -159,12 +159,12 @@ size_t ZSTD_ldm_getTableSize(ldmParams_t params)
     size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
     size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)
                            + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));
-    return params.enableLdm ? totalSize : 0;
+    return params.enableLdm == ZSTD_ps_enable ? totalSize : 0;
 }

 size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
 {
-    return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0;
+    return params.enableLdm == ZSTD_ps_enable ? (maxChunkSize / params.minMatchLength) : 0;
 }

 /** ZSTD_ldm_getBucket() :
@@ -478,7 +478,7 @@ static size_t ZSTD_ldm_generateSequences_internal(
          */
         if (anchor > ip + hashed) {
             ZSTD_ldm_gear_reset(&hashState, anchor - minMatchLength, minMatchLength);
-            /* Continue the outter loop at anchor (ip + hashed == anchor). */
+            /* Continue the outer loop at anchor (ip + hashed == anchor). */
             ip = anchor - hashed;
             break;
         }
@@ -657,7 +657,7 @@ void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {

 size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
     ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-    ZSTD_useRowMatchFinderMode_e useRowMatchFinder,
+    ZSTD_paramSwitch_e useRowMatchFinder,
     void const* src, size_t srcSize)
 {
     const ZSTD_compressionParameters* const cParams = &ms->cParams;

@@ -66,7 +66,7 @@ size_t ZSTD_ldm_generateSequences(
  */
 size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
     ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-    ZSTD_useRowMatchFinderMode_e useRowMatchFinder,
+    ZSTD_paramSwitch_e useRowMatchFinder,
     void const* src, size_t srcSize);

 /**

@@ -11,7 +11,10 @@
 #ifndef ZSTD_LDM_GEARTAB_H
 #define ZSTD_LDM_GEARTAB_H

-static U64 ZSTD_ldm_gearTab[256] = {
+#include "../common/compiler.h" /* UNUSED_ATTR */
+#include "../common/mem.h"      /* U64 */
+
+static UNUSED_ATTR const U64 ZSTD_ldm_gearTab[256] = {
     0xf5b8f72c5f77775c, 0x84935f266b7ac412, 0xb647ada9ca730ccc,
     0xb065bb4b114fb1de, 0x34584e7e8c3a9fd0, 0x4e97e17c6ae26b05,
     0x3a03d743bc99a604, 0xcecd042422c4044f, 0x76de76c58524259e,
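Note: the geartab change makes the header self-sufficient (it now pulls in its own definitions of U64 and UNUSED_ATTR) and turns the table into `static UNUSED_ATTR const`, so every translation unit that includes it gets a read-only copy and no unused-variable warning when it never references the table. The general pattern, sketched outside zstd with UNUSED_ATTR spelled out for GCC/Clang only:

#include <stdio.h>

/* Header-defined lookup tables must be 'static const' (one read-only copy
 * per including TU) and tolerate being unused in some of those TUs. */
#if defined(__GNUC__)
#  define UNUSED_ATTR __attribute__((unused))
#else
#  define UNUSED_ATTR
#endif

static UNUSED_ATTR const unsigned long long demo_gearTab[4] = {
    0xf5b8f72c5f77775cULL, 0x84935f266b7ac412ULL,
    0xb647ada9ca730cccULL, 0xb065bb4b114fb1deULL,
};

int main(void)
{
    printf("%llx\n", demo_gearTab[0]);
    return 0;
}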
@@ -14,7 +14,6 @@


 #define ZSTD_LITFREQ_ADD    2   /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
-#define ZSTD_FREQ_DIV       4   /* log factor when using previous stats to init next stats */
 #define ZSTD_MAX_PRICE     (1<<30)

 #define ZSTD_PREDEF_THRESHOLD 1024   /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
@@ -24,11 +23,11 @@
 *  Price functions for optimal parser
 ***************************************/

-#if 0    /* approximation at bit level */
+#if 0    /* approximation at bit level (for tests) */
 #  define BITCOST_ACCURACY 0
 #  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
-#  define WEIGHT(stat)  ((void)opt, ZSTD_bitWeight(stat))
+#  define WEIGHT(stat, opt) ((void)opt, ZSTD_bitWeight(stat))
-#elif 0  /* fractional bit accuracy */
+#elif 0  /* fractional bit accuracy (for tests) */
 #  define BITCOST_ACCURACY 8
 #  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
 #  define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat))
@@ -66,7 +65,7 @@ MEM_STATIC double ZSTD_fCost(U32 price)

 static int ZSTD_compressedLiterals(optState_t const* const optPtr)
 {
-    return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed;
+    return optPtr->literalCompressionMode != ZSTD_ps_disable;
 }

 static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
@@ -79,25 +78,46 @@ static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
 }


-/* ZSTD_downscaleStat() :
- * reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus)
- * return the resulting sum of elements */
-static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus)
+static U32 sum_u32(const unsigned table[], size_t nbElts)
+{
+    size_t n;
+    U32 total = 0;
+    for (n=0; n<nbElts; n++) {
+        total += table[n];
+    }
+    return total;
+}
+
+static U32 ZSTD_downscaleStats(unsigned* table, U32 lastEltIndex, U32 shift)
 {
     U32 s, sum=0;
-    DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", (unsigned)lastEltIndex+1);
-    assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31);
+    DEBUGLOG(5, "ZSTD_downscaleStats (nbElts=%u, shift=%u)", (unsigned)lastEltIndex+1, (unsigned)shift);
+    assert(shift < 30);
     for (s=0; s<lastEltIndex+1; s++) {
-        table[s] = 1 + (table[s] >> (ZSTD_FREQ_DIV+malus));
+        table[s] = 1 + (table[s] >> shift);
         sum += table[s];
     }
     return sum;
 }

+/* ZSTD_scaleStats() :
+ * reduce all elements in table is sum too large
+ * return the resulting sum of elements */
+static U32 ZSTD_scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget)
+{
+    U32 const prevsum = sum_u32(table, lastEltIndex+1);
+    U32 const factor = prevsum >> logTarget;
+    DEBUGLOG(5, "ZSTD_scaleStats (nbElts=%u, target=%u)", (unsigned)lastEltIndex+1, (unsigned)logTarget);
+    assert(logTarget < 30);
+    if (factor <= 1) return prevsum;
+    return ZSTD_downscaleStats(table, lastEltIndex, ZSTD_highbit32(factor));
+}
+
 /* ZSTD_rescaleFreqs() :
  * if first block (detected by optPtr->litLengthSum == 0) : init statistics
  *    take hints from dictionary if there is one
- *    or init from zero, using src for literals stats, or flat 1 for match symbols
+ *    and init from zero if there is none,
+ *    using src for literals stats, and baseline stats for sequence symbols
  * otherwise downscale existing stats, to be used as seed for next block.
  */
 static void
@@ -126,7 +146,7 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
             optPtr->litSum = 0;
             for (lit=0; lit<=MaxLit; lit++) {
                 U32 const scaleLog = 11;   /* scale to 2K */
-                U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit);
+                U32 const bitCost = HUF_getNbBitsFromCTable(optPtr->symbolCosts->huf.CTable, lit);
                 assert(bitCost <= scaleLog);
                 optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
                 optPtr->litSum += optPtr->litFreq[lit];
@@ -174,14 +194,18 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
             if (compressedLiterals) {
                 unsigned lit = MaxLit;
                 HIST_count_simple(optPtr->litFreq, &lit, src, srcSize);   /* use raw first block to init statistics */
-                optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
+                optPtr->litSum = ZSTD_downscaleStats(optPtr->litFreq, MaxLit, 8);
             }

-            {   unsigned ll;
-                for (ll=0; ll<=MaxLL; ll++)
-                    optPtr->litLengthFreq[ll] = 1;
+            {   unsigned const baseLLfreqs[MaxLL+1] = {
+                    4, 2, 1, 1, 1, 1, 1, 1,
+                    1, 1, 1, 1, 1, 1, 1, 1,
+                    1, 1, 1, 1, 1, 1, 1, 1,
+                    1, 1, 1, 1, 1, 1, 1, 1,
+                    1, 1, 1, 1
+                };
+                ZSTD_memcpy(optPtr->litLengthFreq, baseLLfreqs, sizeof(baseLLfreqs)); optPtr->litLengthSum = sum_u32(baseLLfreqs, MaxLL+1);
             }
-            optPtr->litLengthSum = MaxLL+1;

             {   unsigned ml;
                 for (ml=0; ml<=MaxML; ml++)
@@ -189,21 +213,25 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
             }
             optPtr->matchLengthSum = MaxML+1;

-            {   unsigned of;
-                for (of=0; of<=MaxOff; of++)
-                    optPtr->offCodeFreq[of] = 1;
+            {   unsigned const baseOFCfreqs[MaxOff+1] = {
+                    6, 2, 1, 1, 2, 3, 4, 4,
+                    4, 3, 2, 1, 1, 1, 1, 1,
+                    1, 1, 1, 1, 1, 1, 1, 1,
+                    1, 1, 1, 1, 1, 1, 1, 1
+                };
+                ZSTD_memcpy(optPtr->offCodeFreq, baseOFCfreqs, sizeof(baseOFCfreqs)); optPtr->offCodeSum = sum_u32(baseOFCfreqs, MaxOff+1);
             }
-            optPtr->offCodeSum = MaxOff+1;

         }

     } else {   /* new block : re-use previous statistics, scaled down */

         if (compressedLiterals)
-            optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
+            optPtr->litSum = ZSTD_scaleStats(optPtr->litFreq, MaxLit, 12);
-        optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0);
+        optPtr->litLengthSum = ZSTD_scaleStats(optPtr->litLengthFreq, MaxLL, 11);
-        optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0);
+        optPtr->matchLengthSum = ZSTD_scaleStats(optPtr->matchLengthFreq, MaxML, 11);
-        optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0);
+        optPtr->offCodeSum = ZSTD_scaleStats(optPtr->offCodeFreq, MaxOff, 11);
     }

     ZSTD_setBasePrices(optPtr, optLevel);
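Note: ZSTD_scaleStats replaces the old fixed ZSTD_FREQ_DIV shift with a data-driven one: it shifts every frequency cell just enough that the table's sum lands near 2^logTarget, while keeping each cell at least 1 so no symbol ever becomes impossible to price. A stand-alone model of the arithmetic — the `_demo` names are mine, not the zstd functions:

#include <stdio.h>

static unsigned sum_u32_demo(const unsigned* t, size_t n)
{   unsigned s = 0; size_t i; for (i = 0; i < n; i++) s += t[i]; return s; }

static unsigned highbit32_demo(unsigned v)   /* position of highest set bit */
{   unsigned r = 0; while (v >>= 1) r++; return r; }

/* Shift every cell so the table's sum drops near 2^logTarget; every cell
 * stays >= 1, mirroring ZSTD_downscaleStats/ZSTD_scaleStats. */
static unsigned scale_stats_demo(unsigned* t, size_t n, unsigned logTarget)
{
    unsigned const prevsum = sum_u32_demo(t, n);
    unsigned const factor  = prevsum >> logTarget;
    unsigned shift, sum = 0;
    size_t i;
    if (factor <= 1) return prevsum;         /* already small enough */
    shift = highbit32_demo(factor);
    for (i = 0; i < n; i++) { t[i] = 1 + (t[i] >> shift); sum += t[i]; }
    return sum;
}

int main(void)
{
    unsigned freq[4] = { 40000, 8000, 800, 8 };
    unsigned const sum = scale_stats_demo(freq, 4, 11);   /* target ~2048 */
    printf("scaled sum = %u (freqs %u %u %u %u)\n", sum, freq[0], freq[1], freq[2], freq[3]);
    return 0;
}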
@@ -338,7 +366,7 @@ MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)

 /* Update hashTable3 up to ip (excluded)
    Assumption : always within prefix (i.e. not within extDict) */
-static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms,
+static U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms,
                                               U32* nextToUpdate3,
                                               const BYTE* const ip)
 {
@@ -364,11 +392,13 @@ static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms,
 *  Binary Tree search
 ***************************************/
 /** ZSTD_insertBt1() : add one or multiple positions to tree.
- *  ip : assumed <= iend-8 .
+ * @param ip assumed <= iend-8 .
+ * @param target The target of ZSTD_updateTree_internal() - we are filling to this position
  * @return : nb of positions added */
 static U32 ZSTD_insertBt1(
-                ZSTD_matchState_t* ms,
+                const ZSTD_matchState_t* ms,
                 const BYTE* const ip, const BYTE* const iend,
+                U32 const target,
                 U32 const mls, const int extDict)
 {
     const ZSTD_compressionParameters* const cParams = &ms->cParams;
@@ -391,7 +421,10 @@ static U32 ZSTD_insertBt1(
     U32* smallerPtr = bt + 2*(curr&btMask);
     U32* largerPtr  = smallerPtr + 1;
     U32 dummy32;   /* to be nullified at the end */
-    U32 const windowLow = ms->window.lowLimit;
+    /* windowLow is based on target because
+     * we only need positions that will be in the window at the end of the tree update.
+     */
+    U32 const windowLow = ZSTD_getLowestMatchIndex(ms, target, cParams->windowLog);
     U32 matchEndIdx = curr+8+1;
     size_t bestLength = 8;
     U32 nbCompares = 1U << cParams->searchLog;
@@ -404,11 +437,12 @@ static U32 ZSTD_insertBt1(

     DEBUGLOG(8, "ZSTD_insertBt1 (%u)", curr);

+    assert(curr <= target);
     assert(ip <= iend-8);   /* required for h calculation */
     hashTable[h] = curr;   /* Update Hash Table */

     assert(windowLow > 0);
-    while (nbCompares-- && (matchIndex >= windowLow)) {
+    for (; nbCompares && (matchIndex >= windowLow); --nbCompares) {
         U32* const nextPtr = bt + 2*(matchIndex & btMask);
         size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
         assert(matchIndex < curr);
@@ -492,7 +526,7 @@ void ZSTD_updateTree_internal(
                 idx, target, dictMode);

     while(idx < target) {
-        U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict);
+        U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, target, mls, dictMode == ZSTD_extDict);
         assert(idx < (U32)(idx + forward));
         idx += forward;
     }
@@ -635,11 +669,11 @@ U32 ZSTD_insertBtAndGetAllMatches (
                 return 1;
     }   }   }
     /* no dictMatchState lookup: dicts don't have a populated HC3 table */
-    }
+    }   /* if (mls == 3) */

     hashTable[h] = curr;   /* Update Hash Table */

-    while (nbCompares-- && (matchIndex >= matchLow)) {
+    for (; nbCompares && (matchIndex >= matchLow); --nbCompares) {
         U32* const nextPtr = bt + 2*(matchIndex & btMask);
         const BYTE* match;
         size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
@@ -672,8 +706,7 @@ U32 ZSTD_insertBtAndGetAllMatches (
                 | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
                 if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */
                 break;   /* drop, to preserve bt consistency (miss a little bit of compression) */
-            }
-        }
+        }   }

         if (match[matchLength] < ip[matchLength]) {
             /* match smaller than current */
@@ -692,12 +725,13 @@ U32 ZSTD_insertBtAndGetAllMatches (

     *smallerPtr = *largerPtr = 0;

+    assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
     if (dictMode == ZSTD_dictMatchState && nbCompares) {
         size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls);
         U32 dictMatchIndex = dms->hashTable[dmsH];
         const U32* const dmsBt = dms->chainTable;
         commonLengthSmaller = commonLengthLarger = 0;
-        while (nbCompares-- && (dictMatchIndex > dmsLowLimit)) {
+        for (; nbCompares && (dictMatchIndex > dmsLowLimit); --nbCompares) {
             const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask);
             size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
             const BYTE* match = dmsBase + dictMatchIndex;
@@ -718,8 +752,7 @@ U32 ZSTD_insertBtAndGetAllMatches (
                 if ( (matchLength > ZSTD_OPT_NUM)
                    | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
                     break;   /* drop, to guarantee consistency (miss a little bit of compression) */
-                }
-            }
+            }   }

             if (dictMatchIndex <= dmsBtLow) { break; }   /* beyond tree size, stop the search */
             if (match[matchLength] < ip[matchLength]) {
@@ -729,39 +762,90 @@ U32 ZSTD_insertBtAndGetAllMatches (
                 /* match is larger than current */
                 commonLengthLarger = matchLength;
                 dictMatchIndex = nextPtr[0];
-            }
-        }
-    }
+    }   }   }   /* if (dictMode == ZSTD_dictMatchState) */

     assert(matchEndIdx > curr+8);
     ms->nextToUpdate = matchEndIdx - 8;  /* skip repetitive patterns */
     return mnum;
 }

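Note: every `while (nbCompares-- && ...)` in this file became `for (; nbCompares && ...; --nbCompares)`. The postfix form decrements even on the failing test, so a loop that runs to exhaustion leaves the counter at UINT_MAX, and the later `if (dictMode == ZSTD_dictMatchState && nbCompares)` would then enter the dictionary search with a bogus budget; the new `assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX))` guards exactly that. A two-line demonstration of the difference:

#include <stdio.h>

int main(void)
{
    unsigned a = 2, b = 2;

    while (a--) { }         /* old form: exits with a == UINT_MAX */
    for (; b; --b) { }      /* new form: exits with b == 0        */

    printf("while-form leftover: %u\n", a);   /* 4294967295 */
    printf("for-form   leftover: %u\n", b);   /* 0 */
    return 0;
}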
|
typedef U32 (*ZSTD_getAllMatchesFn)(
|
||||||
FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (
|
ZSTD_match_t*,
|
||||||
ZSTD_match_t* matches, /* store result (match found, increasing size) in this table */
|
ZSTD_matchState_t*,
|
||||||
ZSTD_matchState_t* ms,
|
U32*,
|
||||||
U32* nextToUpdate3,
|
const BYTE*,
|
||||||
const BYTE* ip, const BYTE* const iHighLimit, const ZSTD_dictMode_e dictMode,
|
const BYTE*,
|
||||||
const U32 rep[ZSTD_REP_NUM],
|
const U32 rep[ZSTD_REP_NUM],
|
||||||
U32 const ll0,
|
U32 const ll0,
|
||||||
U32 const lengthToBeat)
|
U32 const lengthToBeat);
|
||||||
|
|
||||||
|
FORCE_INLINE_TEMPLATE U32 ZSTD_btGetAllMatches_internal(
|
||||||
|
ZSTD_match_t* matches,
|
||||||
|
ZSTD_matchState_t* ms,
|
||||||
|
U32* nextToUpdate3,
|
||||||
|
const BYTE* ip,
|
||||||
|
const BYTE* const iHighLimit,
|
||||||
|
const U32 rep[ZSTD_REP_NUM],
|
||||||
|
U32 const ll0,
|
||||||
|
U32 const lengthToBeat,
|
||||||
|
const ZSTD_dictMode_e dictMode,
|
||||||
|
const U32 mls)
|
||||||
{
|
{
|
||||||
const ZSTD_compressionParameters* const cParams = &ms->cParams;
|
assert(BOUNDED(3, ms->cParams.minMatch, 6) == mls);
|
||||||
U32 const matchLengthSearch = cParams->minMatch;
|
DEBUGLOG(8, "ZSTD_BtGetAllMatches(dictMode=%d, mls=%u)", (int)dictMode, mls);
|
||||||
DEBUGLOG(8, "ZSTD_BtGetAllMatches");
|
if (ip < ms->window.base + ms->nextToUpdate)
|
||||||
if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */
|
return 0; /* skipped area */
|
||||||
ZSTD_updateTree_internal(ms, ip, iHighLimit, matchLengthSearch, dictMode);
|
ZSTD_updateTree_internal(ms, ip, iHighLimit, mls, dictMode);
|
||||||
switch(matchLengthSearch)
|
return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, mls);
|
||||||
{
|
|
||||||
case 3 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 3);
|
|
||||||
default :
|
|
||||||
case 4 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 4);
|
|
||||||
case 5 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 5);
|
|
||||||
case 7 :
|
|
||||||
case 6 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 6);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#define ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls) ZSTD_btGetAllMatches_##dictMode##_##mls
|
||||||
|
|
||||||
|
#define GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, mls) \
|
||||||
|
static U32 ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls)( \
|
||||||
|
ZSTD_match_t* matches, \
|
||||||
|
ZSTD_matchState_t* ms, \
|
||||||
|
U32* nextToUpdate3, \
|
||||||
|
const BYTE* ip, \
|
||||||
|
const BYTE* const iHighLimit, \
|
||||||
|
const U32 rep[ZSTD_REP_NUM], \
|
||||||
|
U32 const ll0, \
|
||||||
|
U32 const lengthToBeat) \
|
||||||
|
{ \
|
||||||
|
return ZSTD_btGetAllMatches_internal( \
|
||||||
|
matches, ms, nextToUpdate3, ip, iHighLimit, \
|
||||||
|
rep, ll0, lengthToBeat, ZSTD_##dictMode, mls); \
|
||||||
|
}
|
||||||
|
|
||||||
|
#define GEN_ZSTD_BT_GET_ALL_MATCHES(dictMode) \
|
||||||
|
GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 3) \
|
||||||
|
GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 4) \
|
||||||
|
GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 5) \
|
||||||
|
GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 6)
|
||||||
|
|
||||||
|
GEN_ZSTD_BT_GET_ALL_MATCHES(noDict)
|
||||||
|
GEN_ZSTD_BT_GET_ALL_MATCHES(extDict)
|
||||||
|
GEN_ZSTD_BT_GET_ALL_MATCHES(dictMatchState)
|
||||||
|
|
||||||
|
#define ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMode) \
|
||||||
|
{ \
|
||||||
|
ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 3), \
|
||||||
|
ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 4), \
|
||||||
|
ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 5), \
|
||||||
|
ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 6) \
|
||||||
|
}
|
||||||
|
|
||||||
|
static ZSTD_getAllMatchesFn ZSTD_selectBtGetAllMatches(ZSTD_matchState_t const* ms, ZSTD_dictMode_e const dictMode)
|
||||||
|
{
+    ZSTD_getAllMatchesFn const getAllMatchesFns[3][4] = {
+        ZSTD_BT_GET_ALL_MATCHES_ARRAY(noDict),
+        ZSTD_BT_GET_ALL_MATCHES_ARRAY(extDict),
+        ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMatchState)
+    };
+    U32 const mls = BOUNDED(3, ms->cParams.minMatch, 6);
+    assert((U32)dictMode < 3);
+    assert(mls - 3 < 4);
+    return getAllMatchesFns[(int)dictMode][mls - 3];
 }
 
 /*************************
@@ -899,11 +983,11 @@ static void ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, ZSTD_match_t* matches, U32* nbMatches,
     ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock);
 }
 
 
 /*-*******************************
 *  Optimal parser
 *********************************/
 
 
 static U32 ZSTD_totalLen(ZSTD_optimal_t sol)
 {
     return sol.litlen + sol.mlen;
@@ -944,6 +1028,8 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
     const BYTE* const prefixStart = base + ms->window.dictLimit;
     const ZSTD_compressionParameters* const cParams = &ms->cParams;
 
+    ZSTD_getAllMatchesFn getAllMatches = ZSTD_selectBtGetAllMatches(ms, dictMode);
+
     U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
     U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4;
     U32 nextToUpdate3 = ms->nextToUpdate;
@@ -971,7 +1057,7 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
         /* find first match */
         {   U32 const litlen = (U32)(ip - anchor);
             U32 const ll0 = !litlen;
-            U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, ip, iend, dictMode, rep, ll0, minMatch);
+            U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, ip, iend, rep, ll0, minMatch);
             ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
                                               (U32)(ip-istart), (U32)(iend - ip));
             if (!nbMatches) { ip++; continue; }
@@ -985,7 +1071,7 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
              * in every price. We include the literal length to avoid negative
              * prices when we subtract the previous literal length.
              */
-            opt[0].price = ZSTD_litLengthPrice(litlen, optStatePtr, optLevel);
+            opt[0].price = (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel);
 
             /* large match -> immediate encoding */
             {   U32 const maxML = matches[nbMatches-1].len;
@@ -1005,7 +1091,8 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
             }   }
 
             /* set prices for first matches starting position == 0 */
-            {   U32 const literalsPrice = opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
+            assert(opt[0].price >= 0);
+            {   U32 const literalsPrice = (U32)opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
                 U32 pos;
                 U32 matchNb;
                 for (pos = 1; pos < minMatch; pos++) {
@@ -1022,7 +1109,7 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
                     opt[pos].mlen = pos;
                     opt[pos].off = offset;
                     opt[pos].litlen = litlen;
-                    opt[pos].price = sequencePrice;
+                    opt[pos].price = (int)sequencePrice;
                 }   }
                 last_pos = pos-1;
             }
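The hunks above swap zstd's old ZSTD_BtGetAllMatches(), which re-tested dictMode and minMatch on every call, for a ZSTD_getAllMatchesFn pointer picked once per block from a 3x4 table of pre-specialized match finders. A minimal sketch of that dispatch pattern, with hypothetical stand-in functions (the real table entries are generated by ZSTD_BT_GET_ALL_MATCHES_ARRAY):

#include <stdio.h>

/* Hypothetical stand-ins: in the real diff, one specialized match finder
 * exists per (dictMode, minMatch 3..6) pair. Shrunk to one dictMode row. */
typedef unsigned (*match_fn)(const unsigned char* ip);

static unsigned find_mls3(const unsigned char* ip) { (void)ip; return 3; }
static unsigned find_mls4(const unsigned char* ip) { (void)ip; return 4; }
static unsigned find_mls5(const unsigned char* ip) { (void)ip; return 5; }
static unsigned find_mls6(const unsigned char* ip) { (void)ip; return 6; }

static match_fn select_match_fn(unsigned minMatch)
{
    static match_fn const table[4] = { find_mls3, find_mls4, find_mls5, find_mls6 };
    unsigned const mls = minMatch < 3 ? 3 : (minMatch > 6 ? 6 : minMatch);  /* BOUNDED(3, minMatch, 6) */
    return table[mls - 3];
}

int main(void)
{
    match_fn getAllMatches = select_match_fn(4);  /* chosen once per block... */
    printf("%u\n", getAllMatches((const unsigned char*)"data"));  /* ...then called in the hot loop */
    return 0;
}

Hoisting the selection out of the loop removes per-position branching and lets each specialized finder treat its parameters as compile-time constants.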
@@ -1037,9 +1124,9 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
             /* Fix current position with one literal if cheaper */
             {   U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1;
                 int const price = opt[cur-1].price
-                                + ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
-                                + ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
-                                - ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
+                                + (int)ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
+                                + (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
+                                - (int)ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
                 assert(price < 1000000000); /* overflow check */
                 if (price <= opt[cur].price) {
                     DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
@@ -1082,11 +1169,12 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
                 continue;   /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
             }
 
+            assert(opt[cur].price >= 0);
             {   U32 const ll0 = (opt[cur].mlen != 0);
                 U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
-                U32 const previousPrice = opt[cur].price;
+                U32 const previousPrice = (U32)opt[cur].price;
                 U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
-                U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, inr, iend, dictMode, opt[cur].rep, ll0, minMatch);
+                U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, inr, iend, opt[cur].rep, ll0, minMatch);
                 U32 matchNb;
 
                 ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
@@ -1124,7 +1212,7 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
 
                 for (mlen = lastML; mlen >= startML; mlen--) {  /* scan downward */
                     U32 const pos = cur + mlen;
-                    int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
+                    int const price = (int)basePrice + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
 
                     if ((pos > last_pos) || (price < opt[pos].price)) {
                         DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
@@ -1210,38 +1298,30 @@ _shortestPath:   /* cur, last_pos, best_mlen, best_off have to be set */
     return (size_t)(iend - anchor);
 }
 
+static size_t ZSTD_compressBlock_opt0(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
+{
+    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /* optLevel */, dictMode);
+}
+
+static size_t ZSTD_compressBlock_opt2(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
+{
+    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /* optLevel */, dictMode);
+}
+
 size_t ZSTD_compressBlock_btopt(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         const void* src, size_t srcSize)
 {
     DEBUGLOG(5, "ZSTD_compressBlock_btopt");
-    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_noDict);
+    return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
 }
 
 
-/* used in 2-pass strategy */
-static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus)
-{
-    U32 s, sum=0;
-    assert(ZSTD_FREQ_DIV+bonus >= 0);
-    for (s=0; s<lastEltIndex+1; s++) {
-        table[s] <<= ZSTD_FREQ_DIV+bonus;
-        table[s]--;
-        sum += table[s];
-    }
-    return sum;
-}
-
-/* used in 2-pass strategy */
-MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr)
-{
-    if (ZSTD_compressedLiterals(optPtr))
-        optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);
-    optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0);
-    optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0);
-    optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0);
-}
-
 /* ZSTD_initStats_ultra():
  * make a first compression pass, just to seed stats with more accurate starting values.
@@ -1263,7 +1343,7 @@ ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
     assert(ms->window.dictLimit == ms->window.lowLimit);   /* no dictionary */
     assert(ms->window.dictLimit - ms->nextToUpdate <= 1);  /* no prefix (note: intentional overflow, defined as 2-complement) */
 
-    ZSTD_compressBlock_opt_generic(ms, seqStore, tmpRep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);   /* generate stats into ms->opt*/
+    ZSTD_compressBlock_opt2(ms, seqStore, tmpRep, src, srcSize, ZSTD_noDict);   /* generate stats into ms->opt*/
 
     /* invalidate first scan from history */
     ZSTD_resetSeqStore(seqStore);
@@ -1272,8 +1352,6 @@ ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
     ms->window.lowLimit = ms->window.dictLimit;
     ms->nextToUpdate = ms->window.dictLimit;
 
-    /* re-inforce weight of collected statistics */
-    ZSTD_upscaleStats(&ms->opt);
 }
 
 size_t ZSTD_compressBlock_btultra(
@@ -1281,7 +1359,7 @@ size_t ZSTD_compressBlock_btultra(
         const void* src, size_t srcSize)
 {
     DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
-    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
+    return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
 }
 
 size_t ZSTD_compressBlock_btultra2(
@@ -1309,35 +1387,35 @@ size_t ZSTD_compressBlock_btultra2(
         ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);
     }
 
-    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
+    return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
 }
 
 size_t ZSTD_compressBlock_btopt_dictMatchState(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         const void* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_dictMatchState);
+    return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
 }
 
 size_t ZSTD_compressBlock_btultra_dictMatchState(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         const void* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_dictMatchState);
+    return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
 }
 
 size_t ZSTD_compressBlock_btopt_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_extDict);
+    return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
 }
 
 size_t ZSTD_compressBlock_btultra_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         const void* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_extDict);
+    return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
 }
 
 /* note : no btultra2 variant for extDict nor dictMatchState,
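The ZSTD_compressBlock_opt0()/opt2() wrappers introduced above exist so that every call into ZSTD_compressBlock_opt_generic() passes a compile-time-constant optLevel, letting the compiler clone and specialize the generic body rather than branch on the level in the hot loop. A reduced sketch of the idiom, with hypothetical names:

#include <stddef.h>
#include <stdio.h>

/* Generic worker: `level` steers many branches in the hot path. */
static size_t compress_generic(const char* src, size_t srcSize, int level)
{
    size_t cost = srcSize;
    if (level >= 2) cost -= srcSize / 4;   /* stand-in for the richer price model */
    (void)src;
    return cost;
}

/* Constant-level wrappers: `level` folds to a constant inside each copy,
 * so the compiler can inline the worker and drop the dead branches. */
static size_t compress_opt0(const char* src, size_t srcSize)
{   return compress_generic(src, srcSize, 0 /* optLevel */); }

static size_t compress_opt2(const char* src, size_t srcSize)
{   return compress_generic(src, srcSize, 2 /* optLevel */); }

int main(void)
{
    printf("%zu %zu\n", compress_opt0("xxxx", 4), compress_opt2("xxxx", 4));
    return 0;
}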
zstd/compress/zstdmt_compress.c
@@ -467,7 +467,7 @@ ZSTDMT_serialState_reset(serialState_t* serialState,
                                         ZSTD_dictContentType_e dictContentType)
 {
     /* Adjust parameters */
-    if (params.ldmParams.enableLdm) {
+    if (params.ldmParams.enableLdm == ZSTD_ps_enable) {
         DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10);
         ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
         assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
@@ -478,7 +478,7 @@ ZSTDMT_serialState_reset(serialState_t* serialState,
     serialState->nextJobID = 0;
     if (params.fParams.checksumFlag)
         XXH64_reset(&serialState->xxhState, 0);
-    if (params.ldmParams.enableLdm) {
+    if (params.ldmParams.enableLdm == ZSTD_ps_enable) {
         ZSTD_customMem cMem = params.customMem;
         unsigned const hashLog = params.ldmParams.hashLog;
         size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t);
@@ -564,7 +564,7 @@ static void ZSTDMT_serialState_update(serialState_t* serialState,
     /* A future job may error and skip our job */
     if (serialState->nextJobID == jobID) {
         /* It is now our turn, do any processing necessary */
-        if (serialState->params.ldmParams.enableLdm) {
+        if (serialState->params.ldmParams.enableLdm == ZSTD_ps_enable) {
            size_t error;
            assert(seqStore.seq != NULL && seqStore.pos == 0 &&
                   seqStore.size == 0 && seqStore.capacity > 0);
@@ -594,7 +594,7 @@ static void ZSTDMT_serialState_update(serialState_t* serialState,
         if (seqStore.size > 0) {
             size_t const err = ZSTD_referenceExternalSequences(
                 jobCCtx, seqStore.seq, seqStore.size);
-            assert(serialState->params.ldmParams.enableLdm);
+            assert(serialState->params.ldmParams.enableLdm == ZSTD_ps_enable);
             assert(!ZSTD_isError(err));
             (void)err;
         }
@@ -672,7 +672,7 @@ static void ZSTDMT_compressionJob(void* jobDescription)
         if (dstBuff.start==NULL) JOB_ERROR(ERROR(memory_allocation));
         job->dstBuff = dstBuff;   /* this value can be read in ZSTDMT_flush, when it copies the whole job */
     }
-    if (jobParams.ldmParams.enableLdm && rawSeqStore.seq == NULL)
+    if (jobParams.ldmParams.enableLdm == ZSTD_ps_enable && rawSeqStore.seq == NULL)
         JOB_ERROR(ERROR(memory_allocation));
 
     /* Don't compute the checksum for chunks, since we compute it externally,
@@ -680,7 +680,7 @@ static void ZSTDMT_compressionJob(void* jobDescription)
      */
     if (job->jobID != 0) jobParams.fParams.checksumFlag = 0;
     /* Don't run LDM for the chunks, since we handle it externally */
-    jobParams.ldmParams.enableLdm = 0;
+    jobParams.ldmParams.enableLdm = ZSTD_ps_disable;
     /* Correct nbWorkers to 0. */
     jobParams.nbWorkers = 0;
 
@@ -807,6 +807,15 @@ typedef struct {
 static const roundBuff_t kNullRoundBuff = {NULL, 0, 0};
 
 #define RSYNC_LENGTH 32
+/* Don't create chunks smaller than the zstd block size.
+ * This stops us from regressing compression ratio too much,
+ * and ensures our output fits in ZSTD_compressBound().
+ *
+ * If this is shrunk < ZSTD_BLOCKSIZELOG_MIN then
+ * ZSTD_COMPRESSBOUND() will need to be updated.
+ */
+#define RSYNC_MIN_BLOCK_LOG ZSTD_BLOCKSIZELOG_MAX
+#define RSYNC_MIN_BLOCK_SIZE (1<<RSYNC_MIN_BLOCK_LOG)
 
 typedef struct {
   U64 hash;
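RSYNC_MIN_BLOCK_SIZE above floors the chunk size in rsyncable mode; the average chunk size is governed by the hit mask: with rsyncBits low bits required to be set, a sync point fires with probability 2^-rsyncBits per byte, so chunks average about 2^rsyncBits bytes. A small back-of-the-envelope check (standalone sketch; ZSTD_highbit32 replaced by a plain loop):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned const jobSizeKB = 4096;     /* e.g. a 4 MiB target job size */
    unsigned rsyncBits = 10;             /* ZSTD_highbit32(jobSizeKB) + 10 */
    { unsigned v = jobSizeKB; while (v >>= 1) rsyncBits++; }
    uint64_t const hitMask = (1ULL << rsyncBits) - 1;
    /* A position is a sync point when (hash & hitMask) == hitMask:
     * probability 2^-rsyncBits per byte, hence a mean spacing of 2^rsyncBits. */
    printf("rsyncBits=%u, average chunk ~ %llu bytes\n",
           rsyncBits, (unsigned long long)(hitMask + 1));
    return 0;
}

With the 4 MiB example this prints rsyncBits=22, i.e. chunks average the target job size, while RSYNC_MIN_BLOCK_SIZE guarantees no chunk falls below a full zstd block.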
@@ -1135,7 +1144,7 @@ size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx)
 static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params)
 {
     unsigned jobLog;
-    if (params->ldmParams.enableLdm) {
+    if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
         /* In Long Range Mode, the windowLog is typically oversized.
          * In which case, it's preferable to determine the jobSize
          * based on cycleLog instead. */
@@ -1179,7 +1188,7 @@ static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params)
     int const overlapRLog = 9 - ZSTDMT_overlapLog(params->overlapLog, params->cParams.strategy);
     int ovLog = (overlapRLog >= 8) ? 0 : (params->cParams.windowLog - overlapRLog);
     assert(0 <= overlapRLog && overlapRLog <= 8);
-    if (params->ldmParams.enableLdm) {
+    if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
         /* In Long Range Mode, the windowLog is typically oversized.
          * In which case, it's preferable to determine the jobSize
          * based on chainLog instead.
@@ -1252,6 +1261,9 @@ size_t ZSTDMT_initCStream_internal(
         /* Aim for the targetsectionSize as the average job size. */
         U32 const jobSizeKB = (U32)(mtctx->targetSectionSize >> 10);
         U32 const rsyncBits = (assert(jobSizeKB >= 1), ZSTD_highbit32(jobSizeKB) + 10);
+        /* We refuse to create jobs < RSYNC_MIN_BLOCK_SIZE bytes, so make sure our
+         * expected job size is at least 4x larger. */
+        assert(rsyncBits >= RSYNC_MIN_BLOCK_LOG + 2);
         DEBUGLOG(4, "rsyncLog = %u", rsyncBits);
         mtctx->rsync.hash = 0;
         mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1;
@@ -1263,7 +1275,7 @@ size_t ZSTDMT_initCStream_internal(
     ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize));
     {
         /* If ldm is enabled we need windowSize space. */
-        size_t const windowSize = mtctx->params.ldmParams.enableLdm ? (1U << mtctx->params.cParams.windowLog) : 0;
+        size_t const windowSize = mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable ? (1U << mtctx->params.cParams.windowLog) : 0;
         /* Two buffers of slack, plus extra space for the overlap
          * This is the minimum slack that LDM works with. One extra because
          * flush might waste up to targetSectionSize-1 bytes. Another extra
@@ -1538,18 +1550,22 @@ static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx)
 static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range)
 {
     BYTE const* const bufferStart = (BYTE const*)buffer.start;
-    BYTE const* const bufferEnd = bufferStart + buffer.capacity;
     BYTE const* const rangeStart = (BYTE const*)range.start;
-    BYTE const* const rangeEnd = range.size != 0 ? rangeStart + range.size : rangeStart;
 
     if (rangeStart == NULL || bufferStart == NULL)
         return 0;
 
+    {
+        BYTE const* const bufferEnd = bufferStart + buffer.capacity;
+        BYTE const* const rangeEnd = rangeStart + range.size;
+
         /* Empty ranges cannot overlap */
         if (bufferStart == bufferEnd || rangeStart == rangeEnd)
             return 0;
 
         return bufferStart < rangeEnd && rangeStart < bufferEnd;
+    }
 }
 
 static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window)
 {
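The ZSTDMT_isOverlapped() rewrite above is not cosmetic: the old version computed an end pointer with `rangeStart + range.size` before checking rangeStart for NULL, and pointer arithmetic on a null pointer is undefined behavior in C. The new shape checks NULL first and only then derives the end pointers inside a block. A reduced illustration of the two shapes (hypothetical helpers, not zstd code):

#include <stdio.h>
#include <stddef.h>

/* UB-prone shape: end pointers are computed even when a start may be NULL. */
static int overlapped_old(const char* aStart, size_t aLen,
                          const char* bStart, size_t bLen)
{
    const char* const aEnd = aStart + aLen;   /* UB if aStart == NULL, aLen != 0 */
    const char* const bEnd = bStart + bLen;
    if (aStart == NULL || bStart == NULL) return 0;
    return aStart < bEnd && bStart < aEnd;
}

/* Safe shape: check NULL first, then derive ends inside a block. */
static int overlapped_new(const char* aStart, size_t aLen,
                          const char* bStart, size_t bLen)
{
    if (aStart == NULL || bStart == NULL) return 0;
    {   const char* const aEnd = aStart + aLen;
        const char* const bEnd = bStart + bLen;
        if (aStart == aEnd || bStart == bEnd) return 0;  /* empty ranges */
        return aStart < bEnd && bStart < aEnd;
    }
}

int main(void)
{
    char buf[16];
    printf("%d %d %d\n",
           overlapped_old(buf, 8, buf + 4, 8),   /* 1: intersecting, valid inputs */
           overlapped_new(buf, 8, buf + 4, 8),   /* 1 */
           overlapped_new(buf, 4, buf + 8, 4));  /* 0: disjoint */
    return 0;
}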
@@ -1575,7 +1591,7 @@ static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window)
 
 static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer)
 {
-    if (mtctx->params.ldmParams.enableLdm) {
+    if (mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable) {
         ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex;
         DEBUGLOG(5, "ZSTDMT_waitForLdmComplete");
         DEBUGLOG(5, "source  [0x%zx, 0x%zx)",
@@ -1678,6 +1694,11 @@ findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input)
     if (!mtctx->params.rsyncable)
         /* Rsync is disabled. */
         return syncPoint;
+    if (mtctx->inBuff.filled + input.size - input.pos < RSYNC_MIN_BLOCK_SIZE)
+        /* We don't emit synchronization points if it would produce too small blocks.
+         * We don't have enough input to find a synchronization point, so don't look.
+         */
+        return syncPoint;
     if (mtctx->inBuff.filled + syncPoint.toLoad < RSYNC_LENGTH)
         /* Not enough to compute the hash.
          * We will miss any synchronization points in this RSYNC_LENGTH byte
@@ -1688,10 +1709,28 @@ findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input)
          */
         return syncPoint;
     /* Initialize the loop variables. */
-    if (mtctx->inBuff.filled >= RSYNC_LENGTH) {
-        /* We have enough bytes buffered to initialize the hash.
+    if (mtctx->inBuff.filled < RSYNC_MIN_BLOCK_SIZE) {
+        /* We don't need to scan the first RSYNC_MIN_BLOCK_SIZE positions
+         * because they can't possibly be a sync point. So we can start
+         * part way through the input buffer.
+         */
+        pos = RSYNC_MIN_BLOCK_SIZE - mtctx->inBuff.filled;
+        if (pos >= RSYNC_LENGTH) {
+            prev = istart + pos - RSYNC_LENGTH;
+            hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH);
+        } else {
+            assert(mtctx->inBuff.filled >= RSYNC_LENGTH);
+            prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH;
+            hash = ZSTD_rollingHash_compute(prev + pos, (RSYNC_LENGTH - pos));
+            hash = ZSTD_rollingHash_append(hash, istart, pos);
+        }
+    } else {
+        /* We have enough bytes buffered to initialize the hash,
+         * and are have processed enough bytes to find a sync point.
          * Start scanning at the beginning of the input.
          */
+        assert(mtctx->inBuff.filled >= RSYNC_MIN_BLOCK_SIZE);
+        assert(RSYNC_MIN_BLOCK_SIZE >= RSYNC_LENGTH);
         pos = 0;
         prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH;
         hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH);
@@ -1705,16 +1744,6 @@ findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input)
             syncPoint.flush = 1;
             return syncPoint;
         }
-    } else {
-        /* We don't have enough bytes buffered to initialize the hash, but
-         * we know we have at least RSYNC_LENGTH bytes total.
-         * Start scanning after the first RSYNC_LENGTH bytes less the bytes
-         * already buffered.
-         */
-        pos = RSYNC_LENGTH - mtctx->inBuff.filled;
-        prev = (BYTE const*)mtctx->inBuff.buffer.start - pos;
-        hash = ZSTD_rollingHash_compute(mtctx->inBuff.buffer.start, mtctx->inBuff.filled);
-        hash = ZSTD_rollingHash_append(hash, istart, pos);
     }
     /* Starting with the hash of the previous RSYNC_LENGTH bytes, roll
      * through the input. If we hit a synchronization point, then cut the
@@ -1726,8 +1755,9 @@ findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input)
      */
     for (; pos < syncPoint.toLoad; ++pos) {
         BYTE const toRemove = pos < RSYNC_LENGTH ? prev[pos] : istart[pos - RSYNC_LENGTH];
-        /* if (pos >= RSYNC_LENGTH) assert(ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); */
+        assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash);
         hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower);
+        assert(mtctx->inBuff.filled + pos >= RSYNC_MIN_BLOCK_SIZE);
         if ((hash & hitMask) == hitMask) {
             syncPoint.toLoad = pos + 1;
             syncPoint.flush = 1;
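The scan above maintains the invariant it now asserts: after each ZSTD_rollingHash_rotate(), the hash equals a fresh hash of the last RSYNC_LENGTH bytes. A self-contained polynomial rolling hash with the same rotate structure (illustrative constant; zstd's ZSTD_rollingHash_* functions use their own prime and byte bias):

#include <stdint.h>
#include <stddef.h>
#include <assert.h>

#define WIN 32                                        /* plays the role of RSYNC_LENGTH */
static const uint64_t PRIME = 0x9E3779B185EBCA87ULL;  /* illustrative constant */

static uint64_t hash_compute(const uint8_t* p, size_t n)  /* hash of p[0..n) */
{
    uint64_t h = 0;
    for (size_t i = 0; i < n; i++) h = h * PRIME + p[i];
    return h;
}

/* Slide the window one byte: remove `out`, append `in`.
 * `power` must be PRIME^(WIN-1) so `out`'s contribution cancels. */
static uint64_t hash_rotate(uint64_t h, uint8_t out, uint8_t in, uint64_t power)
{
    return (h - out * power) * PRIME + in;
}

int main(void)
{
    uint8_t buf[256];
    uint64_t power = 1, h;
    for (int i = 0; i < 256; i++) buf[i] = (uint8_t)(i * 7 + 1);
    for (int i = 0; i < WIN - 1; i++) power *= PRIME;
    h = hash_compute(buf, WIN);
    for (size_t pos = WIN; pos < sizeof(buf); pos++) {
        h = hash_rotate(h, buf[pos - WIN], buf[pos], power);
        /* same invariant the new assert in the diff checks */
        assert(h == hash_compute(buf + pos - WIN + 1, WIN));
    }
    return 0;
}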
File diff suppressed because it is too large

zstd/decompress/huf_decompress_amd64.S (new file, 571 lines)
@@ -0,0 +1,571 @@
+#include "../common/portability_macros.h"
+
+#if ZSTD_ENABLE_ASM_X86_64_BMI2
+
+/* Stack marking
+ * ref: https://wiki.gentoo.org/wiki/Hardened/GNU_stack_quickstart
+ */
+#if defined(__linux__) && defined(__ELF__)
+.section .note.GNU-stack,"",%progbits
+#endif
+
+/* Calling convention:
+ *
+ * %rdi contains the first argument: HUF_DecompressAsmArgs*.
+ * %rbp isn't maintained (no frame pointer).
+ * %rsp contains the stack pointer that grows down.
+ * No red-zone is assumed, only addresses >= %rsp are used.
+ * All register contents are preserved.
+ *
+ * TODO: Support Windows calling convention.
+ */
+
+.global HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop
+.global HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop
+.global _HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop
+.global _HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop
+.text
+
+/* Sets up register mappings for clarity.
+ * op[], bits[], dtable & ip[0] each get their own register.
+ * ip[1,2,3] & olimit alias var[].
+ * %rax is a scratch register.
+ */
+
+#define op0    rsi
+#define op1    rbx
+#define op2    rcx
+#define op3    rdi
+
+#define ip0    r8
+#define ip1    r9
+#define ip2    r10
+#define ip3    r11
+
+#define bits0  rbp
+#define bits1  rdx
+#define bits2  r12
+#define bits3  r13
+#define dtable r14
+#define olimit r15
+
+/* var[] aliases ip[1,2,3] & olimit
+ * ip[1,2,3] are saved every iteration.
+ * olimit is only used in compute_olimit.
+ */
+#define var0   r15
+#define var1   r9
+#define var2   r10
+#define var3   r11
+
+/* 32-bit var registers */
+#define vard0  r15d
+#define vard1  r9d
+#define vard2  r10d
+#define vard3  r11d
+
+/* Calls X(N) for each stream 0, 1, 2, 3. */
+#define FOR_EACH_STREAM(X) \
+    X(0); \
+    X(1); \
+    X(2); \
+    X(3)
+
+/* Calls X(N, idx) for each stream 0, 1, 2, 3. */
+#define FOR_EACH_STREAM_WITH_INDEX(X, idx) \
+    X(0, idx); \
+    X(1, idx); \
+    X(2, idx); \
+    X(3, idx)
+
+/* Define both _HUF_* & HUF_* symbols because MacOS
+ * C symbols are prefixed with '_' & Linux symbols aren't.
+ */
+_HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop:
+HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop:
+    /* Save all registers - even if they are callee saved for simplicity. */
+    push %rax
+    push %rbx
+    push %rcx
+    push %rdx
+    push %rbp
+    push %rsi
+    push %rdi
+    push %r8
+    push %r9
+    push %r10
+    push %r11
+    push %r12
+    push %r13
+    push %r14
+    push %r15
+
+    /* Read HUF_DecompressAsmArgs* args from %rax */
+    movq %rdi, %rax
+    movq 0(%rax), %ip0
+    movq 8(%rax), %ip1
+    movq 16(%rax), %ip2
+    movq 24(%rax), %ip3
+    movq 32(%rax), %op0
+    movq 40(%rax), %op1
+    movq 48(%rax), %op2
+    movq 56(%rax), %op3
+    movq 64(%rax), %bits0
+    movq 72(%rax), %bits1
+    movq 80(%rax), %bits2
+    movq 88(%rax), %bits3
+    movq 96(%rax), %dtable
+    push %rax      /* argument */
+    push 104(%rax) /* ilimit */
+    push 112(%rax) /* oend */
+    push %olimit   /* olimit space */
+
+    subq $24, %rsp
+
+.L_4X1_compute_olimit:
+    /* Computes how many iterations we can do safely
+     * %r15, %rax may be clobbered
+     * rbx, rdx must be saved
+     * op3 & ip0 mustn't be clobbered
+     */
+    movq %rbx, 0(%rsp)
+    movq %rdx, 8(%rsp)
+
+    movq 32(%rsp), %rax /* rax = oend */
+    subq %op3, %rax     /* rax = oend - op3 */
+
+    /* r15 = (oend - op3) / 5 */
+    movabsq $-3689348814741910323, %rdx
+    mulq %rdx
+    movq %rdx, %r15
+    shrq $2, %r15
+
+    movq %ip0, %rax     /* rax = ip0 */
+    movq 40(%rsp), %rdx /* rdx = ilimit */
+    subq %rdx, %rax     /* rax = ip0 - ilimit */
+    movq %rax, %rbx     /* rbx = ip0 - ilimit */
+
+    /* rdx = (ip0 - ilimit) / 7 */
+    movabsq $2635249153387078803, %rdx
+    mulq %rdx
+    subq %rdx, %rbx
+    shrq %rbx
+    addq %rbx, %rdx
+    shrq $2, %rdx
+
+    /* r15 = min(%rdx, %r15) */
+    cmpq %rdx, %r15
+    cmova %rdx, %r15
+
+    /* r15 = r15 * 5 */
+    leaq (%r15, %r15, 4), %r15
+
+    /* olimit = op3 + r15 */
+    addq %op3, %olimit
+
+    movq 8(%rsp), %rdx
+    movq 0(%rsp), %rbx
+
+    /* If (op3 + 20 > olimit) */
+    movq %op3, %rax    /* rax = op3 */
+    addq $20, %rax     /* rax = op3 + 20 */
+    cmpq %rax, %olimit /* op3 + 20 > olimit */
+    jb .L_4X1_exit
+
+    /* If (ip1 < ip0) go to exit */
+    cmpq %ip0, %ip1
+    jb .L_4X1_exit
+
+    /* If (ip2 < ip1) go to exit */
+    cmpq %ip1, %ip2
+    jb .L_4X1_exit
+
+    /* If (ip3 < ip2) go to exit */
+    cmpq %ip2, %ip3
+    jb .L_4X1_exit
+
+/* Reads top 11 bits from bits[n]
+ * Loads dt[bits[n]] into var[n]
+ */
+#define GET_NEXT_DELT(n) \
+    movq $53, %var##n; \
+    shrxq %var##n, %bits##n, %var##n; \
+    movzwl (%dtable,%var##n,2),%vard##n
+
+/* var[n] must contain the DTable entry computed with GET_NEXT_DELT
+ * Moves var[n] to %rax
+ * bits[n] <<= var[n] & 63
+ * op[n][idx] = %rax >> 8
+ * %ah is a way to access bits [8, 16) of %rax
+ */
+#define DECODE_FROM_DELT(n, idx) \
+    movq %var##n, %rax; \
+    shlxq %var##n, %bits##n, %bits##n; \
+    movb %ah, idx(%op##n)
+
+/* Assumes GET_NEXT_DELT has been called.
+ * Calls DECODE_FROM_DELT then GET_NEXT_DELT
+ */
+#define DECODE_AND_GET_NEXT(n, idx) \
+    DECODE_FROM_DELT(n, idx); \
+    GET_NEXT_DELT(n) \
+
+/* // ctz & nbBytes is stored in bits[n]
+ * // nbBits is stored in %rax
+ * ctz = CTZ[bits[n]]
+ * nbBits = ctz & 7
+ * nbBytes = ctz >> 3
+ * op[n] += 5
+ * ip[n] -= nbBytes
+ * // Note: x86-64 is little-endian ==> no bswap
+ * bits[n] = MEM_readST(ip[n]) | 1
+ * bits[n] <<= nbBits
+ */
+#define RELOAD_BITS(n) \
+    bsfq %bits##n, %bits##n; \
+    movq %bits##n, %rax; \
+    andq $7, %rax; \
+    shrq $3, %bits##n; \
+    leaq 5(%op##n), %op##n; \
+    subq %bits##n, %ip##n; \
+    movq (%ip##n), %bits##n; \
+    orq $1, %bits##n; \
+    shlx %rax, %bits##n, %bits##n
+
+    /* Store clobbered variables on the stack */
+    movq %olimit, 24(%rsp)
+    movq %ip1, 0(%rsp)
+    movq %ip2, 8(%rsp)
+    movq %ip3, 16(%rsp)
+
+    /* Call GET_NEXT_DELT for each stream */
+    FOR_EACH_STREAM(GET_NEXT_DELT)
+
+    .p2align 6
+
+.L_4X1_loop_body:
+    /* Decode 5 symbols in each of the 4 streams (20 total)
+     * Must have called GET_NEXT_DELT for each stream
+     */
+    FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 0)
+    FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 1)
+    FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 2)
+    FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 3)
+    FOR_EACH_STREAM_WITH_INDEX(DECODE_FROM_DELT, 4)
+
+    /* Load ip[1,2,3] from stack (var[] aliases them)
+     * ip[] is needed for RELOAD_BITS
+     * Each will be stored back to the stack after RELOAD
+     */
+    movq 0(%rsp), %ip1
+    movq 8(%rsp), %ip2
+    movq 16(%rsp), %ip3
+
+    /* Reload each stream & fetch the next table entry
+     * to prepare for the next iteration
+     */
+    RELOAD_BITS(0)
+    GET_NEXT_DELT(0)
+
+    RELOAD_BITS(1)
+    movq %ip1, 0(%rsp)
+    GET_NEXT_DELT(1)
+
+    RELOAD_BITS(2)
+    movq %ip2, 8(%rsp)
+    GET_NEXT_DELT(2)
+
+    RELOAD_BITS(3)
+    movq %ip3, 16(%rsp)
+    GET_NEXT_DELT(3)
+
+    /* If op3 < olimit: continue the loop */
+    cmp %op3, 24(%rsp)
+    ja .L_4X1_loop_body
+
+    /* Reload ip[1,2,3] from stack */
+    movq 0(%rsp), %ip1
+    movq 8(%rsp), %ip2
+    movq 16(%rsp), %ip3
+
+    /* Re-compute olimit */
+    jmp .L_4X1_compute_olimit
+
+#undef GET_NEXT_DELT
+#undef DECODE_FROM_DELT
+#undef DECODE
+#undef RELOAD_BITS
+.L_4X1_exit:
+    addq $24, %rsp
+
+    /* Restore stack (oend & olimit) */
+    pop %rax /* olimit */
+    pop %rax /* oend */
+    pop %rax /* ilimit */
+    pop %rax /* arg */
+
+    /* Save ip / op / bits */
+    movq %ip0, 0(%rax)
+    movq %ip1, 8(%rax)
+    movq %ip2, 16(%rax)
+    movq %ip3, 24(%rax)
+    movq %op0, 32(%rax)
+    movq %op1, 40(%rax)
+    movq %op2, 48(%rax)
+    movq %op3, 56(%rax)
+    movq %bits0, 64(%rax)
+    movq %bits1, 72(%rax)
+    movq %bits2, 80(%rax)
+    movq %bits3, 88(%rax)
+
+    /* Restore registers */
+    pop %r15
+    pop %r14
+    pop %r13
+    pop %r12
+    pop %r11
+    pop %r10
+    pop %r9
+    pop %r8
+    pop %rdi
+    pop %rsi
+    pop %rbp
+    pop %rdx
+    pop %rcx
+    pop %rbx
+    pop %rax
+    ret
+
+_HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop:
+HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop:
+    /* Save all registers - even if they are callee saved for simplicity. */
+    push %rax
+    push %rbx
+    push %rcx
+    push %rdx
+    push %rbp
+    push %rsi
+    push %rdi
+    push %r8
+    push %r9
+    push %r10
+    push %r11
+    push %r12
+    push %r13
+    push %r14
+    push %r15
+
+    movq %rdi, %rax
+    movq 0(%rax), %ip0
+    movq 8(%rax), %ip1
+    movq 16(%rax), %ip2
+    movq 24(%rax), %ip3
+    movq 32(%rax), %op0
+    movq 40(%rax), %op1
+    movq 48(%rax), %op2
+    movq 56(%rax), %op3
+    movq 64(%rax), %bits0
+    movq 72(%rax), %bits1
+    movq 80(%rax), %bits2
+    movq 88(%rax), %bits3
+    movq 96(%rax), %dtable
+    push %rax      /* argument */
+    push %rax      /* olimit */
+    push 104(%rax) /* ilimit */
+
+    movq 112(%rax), %rax
+    push %rax /* oend3 */
+
+    movq %op3, %rax
+    push %rax /* oend2 */
+
+    movq %op2, %rax
+    push %rax /* oend1 */
+
+    movq %op1, %rax
+    push %rax /* oend0 */
+
+    /* Scratch space */
+    subq $8, %rsp
+
+.L_4X2_compute_olimit:
+    /* Computes how many iterations we can do safely
+     * %r15, %rax may be clobbered
+     * rdx must be saved
+     * op[1,2,3,4] & ip0 mustn't be clobbered
+     */
+    movq %rdx, 0(%rsp)
+
+    /* We can consume up to 7 input bytes each iteration. */
+    movq %ip0, %rax     /* rax = ip0 */
+    movq 40(%rsp), %rdx /* rdx = ilimit */
+    subq %rdx, %rax     /* rax = ip0 - ilimit */
+    movq %rax, %r15     /* r15 = ip0 - ilimit */
+
+    /* rdx = rax / 7 */
+    movabsq $2635249153387078803, %rdx
+    mulq %rdx
+    subq %rdx, %r15
+    shrq %r15
+    addq %r15, %rdx
+    shrq $2, %rdx
+
+    /* r15 = (ip0 - ilimit) / 7 */
+    movq %rdx, %r15
+
+    movabsq $-3689348814741910323, %rdx
+    movq 8(%rsp), %rax /* rax = oend0 */
+    subq %op0, %rax    /* rax = oend0 - op0 */
+    mulq %rdx
+    shrq $3, %rdx      /* rdx = rax / 10 */
+
+    /* r15 = min(%rdx, %r15) */
+    cmpq %rdx, %r15
+    cmova %rdx, %r15
+
+    movabsq $-3689348814741910323, %rdx
+    movq 16(%rsp), %rax /* rax = oend1 */
+    subq %op1, %rax     /* rax = oend1 - op1 */
+    mulq %rdx
+    shrq $3, %rdx       /* rdx = rax / 10 */
+
+    /* r15 = min(%rdx, %r15) */
+    cmpq %rdx, %r15
+    cmova %rdx, %r15
+
+    movabsq $-3689348814741910323, %rdx
+    movq 24(%rsp), %rax /* rax = oend2 */
+    subq %op2, %rax     /* rax = oend2 - op2 */
+    mulq %rdx
+    shrq $3, %rdx       /* rdx = rax / 10 */
+
+    /* r15 = min(%rdx, %r15) */
+    cmpq %rdx, %r15
+    cmova %rdx, %r15
+
+    movabsq $-3689348814741910323, %rdx
+    movq 32(%rsp), %rax /* rax = oend3 */
+    subq %op3, %rax     /* rax = oend3 - op3 */
+    mulq %rdx
+    shrq $3, %rdx       /* rdx = rax / 10 */
+
+    /* r15 = min(%rdx, %r15) */
+    cmpq %rdx, %r15
+    cmova %rdx, %r15
+
+    /* olimit = op3 + 5 * r15 */
+    movq %r15, %rax
+    leaq (%op3, %rax, 4), %olimit
+    addq %rax, %olimit
+
+    movq 0(%rsp), %rdx
+
+    /* If (op3 + 10 > olimit) */
+    movq %op3, %rax    /* rax = op3 */
+    addq $10, %rax     /* rax = op3 + 10 */
+    cmpq %rax, %olimit /* op3 + 10 > olimit */
+    jb .L_4X2_exit
+
+    /* If (ip1 < ip0) go to exit */
+    cmpq %ip0, %ip1
+    jb .L_4X2_exit
+
+    /* If (ip2 < ip1) go to exit */
+    cmpq %ip1, %ip2
+    jb .L_4X2_exit
+
+    /* If (ip3 < ip2) go to exit */
+    cmpq %ip2, %ip3
+    jb .L_4X2_exit
+
+#define DECODE(n, idx) \
+    movq %bits##n, %rax; \
+    shrq $53, %rax; \
+    movzwl 0(%dtable,%rax,4),%r8d; \
+    movzbl 2(%dtable,%rax,4),%r15d; \
+    movzbl 3(%dtable,%rax,4),%eax; \
+    movw %r8w, (%op##n); \
+    shlxq %r15, %bits##n, %bits##n; \
+    addq %rax, %op##n
+
+#define RELOAD_BITS(n) \
+    bsfq %bits##n, %bits##n; \
+    movq %bits##n, %rax; \
+    shrq $3, %bits##n; \
+    andq $7, %rax; \
+    subq %bits##n, %ip##n; \
+    movq (%ip##n), %bits##n; \
+    orq $1, %bits##n; \
+    shlxq %rax, %bits##n, %bits##n
+
+
+    movq %olimit, 48(%rsp)
+
+    .p2align 6
+
+.L_4X2_loop_body:
+    /* We clobber r8, so store it on the stack */
+    movq %r8, 0(%rsp)
+
+    /* Decode 5 symbols from each of the 4 streams (20 symbols total). */
+    FOR_EACH_STREAM_WITH_INDEX(DECODE, 0)
+    FOR_EACH_STREAM_WITH_INDEX(DECODE, 1)
+    FOR_EACH_STREAM_WITH_INDEX(DECODE, 2)
+    FOR_EACH_STREAM_WITH_INDEX(DECODE, 3)
+    FOR_EACH_STREAM_WITH_INDEX(DECODE, 4)
+
+    /* Reload r8 */
+    movq 0(%rsp), %r8
+
+    FOR_EACH_STREAM(RELOAD_BITS)
+
+    cmp %op3, 48(%rsp)
+    ja .L_4X2_loop_body
+    jmp .L_4X2_compute_olimit
+
+#undef DECODE
+#undef RELOAD_BITS
+.L_4X2_exit:
+    addq $8, %rsp
+    /* Restore stack (oend & olimit) */
+    pop %rax /* oend0 */
+    pop %rax /* oend1 */
+    pop %rax /* oend2 */
+    pop %rax /* oend3 */
+    pop %rax /* ilimit */
+    pop %rax /* olimit */
+    pop %rax /* arg */
+
+    /* Save ip / op / bits */
+    movq %ip0, 0(%rax)
+    movq %ip1, 8(%rax)
+    movq %ip2, 16(%rax)
+    movq %ip3, 24(%rax)
+    movq %op0, 32(%rax)
+    movq %op1, 40(%rax)
+    movq %op2, 48(%rax)
+    movq %op3, 56(%rax)
+    movq %bits0, 64(%rax)
+    movq %bits1, 72(%rax)
+    movq %bits2, 80(%rax)
+    movq %bits3, 88(%rax)
+
+    /* Restore registers */
+    pop %r15
+    pop %r14
+    pop %r13
+    pop %r12
+    pop %r11
+    pop %r10
+    pop %r9
+    pop %r8
+    pop %rdi
+    pop %rsi
+    pop %rbp
+    pop %rdx
+    pop %rcx
+    pop %rbx
+    pop %rax
+    ret
+
+#endif
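The movabsq/mulq sequences in the new assembly are compiler-style division by constants: multiplying by 0xCCCCCCCCCCCCCCCD (the two's-complement form of -3689348814741910323) and shifting the high half right by 2 divides by 5, while 0x2492492492492493 (2635249153387078803) with one sub/shift/add correction divides by 7. A quick check of both sequences in C (assumes a compiler with __uint128_t, e.g. GCC or Clang):

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* x / 5: take the high 64 bits of x * 0xCCCCCCCCCCCCCCCD, then >> 2 --
 * exactly the movabsq/mulq/shrq $2 sequence used for (oend - op3) / 5. */
static uint64_t div5(uint64_t x)
{
    uint64_t const hi = (uint64_t)(((__uint128_t)x * 0xCCCCCCCCCCCCCCCDull) >> 64);
    return hi >> 2;
}

/* x / 7 needs a correction step because the reciprocal doesn't fit in 64 bits:
 * this mirrors the mulq/subq/shrq/addq/shrq $2 sequence used for (ip0 - ilimit) / 7. */
static uint64_t div7(uint64_t x)
{
    uint64_t const hi = (uint64_t)(((__uint128_t)x * 0x2492492492492493ull) >> 64);
    uint64_t const t  = ((x - hi) >> 1) + hi;
    return t >> 2;
}

int main(void)
{
    for (uint64_t x = 0; x < 1000000; x++) {
        assert(div5(x) == x / 5);
        assert(div7(x) == x / 7);
    }
    puts("reciprocal division matches");
    return 0;
}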
zstd/decompress/zstd_decompress.c
@@ -56,7 +56,6 @@
 *  Dependencies
 *********************************************************/
 #include "../common/zstd_deps.h"   /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
-#include "../common/cpu.h"         /* bmi2 */
 #include "../common/mem.h"         /* low level memory routines */
 #define FSE_STATIC_LINKING_ONLY
 #include "../common/fse.h"
@@ -177,12 +176,15 @@ static const ZSTD_DDict* ZSTD_DDictHashSet_getDDict(ZSTD_DDictHashSet* hashSet,
 static ZSTD_DDictHashSet* ZSTD_createDDictHashSet(ZSTD_customMem customMem) {
     ZSTD_DDictHashSet* ret = (ZSTD_DDictHashSet*)ZSTD_customMalloc(sizeof(ZSTD_DDictHashSet), customMem);
     DEBUGLOG(4, "Allocating new hash set");
+    if (!ret)
+        return NULL;
     ret->ddictPtrTable = (const ZSTD_DDict**)ZSTD_customCalloc(DDICT_HASHSET_TABLE_BASE_SIZE * sizeof(ZSTD_DDict*), customMem);
-    ret->ddictPtrTableSize = DDICT_HASHSET_TABLE_BASE_SIZE;
-    ret->ddictPtrCount = 0;
-    if (!ret || !ret->ddictPtrTable) {
+    if (!ret->ddictPtrTable) {
+        ZSTD_customFree(ret, customMem);
         return NULL;
     }
+    ret->ddictPtrTableSize = DDICT_HASHSET_TABLE_BASE_SIZE;
+    ret->ddictPtrCount = 0;
     return ret;
 }
 
@@ -255,11 +257,15 @@ static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
     dctx->inBuffSize = 0;
     dctx->outBuffSize = 0;
     dctx->streamStage = zdss_init;
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
     dctx->legacyContext = NULL;
     dctx->previousLegacyVersion = 0;
+#endif
     dctx->noForwardProgress = 0;
     dctx->oversizedDuration = 0;
-    dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
+#if DYNAMIC_BMI2
+    dctx->bmi2 = ZSTD_cpuSupportsBmi2();
+#endif
     dctx->ddictSet = NULL;
     ZSTD_DCtx_resetParameters(dctx);
 #ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
@@ -280,8 +286,7 @@ ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize)
     return dctx;
 }
 
-ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
-{
+static ZSTD_DCtx* ZSTD_createDCtx_internal(ZSTD_customMem customMem) {
     if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
 
     {   ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_customMalloc(sizeof(*dctx), customMem);
@@ -292,10 +297,15 @@ ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
     }
 }
 
+ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
+{
+    return ZSTD_createDCtx_internal(customMem);
+}
+
 ZSTD_DCtx* ZSTD_createDCtx(void)
 {
     DEBUGLOG(3, "ZSTD_createDCtx");
-    return ZSTD_createDCtx_advanced(ZSTD_defaultCMem);
+    return ZSTD_createDCtx_internal(ZSTD_defaultCMem);
 }
 
 static void ZSTD_clearDict(ZSTD_DCtx* dctx)
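The init hunk above only probes the CPU when DYNAMIC_BMI2 is set, i.e. when the build actually contains both a BMI2 and a portable code path to dispatch between; otherwise the field is left untouched. A skeleton of that pattern with hypothetical names (zstd's real gate is defined in portability_macros.h):

#include <stdio.h>

#ifndef DYNAMIC_BMI2
#  define DYNAMIC_BMI2 1            /* assume a build carrying both code paths */
#endif

static int cpu_supports_bmi2(void) { return 0; }     /* stand-in for a cpuid probe */

static unsigned decode_portable(unsigned x) { return x + 1; }
static unsigned decode_bmi2(unsigned x)     { return x + 1; } /* would use BMI2 ops */

typedef struct { int bmi2; } DCtx;

static unsigned decode(const DCtx* d, unsigned x)
{
    return d->bmi2 ? decode_bmi2(x) : decode_portable(x);
}

int main(void)
{
    DCtx d;
#if DYNAMIC_BMI2
    d.bmi2 = cpu_supports_bmi2();   /* probed once, cached in the context */
#else
    d.bmi2 = 0;                     /* only the portable path was compiled in */
#endif
    printf("%u\n", decode(&d, 41));
    return 0;
}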
@@ -380,6 +390,19 @@ unsigned ZSTD_isFrame(const void* buffer, size_t size)
     return 0;
 }
 
+/*! ZSTD_isSkippableFrame() :
+ *  Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame.
+ *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
+ */
+unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size)
+{
+    if (size < ZSTD_FRAMEIDSIZE) return 0;
+    {   U32 const magic = MEM_readLE32(buffer);
+        if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
+    }
+    return 0;
+}
+
 /** ZSTD_frameHeaderSize_internal() :
  *  srcSize must be large enough to reach header size fields.
  *  note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless.
@@ -466,7 +489,9 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s
         }
         switch(dictIDSizeCode)
         {
-            default: assert(0);  /* impossible */
+            default:
+                assert(0);  /* impossible */
+                ZSTD_FALLTHROUGH;
             case 0 : break;
             case 1 : dictID = ip[pos]; pos++; break;
             case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break;
@@ -474,7 +499,9 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s
         }
         switch(fcsID)
         {
-            default: assert(0);  /* impossible */
+            default:
+                assert(0);  /* impossible */
+                ZSTD_FALLTHROUGH;
             case 0 : if (singleSegment) frameContentSize = ip[pos]; break;
             case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break;
             case 2 : frameContentSize = MEM_readLE32(ip+pos); break;
@@ -503,7 +530,6 @@ size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t src
     return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1);
 }
 
-
 /** ZSTD_getFrameContentSize() :
  *  compatible with legacy mode
  * @return : decompressed size of the single frame pointed to be `src` if known, otherwise
@@ -544,6 +570,37 @@ static size_t readSkippableFrameSize(void const* src, size_t srcSize)
     }
 }
 
+/*! ZSTD_readSkippableFrame() :
+ * Retrieves a zstd skippable frame containing data given by src, and writes it to dst buffer.
+ *
+ * The parameter magicVariant will receive the magicVariant that was supplied when the frame was written,
+ * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START.  This can be NULL if the caller is not interested
+ * in the magicVariant.
+ *
+ * Returns an error if destination buffer is not large enough, or if the frame is not skippable.
+ *
+ * @return : number of bytes written or a ZSTD error.
+ */
+ZSTDLIB_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsigned* magicVariant,
+                                           const void* src, size_t srcSize)
+{
+    U32 const magicNumber = MEM_readLE32(src);
+    size_t skippableFrameSize = readSkippableFrameSize(src, srcSize);
+    size_t skippableContentSize = skippableFrameSize - ZSTD_SKIPPABLEHEADERSIZE;
+
+    /* check input validity */
+    RETURN_ERROR_IF(!ZSTD_isSkippableFrame(src, srcSize), frameParameter_unsupported, "");
+    RETURN_ERROR_IF(skippableFrameSize < ZSTD_SKIPPABLEHEADERSIZE || skippableFrameSize > srcSize, srcSize_wrong, "");
+    RETURN_ERROR_IF(skippableContentSize > dstCapacity, dstSize_tooSmall, "");
+
+    /* deliver payload */
+    if (skippableContentSize > 0  && dst != NULL)
+        ZSTD_memcpy(dst, (const BYTE *)src + ZSTD_SKIPPABLEHEADERSIZE, skippableContentSize);
+    if (magicVariant != NULL)
+        *magicVariant = magicNumber - ZSTD_MAGIC_SKIPPABLE_START;
+    return skippableContentSize;
+}
+
 /** ZSTD_findDecompressedSize() :
  *  compatible with legacy mode
  *  `srcSize` must be the exact length of some number of ZSTD compressed and/or
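ZSTD_isSkippableFrame() and ZSTD_readSkippableFrame() added above round out the skippable-frame API for embedding side-channel data in a stream. A hedged usage sketch (these helpers, like ZSTD_writeSkippableFrame(), sit behind ZSTD_STATIC_LINKING_ONLY in zstd 1.5.1; error handling abbreviated):

#define ZSTD_STATIC_LINKING_ONLY   /* the skippable-frame helpers are experimental API */
#include <zstd.h>
#include <stdio.h>

int main(void)
{
    const char payload[] = "user metadata";
    char frame[64];
    char out[64];
    unsigned magicVariant;

    /* Frame magic is ZSTD_MAGIC_SKIPPABLE_START + variant (variant 0 here). */
    size_t const frameSize = ZSTD_writeSkippableFrame(frame, sizeof(frame),
                                                      payload, sizeof(payload), 0);
    if (ZSTD_isError(frameSize)) return 1;

    if (ZSTD_isSkippableFrame(frame, frameSize)) {
        size_t const n = ZSTD_readSkippableFrame(out, sizeof(out), &magicVariant,
                                                 frame, frameSize);
        if (!ZSTD_isError(n))
            printf("variant=%u payload=%s\n", magicVariant, out);
    }
    return 0;
}

Decoders that don't care about the payload simply skip such frames, which is what makes them safe to interleave with compressed frames.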
@@ -858,7 +915,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
         switch(blockProperties.blockType)
         {
         case bt_compressed:
-            decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oend-op), ip, cBlockSize, /* frame */ 1);
+            decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oend-op), ip, cBlockSize, /* frame */ 1, not_streaming);
             break;
         case bt_raw :
             decodedSize = ZSTD_copyRawBlock(op, (size_t)(oend-op), ip, cBlockSize);
@@ -1009,7 +1066,7 @@ static ZSTD_DDict const* ZSTD_getDDict(ZSTD_DCtx* dctx)
     switch (dctx->dictUses) {
     default:
         assert(0 /* Impossible */);
-        /* fall-through */
+        ZSTD_FALLTHROUGH;
     case ZSTD_dont_use:
         ZSTD_clearDict(dctx);
         return NULL;
@@ -1031,7 +1088,7 @@ size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t sr
 {
 #if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)
     size_t regenSize;
-    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
+    ZSTD_DCtx* const dctx = ZSTD_createDCtx_internal(ZSTD_defaultCMem);
     RETURN_ERROR_IF(dctx==NULL, memory_allocation, "NULL pointer!");
     regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
     ZSTD_freeDCtx(dctx);
@@ -1065,7 +1122,7 @@ static size_t ZSTD_nextSrcSizeToDecompressWithInputSize(ZSTD_DCtx* dctx, size_t
         return dctx->expected;
     if (dctx->bType != bt_raw)
         return dctx->expected;
-    return MIN(MAX(inputSize, 1), dctx->expected);
+    return BOUNDED(1, inputSize, dctx->expected);
 }
 
 ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) {
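BOUNDED(1, inputSize, dctx->expected) names the clamp that MIN(MAX(inputSize, 1), dctx->expected) spelled out; the two agree whenever dctx->expected >= 1. A sketch of the macro's presumed definition (the real one lives in zstd's internal headers):

    /* clamp val into [min, max] */
    #define BOUNDED(min, val, max) (MAX(min, MIN(val, max)))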
@@ -1073,7 +1130,9 @@ ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) {
     {
     default:   /* should not happen */
         assert(0);
+        ZSTD_FALLTHROUGH;
     case ZSTDds_getFrameHeaderSize:
+        ZSTD_FALLTHROUGH;
     case ZSTDds_decodeFrameHeader:
         return ZSTDnit_frameHeader;
     case ZSTDds_decodeBlockHeader:
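ZSTD_FALLTHROUGH turns the old /* fall-through */ comments into an annotation that compilers can verify under -Wimplicit-fallthrough. zstd's actual definition sits in its compiler portability header; a simplified sketch of the usual shape:

    #if defined(__cplusplus) && (__cplusplus >= 201703L)
    #  define ZSTD_FALLTHROUGH [[fallthrough]]
    #elif defined(__GNUC__) && (__GNUC__ >= 7)
    #  define ZSTD_FALLTHROUGH __attribute__((__fallthrough__))
    #else
    #  define ZSTD_FALLTHROUGH /* fall-through */
    #endif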
@@ -1085,6 +1144,7 @@ ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) {
     case ZSTDds_checkChecksum:
         return ZSTDnit_checksum;
     case ZSTDds_decodeSkippableHeader:
+        ZSTD_FALLTHROUGH;
     case ZSTDds_skipFrame:
         return ZSTDnit_skippableFrame;
     }
@@ -1168,7 +1228,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
         {
         case bt_compressed:
             DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed");
-            rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1);
+            rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1, is_streaming);
            dctx->expected = 0;  /* Streaming not supported */
             break;
         case bt_raw :
@@ -1493,7 +1553,7 @@ size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
 ZSTD_DStream* ZSTD_createDStream(void)
 {
     DEBUGLOG(3, "ZSTD_createDStream");
-    return ZSTD_createDStream_advanced(ZSTD_defaultCMem);
+    return ZSTD_createDCtx_internal(ZSTD_defaultCMem);
 }
 
 ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize)
@@ -1503,7 +1563,7 @@ ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize)
 
 ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem)
 {
-    return ZSTD_createDCtx_advanced(customMem);
+    return ZSTD_createDCtx_internal(customMem);
 }
 
 size_t ZSTD_freeDStream(ZSTD_DStream* zds)
@@ -1763,7 +1823,8 @@ size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx)
 size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)
 {
     size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
-    unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2);
+    /* space is needed to store the litbuffer after the output of a given block without stomping the extDict of a previous run, as well as to cover both windows against wildcopy*/
+    unsigned long long const neededRBSize = windowSize + blockSize + ZSTD_BLOCKSIZE_MAX + (WILDCOPY_OVERLENGTH * 2);
     unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
     size_t const minRBSize = (size_t) neededSize;
     RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize,
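A worked example of the enlarged bound, assuming WILDCOPY_OVERLENGTH is 32 as in zstd's internal headers: for windowSize = 1 MiB and a large frameContentSize,

    blockSize    = MIN(1 MiB, ZSTD_BLOCKSIZE_MAX)  = 128 KiB
    neededRBSize = 1048576 + 131072 + 131072 + 64  = 1310784 bytes

The previous formula gave 1048576 + 131072 + 64 = 1179712 bytes; the extra ZSTD_BLOCKSIZE_MAX term reserves room for the literal buffer that can now live inside the rolling buffer.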
@@ -1897,10 +1958,12 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
             DEBUGLOG(5, "stage zdss_init => transparent reset ");
             zds->streamStage = zdss_loadHeader;
             zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
             zds->legacyVersion = 0;
+#endif
             zds->hostageByte = 0;
             zds->expectedOutBuffer = *output;
-            /* fall-through */
+            ZSTD_FALLTHROUGH;
 
         case zdss_loadHeader :
             DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));
@@ -2038,7 +2101,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
                         zds->outBuffSize = neededOutBuffSize;
             }   }   }
             zds->streamStage = zdss_read;
-            /* fall-through */
+            ZSTD_FALLTHROUGH;
 
         case zdss_read:
             DEBUGLOG(5, "stage zdss_read");
@@ -2057,7 +2120,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
             }   }
             if (ip==iend) { someMoreWork = 0; break; }   /* no more input */
             zds->streamStage = zdss_load;
-            /* fall-through */
+            ZSTD_FALLTHROUGH;
 
         case zdss_load:
             {   size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
File diff suppressed because it is too large
@@ -33,6 +33,12 @@
  */
 
 
+/* Streaming state is used to inform allocation of the literal buffer */
+typedef enum {
+    not_streaming = 0,
+    is_streaming = 1
+} streaming_operation;
+
 /* ZSTD_decompressBlock_internal() :
  * decompress block, starting at `src`,
  * into destination buffer `dst`.
@@ -41,7 +47,7 @@
  */
 size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
                       void* dst, size_t dstCapacity,
-               const void* src, size_t srcSize, const int frame);
+               const void* src, size_t srcSize, const int frame, const streaming_operation streaming);
 
 /* ZSTD_buildFSETable() :
  * generate FSE decoding table for one symbol (ll, ml or off)
@@ -54,7 +60,7 @@ size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
  */
 void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
             const short* normalizedCounter, unsigned maxSymbolValue,
-            const U32* baseValue, const U32* nbAdditionalBits,
+            const U32* baseValue, const U8* nbAdditionalBits,
             unsigned tableLog, void* wksp, size_t wkspSize,
             int bmi2);
 
@@ -20,7 +20,7 @@
  *  Dependencies
 *********************************************************/
 #include "../common/mem.h"             /* BYTE, U16, U32 */
-#include "../common/zstd_internal.h"   /* ZSTD_seqSymbol */
+#include "../common/zstd_internal.h"   /* constants : MaxLL, MaxML, MaxOff, LLFSELog, etc. */
 
 
 
@@ -40,7 +40,7 @@ static UNUSED_ATTR const U32 OF_base[MaxOff+1] = {
                  0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
                  0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD };
 
-static UNUSED_ATTR const U32 OF_bits[MaxOff+1] = {
+static UNUSED_ATTR const U8 OF_bits[MaxOff+1] = {
                      0, 1, 2, 3, 4, 5, 6, 7,
                      8, 9, 10, 11, 12, 13, 14, 15,
                     16, 17, 18, 19, 20, 21, 22, 23,
@@ -106,6 +106,22 @@ typedef struct {
     size_t ddictPtrCount;
 } ZSTD_DDictHashSet;
 
+#ifndef ZSTD_DECODER_INTERNAL_BUFFER
+#  define ZSTD_DECODER_INTERNAL_BUFFER  (1 << 16)
+#endif
+
+#define ZSTD_LBMIN 64
+#define ZSTD_LBMAX (128 << 10)
+
+/* extra buffer, compensates when dst is not large enough to store litBuffer */
+#define ZSTD_LITBUFFEREXTRASIZE  BOUNDED(ZSTD_LBMIN, ZSTD_DECODER_INTERNAL_BUFFER, ZSTD_LBMAX)
+
+typedef enum {
+    ZSTD_not_in_dst = 0,  /* Stored entirely within litExtraBuffer */
+    ZSTD_in_dst = 1,      /* Stored entirely within dst (in memory after current output write) */
+    ZSTD_split = 2        /* Split between litExtraBuffer and dst */
+} ZSTD_litLocation_e;
+
 struct ZSTD_DCtx_s
 {
     const ZSTD_seqSymbol* LLTptr;
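With the default setting the clamp is a no-op:

    ZSTD_LITBUFFEREXTRASIZE = BOUNDED(ZSTD_LBMIN, ZSTD_DECODER_INTERNAL_BUFFER, ZSTD_LBMAX)
                            = BOUNDED(64, 1 << 16, 128 << 10)
                            = BOUNDED(64, 65536, 131072) = 65536 bytes

An external override of ZSTD_DECODER_INTERNAL_BUFFER below 64 bytes or above 128 KiB is clamped back into that range.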
@@ -136,7 +152,9 @@ struct ZSTD_DCtx_s
     size_t litSize;
     size_t rleSize;
     size_t staticSize;
+#if DYNAMIC_BMI2 != 0
     int bmi2;                     /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
+#endif
 
     /* dictionary */
     ZSTD_DDict* ddictLocal;
@@ -158,16 +176,21 @@ struct ZSTD_DCtx_s
     size_t outStart;
     size_t outEnd;
     size_t lhSize;
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
     void* legacyContext;
     U32 previousLegacyVersion;
     U32 legacyVersion;
+#endif
     U32 hostageByte;
     int noForwardProgress;
     ZSTD_bufferMode_e outBufferMode;
     ZSTD_outBuffer expectedOutBuffer;
 
     /* workspace */
-    BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];
+    BYTE* litBuffer;
+    const BYTE* litBufferEnd;
+    ZSTD_litLocation_e litBufferLocation;
+    BYTE litExtraBuffer[ZSTD_LITBUFFEREXTRASIZE + WILDCOPY_OVERLENGTH]; /* literal buffer can be split between storage within dst and within this scratch buffer */
     BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
 
     size_t oversizedDuration;
@@ -183,6 +206,14 @@ struct ZSTD_DCtx_s
 #endif
 };  /* typedef'd to ZSTD_DCtx within "zstd.h" */
 
+MEM_STATIC int ZSTD_DCtx_get_bmi2(const struct ZSTD_DCtx_s *dctx) {
+#if DYNAMIC_BMI2 != 0
+    return dctx->bmi2;
+#else
+    (void)dctx;
+    return 0;
+#endif
+}
+
 /*-*******************************************************
  *  Shared internal functions
@@ -40,6 +40,13 @@
 /*-*************************************
 *  Constants
 ***************************************/
+/**
+ * There are 32bit indexes used to ref samples, so limit samples size to 4GB
+ * on 64bit builds.
+ * For 32bit builds we choose 1 GB.
+ * Most 32bit platforms have 2GB user-mode addressable space and we allocate a large
+ * contiguous buffer, so 1GB is already a high limit.
+ */
 #define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
 #define COVER_DEFAULT_SPLITPOINT 1.0
 
@@ -47,7 +54,7 @@
 *  Console display
 ***************************************/
 #ifndef LOCALDISPLAYLEVEL
-static int g_displayLevel = 2;
+static int g_displayLevel = 0;
 #endif
 #undef  DISPLAY
 #define DISPLAY(...)                                                           \
@@ -735,7 +742,7 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
     COVER_map_t activeDmers;
     parameters.splitPoint = 1.0;
     /* Initialize global data */
-    g_displayLevel = parameters.zParams.notificationLevel;
+    g_displayLevel = (int)parameters.zParams.notificationLevel;
     /* Checks */
     if (!COVER_checkParameters(parameters, dictBufferCapacity)) {
       DISPLAYLEVEL(1, "Cover parameters incorrect\n");
@@ -32,6 +32,13 @@
 /*-*************************************
 *  Constants
 ***************************************/
+/**
+ * There are 32bit indexes used to ref samples, so limit samples size to 4GB
+ * on 64bit builds.
+ * For 32bit builds we choose 1 GB.
+ * Most 32bit platforms have 2GB user-mode addressable space and we allocate a large
+ * contiguous buffer, so 1GB is already a high limit.
+ */
 #define FASTCOVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
 #define FASTCOVER_MAX_F 31
 #define FASTCOVER_MAX_ACCEL 10
@@ -44,7 +51,7 @@
 *  Console display
 ***************************************/
 #ifndef LOCALDISPLAYLEVEL
-static int g_displayLevel = 2;
+static int g_displayLevel = 0;
 #endif
 #undef  DISPLAY
 #define DISPLAY(...)                                                           \
@@ -549,7 +556,7 @@ ZDICT_trainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity,
     ZDICT_cover_params_t coverParams;
     FASTCOVER_accel_t accelParams;
     /* Initialize global data */
-    g_displayLevel = parameters.zParams.notificationLevel;
+    g_displayLevel = (int)parameters.zParams.notificationLevel;
     /* Assign splitPoint and f if not provided */
     parameters.splitPoint = 1.0;
     parameters.f = parameters.f == 0 ? DEFAULT_F : parameters.f;
@@ -632,7 +639,7 @@ ZDICT_optimizeTrainFromBuffer_fastCover(
     const unsigned accel = parameters->accel == 0 ? DEFAULT_ACCEL : parameters->accel;
     const unsigned shrinkDict = 0;
     /* Local variables */
-    const int displayLevel = parameters->zParams.notificationLevel;
+    const int displayLevel = (int)parameters->zParams.notificationLevel;
     unsigned iteration = 1;
     unsigned d;
     unsigned k;
@@ -716,7 +723,7 @@ ZDICT_optimizeTrainFromBuffer_fastCover(
       data->parameters.splitPoint = splitPoint;
       data->parameters.steps = kSteps;
       data->parameters.shrinkDict = shrinkDict;
-      data->parameters.zParams.notificationLevel = g_displayLevel;
+      data->parameters.zParams.notificationLevel = (unsigned)g_displayLevel;
       /* Check the parameters */
       if (!FASTCOVER_checkParameters(data->parameters, dictBufferCapacity,
                                      data->ctx->f, accel)) {
@@ -135,22 +135,32 @@ static unsigned ZDICT_NbCommonBytes (size_t val)
     if (MEM_isLittleEndian()) {
         if (MEM_64bits()) {
 #       if defined(_MSC_VER) && defined(_WIN64)
-            unsigned long r = 0;
+            if (val != 0) {
+                unsigned long r;
             _BitScanForward64(&r, (U64)val);
             return (unsigned)(r >> 3);
+            } else {
+                /* Should not reach this code path */
+                __assume(0);
+            }
 #       elif defined(__GNUC__) && (__GNUC__ >= 3)
-            return (__builtin_ctzll((U64)val) >> 3);
+            return (unsigned)(__builtin_ctzll((U64)val) >> 3);
 #       else
             static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
             return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
 #       endif
         } else { /* 32 bits */
 #       if defined(_MSC_VER)
-            unsigned long r = 0;
+            if (val != 0) {
+                unsigned long r;
             _BitScanForward(&r, (U32)val);
             return (unsigned)(r >> 3);
+            } else {
+                /* Should not reach this code path */
+                __assume(0);
+            }
 #       elif defined(__GNUC__) && (__GNUC__ >= 3)
-            return (__builtin_ctz((U32)val) >> 3);
+            return (unsigned)(__builtin_ctz((U32)val) >> 3);
 #       else
             static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
             return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
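The new guard exists because MSVC's _BitScanForward/_BitScanForward64 leave the output parameter undefined when the input is zero, and __assume(0) tells the optimizer the zero branch is unreachable (which also silences the missing-return warning, C4715). A minimal MSVC-only sketch of the pattern, with a hypothetical helper name:

    #include <intrin.h>

    static unsigned first_set_byte(unsigned long v)   /* contract: v != 0 */
    {
        if (v != 0) {
            unsigned long r;
            _BitScanForward(&r, v);     /* r = index of lowest set bit */
            return (unsigned)(r >> 3);  /* bit index -> byte index */
        }
        __assume(0);                    /* unreachable by contract */
    }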
@@ -159,11 +169,16 @@ static unsigned ZDICT_NbCommonBytes (size_t val)
     } else {  /* Big Endian CPU */
         if (MEM_64bits()) {
 #       if defined(_MSC_VER) && defined(_WIN64)
-            unsigned long r = 0;
+            if (val != 0) {
+                unsigned long r;
             _BitScanReverse64(&r, val);
             return (unsigned)(r >> 3);
+            } else {
+                /* Should not reach this code path */
+                __assume(0);
+            }
 #       elif defined(__GNUC__) && (__GNUC__ >= 3)
-            return (__builtin_clzll(val) >> 3);
+            return (unsigned)(__builtin_clzll(val) >> 3);
 #       else
             unsigned r;
             const unsigned n32 = sizeof(size_t)*4;   /* calculate this way due to compiler complaining in 32-bits mode */
@@ -174,11 +189,16 @@ static unsigned ZDICT_NbCommonBytes (size_t val)
 #       endif
         } else { /* 32 bits */
 #       if defined(_MSC_VER)
-            unsigned long r = 0;
+            if (val != 0) {
+                unsigned long r;
             _BitScanReverse(&r, (unsigned long)val);
             return (unsigned)(r >> 3);
+            } else {
+                /* Should not reach this code path */
+                __assume(0);
+            }
 #       elif defined(__GNUC__) && (__GNUC__ >= 3)
-            return (__builtin_clz((U32)val) >> 3);
+            return (unsigned)(__builtin_clz((U32)val) >> 3);
 #       else
             unsigned r;
             if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
@@ -235,7 +255,7 @@ static dictItem ZDICT_analyzePos(
     U32 savings[LLIMIT] = {0};
     const BYTE* b = (const BYTE*)buffer;
     size_t maxLength = LLIMIT;
-    size_t pos = suffix[start];
+    size_t pos = (size_t)suffix[start];
     U32 end = start;
     dictItem solution;
 
@@ -369,7 +389,7 @@ static dictItem ZDICT_analyzePos(
         savings[i] = savings[i-1] + (lengthList[i] * (i-3));
 
     DISPLAYLEVEL(4, "Selected dict at position %u, of length %u : saves %u (ratio: %.2f)  \n",
-                 (unsigned)pos, (unsigned)maxLength, (unsigned)savings[maxLength], (double)savings[maxLength] / maxLength);
+                 (unsigned)pos, (unsigned)maxLength, (unsigned)savings[maxLength], (double)savings[maxLength] / (double)maxLength);
 
     solution.pos = (U32)pos;
     solution.length = (U32)maxLength;
@@ -379,7 +399,7 @@ static dictItem ZDICT_analyzePos(
     {   U32 id;
         for (id=start; id<end; id++) {
             U32 p, pEnd, length;
-            U32 const testedPos = suffix[id];
+            U32 const testedPos = (U32)suffix[id];
             if (testedPos == pos)
                 length = solution.length;
             else {
@@ -442,7 +462,7 @@ static U32 ZDICT_tryMerge(dictItem* table, dictItem elt, U32 eltNbToSkip, const
 
         if ((table[u].pos + table[u].length >= elt.pos) && (table[u].pos < elt.pos)) {  /* overlap, existing < new */
             /* append */
-            int const addedLength = (int)eltEnd - (table[u].pos + table[u].length);
+            int const addedLength = (int)eltEnd - (int)(table[u].pos + table[u].length);
             table[u].savings += elt.length / 8;   /* rough approx bonus */
             if (addedLength > 0) {   /* otherwise, elt fully included into existing */
                 table[u].length += addedLength;
@@ -766,6 +786,13 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize,
         pos += fileSizes[u];
     }
 
+    if (notificationLevel >= 4) {
+        /* writeStats */
+        DISPLAYLEVEL(4, "Offset Code Frequencies : \n");
+        for (u=0; u<=offcodeMax; u++) {
+            DISPLAYLEVEL(4, "%2u :%7u \n", u, offcodeCount[u]);
+    }   }
+
     /* analyze, build stats, starting with literals */
     {   size_t maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog);
         if (HUF_isError(maxNbBits)) {
@@ -872,7 +899,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize,
         MEM_writeLE32(dstPtr+8, bestRepOffset[2].offset);
 #else
         /* at this stage, we don't use the result of "most common first offset",
-           as the impact of statistics is not properly evaluated */
+         * as the impact of statistics is not properly evaluated */
         MEM_writeLE32(dstPtr+0, repStartValue[0]);
         MEM_writeLE32(dstPtr+4, repStartValue[1]);
         MEM_writeLE32(dstPtr+8, repStartValue[2]);
@@ -888,6 +915,17 @@ _cleanup:
 }
 
+
+/**
+ * @returns the maximum repcode value
+ */
+static U32 ZDICT_maxRep(U32 const reps[ZSTD_REP_NUM])
+{
+    U32 maxRep = reps[0];
+    int r;
+    for (r = 1; r < ZSTD_REP_NUM; ++r)
+        maxRep = MAX(maxRep, reps[r]);
+    return maxRep;
+}
+
 size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
                           const void* customDictContent, size_t dictContentSize,
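With zstd's default repcode start values, repStartValue = { 1, 4, 8 }, the helper evaluates to

    ZDICT_maxRep(repStartValue) = MAX(1, MAX(4, 8)) = 8

which is what drives the 8-byte minimum content size enforced in the next hunk.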
@@ -899,11 +937,13 @@ size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
     BYTE header[HBUFFSIZE];
     int const compressionLevel = (params.compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : params.compressionLevel;
     U32 const notificationLevel = params.notificationLevel;
+    /* The final dictionary content must be at least as large as the largest repcode */
+    size_t const minContentSize = (size_t)ZDICT_maxRep(repStartValue);
+    size_t paddingSize;
 
     /* check conditions */
     DEBUGLOG(4, "ZDICT_finalizeDictionary");
     if (dictBufferCapacity < dictContentSize) return ERROR(dstSize_tooSmall);
-    if (dictContentSize < ZDICT_CONTENTSIZE_MIN) return ERROR(srcSize_wrong);
     if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) return ERROR(dstSize_tooSmall);
 
     /* dictionary header */
@@ -927,12 +967,43 @@ size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
         hSize += eSize;
     }
 
-    /* copy elements in final buffer ; note : src and dst buffer can overlap */
-    if (hSize + dictContentSize > dictBufferCapacity) dictContentSize = dictBufferCapacity - hSize;
-    {   size_t const dictSize = hSize + dictContentSize;
-        char* dictEnd = (char*)dictBuffer + dictSize;
-        memmove(dictEnd - dictContentSize, customDictContent, dictContentSize);
-        memcpy(dictBuffer, header, hSize);
+    /* Shrink the content size if it doesn't fit in the buffer */
+    if (hSize + dictContentSize > dictBufferCapacity) {
+        dictContentSize = dictBufferCapacity - hSize;
+    }
+
+    /* Pad the dictionary content with zeros if it is too small */
+    if (dictContentSize < minContentSize) {
+        RETURN_ERROR_IF(hSize + minContentSize > dictBufferCapacity, dstSize_tooSmall,
+                        "dictBufferCapacity too small to fit max repcode");
+        paddingSize = minContentSize - dictContentSize;
+    } else {
+        paddingSize = 0;
+    }
+
+    {
+        size_t const dictSize = hSize + paddingSize + dictContentSize;
+
+        /* The dictionary consists of the header, optional padding, and the content.
+         * The padding comes before the content because the "best" position in the
+         * dictionary is the last byte.
+         */
+        BYTE* const outDictHeader = (BYTE*)dictBuffer;
+        BYTE* const outDictPadding = outDictHeader + hSize;
+        BYTE* const outDictContent = outDictPadding + paddingSize;
+
+        assert(dictSize <= dictBufferCapacity);
+        assert(outDictContent + dictContentSize == (BYTE*)dictBuffer + dictSize);
+
+        /* First copy the customDictContent into its final location.
+         * `customDictContent` and `dictBuffer` may overlap, so we must
+         * do this before any other writes into the output buffer.
+         * Then copy the header & padding into the output buffer.
+         */
+        memmove(outDictContent, customDictContent, dictContentSize);
+        memcpy(outDictHeader, header, hSize);
+        memset(outDictPadding, 0, paddingSize);
 
         return dictSize;
     }
 }
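A worked layout with hypothetical sizes, hSize = 200 and dictContentSize = 5, so that padding is needed (minContentSize = 8, per ZDICT_maxRep above):

    paddingSize = 8 - 5 = 3
    dictBuffer  = [ header : 200 B | zero padding : 3 B | content : 5 B ]
    dictSize    = 200 + 3 + 5 = 208 bytes

Placing the padding before the content keeps the most valuable bytes at the very end of the dictionary, where references are cheapest.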
@@ -46,7 +46,7 @@ extern "C" {
  *
  * Zstd can use dictionaries to improve compression ratio of small data.
  * Traditionally small files don't compress well because there is very little
- * repetion in a single sample, since it is small. But, if you are compressing
+ * repetition in a single sample, since it is small. But, if you are compressing
  * many similar files, like a bunch of JSON records that share the same
  * structure, you can train a dictionary on ahead of time on some samples of
  * these files. Then, zstd can use the dictionary to find repetitions that are
@@ -132,7 +132,7 @@ extern "C" {
  *
  * # Benchmark levels 1-3 without a dictionary
  * zstd -b1e3 -r /path/to/my/files
- * # Benchmark levels 1-3 with a dictioanry
+ * # Benchmark levels 1-3 with a dictionary
 * zstd -b1e3 -r /path/to/my/files -D /path/to/my/dictionary
 *
 * When should I retrain a dictionary?
@@ -237,7 +237,6 @@ typedef struct {
 *  is presumed that the most profitable content is at the end of the dictionary,
 *  since that is the cheapest to reference.
 *
- * `dictContentSize` must be >= ZDICT_CONTENTSIZE_MIN bytes.
 *  `maxDictSize` must be >= max(dictContentSize, ZSTD_DICTSIZE_MIN).
 *
 * @return: size of dictionary stored into `dstDictBuffer` (<= `maxDictSize`),
@@ -272,8 +271,9 @@ ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode);
 *  Use them only in association with static linking.
 *  ==================================================================================== */
 
-#define ZDICT_CONTENTSIZE_MIN 128
 #define ZDICT_DICTSIZE_MIN    256
+/* Deprecated: Remove in v1.6.0 */
+#define ZDICT_CONTENTSIZE_MIN 128
 
 /*! ZDICT_cover_params_t:
 *   k and d are the only required parameters.
zstd/zstd.h (295 changed lines)
@@ -20,19 +20,21 @@ extern "C" {
 
 
 /* =====   ZSTDLIB_API : control library symbols visibility   ===== */
-#ifndef ZSTDLIB_VISIBILITY
-#  if defined(__GNUC__) && (__GNUC__ >= 4)
-#    define ZSTDLIB_VISIBILITY __attribute__ ((visibility ("default")))
+#ifndef ZSTDLIB_VISIBLE
+#  if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__)
+#    define ZSTDLIB_VISIBLE __attribute__ ((visibility ("default")))
+#    define ZSTDLIB_HIDDEN __attribute__ ((visibility ("hidden")))
 #  else
-#    define ZSTDLIB_VISIBILITY
+#    define ZSTDLIB_VISIBLE
+#    define ZSTDLIB_HIDDEN
 #  endif
 #endif
 #if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
-#  define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBILITY
+#  define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBLE
 #elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
-#  define ZSTDLIB_API __declspec(dllimport) ZSTDLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
+#  define ZSTDLIB_API __declspec(dllimport) ZSTDLIB_VISIBLE /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
 #else
-#  define ZSTDLIB_API ZSTDLIB_VISIBILITY
+#  define ZSTDLIB_API ZSTDLIB_VISIBLE
 #endif
 
 
@@ -72,7 +74,7 @@ extern "C" {
 /*------   Version   ------*/
 #define ZSTD_VERSION_MAJOR    1
 #define ZSTD_VERSION_MINOR    5
-#define ZSTD_VERSION_RELEASE  0
+#define ZSTD_VERSION_RELEASE  1
 #define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
 
 /*! ZSTD_versionNumber() :
@@ -247,7 +249,7 @@ ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
  *
  *   It's possible to reset all parameters to "default" using ZSTD_CCtx_reset().
  *
- *   This API supercedes all other "advanced" API entry points in the experimental section.
+ *   This API supersedes all other "advanced" API entry points in the experimental section.
  *   In the future, we expect to remove from experimental API entry points which are redundant with this API.
  */
 
@@ -417,7 +419,7 @@ typedef enum {
  *                        ZSTD_c_stableOutBuffer
  *                        ZSTD_c_blockDelimiters
  *                        ZSTD_c_validateSequences
- *                        ZSTD_c_splitBlocks
+ *                        ZSTD_c_useBlockSplitter
  *                        ZSTD_c_useRowMatchFinder
  * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
  * note : never ever use experimentalParam? names directly;
@@ -932,7 +934,7 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
  * Advanced dictionary and prefix API (Requires v1.4.0+)
  *
  * This API allows dictionaries to be used with ZSTD_compress2(),
- * ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and
+ * ZSTD_compressStream2(), and ZSTD_decompressDCtx(). Dictionaries are sticky, and
  * only reset with the context is reset with ZSTD_reset_parameters or
  * ZSTD_reset_session_and_parameters. Prefixes are single-use.
 ******************************************************************************/
@@ -1073,25 +1075,36 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
 #if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
 #define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
 
+/* This can be overridden externally to hide static symbols. */
+#ifndef ZSTDLIB_STATIC_API
+#  if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
+#    define ZSTDLIB_STATIC_API __declspec(dllexport) ZSTDLIB_VISIBLE
+#  elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
+#    define ZSTDLIB_STATIC_API __declspec(dllimport) ZSTDLIB_VISIBLE
+#  else
+#    define ZSTDLIB_STATIC_API ZSTDLIB_VISIBLE
+#  endif
+#endif
+
 /* Deprecation warnings :
  * Should these warnings be a problem, it is generally possible to disable them,
  * typically with -Wno-deprecated-declarations for gcc or _CRT_SECURE_NO_WARNINGS in Visual.
  * Otherwise, it's also possible to define ZSTD_DISABLE_DEPRECATE_WARNINGS.
 */
 #ifdef ZSTD_DISABLE_DEPRECATE_WARNINGS
-#  define ZSTD_DEPRECATED(message) ZSTDLIB_API  /* disable deprecation warnings */
+#  define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API  /* disable deprecation warnings */
 #else
 #  if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
-#    define ZSTD_DEPRECATED(message) [[deprecated(message)]] ZSTDLIB_API
+#    define ZSTD_DEPRECATED(message) [[deprecated(message)]] ZSTDLIB_STATIC_API
 #  elif (defined(GNUC) && (GNUC > 4 || (GNUC == 4 && GNUC_MINOR >= 5))) || defined(__clang__)
-#    define ZSTD_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated(message)))
+#    define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API __attribute__((deprecated(message)))
 #  elif defined(__GNUC__) && (__GNUC__ >= 3)
-#    define ZSTD_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated))
+#    define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API __attribute__((deprecated))
 #  elif defined(_MSC_VER)
-#    define ZSTD_DEPRECATED(message) ZSTDLIB_API __declspec(deprecated(message))
+#    define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API __declspec(deprecated(message))
 #  else
 #    pragma message("WARNING: You need to implement ZSTD_DEPRECATED for this compiler")
-#    define ZSTD_DEPRECATED(message) ZSTDLIB_API
+#    define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API
 #  endif
 #endif /* ZSTD_DISABLE_DEPRECATE_WARNINGS */
 
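A consumer opts into these static-only declarations by defining ZSTD_STATIC_LINKING_ONLY before the include; a minimal sketch (the wrapper function is hypothetical):

    #define ZSTD_STATIC_LINKING_ONLY   /* expose ZSTDLIB_STATIC_API declarations */
    #include <zstd.h>

    /* ZSTD_decompressBound() is one of the entry points re-tagged in this commit */
    unsigned long long upper_bound(const void* src, size_t srcSize)
    {
        return ZSTD_decompressBound(src, srcSize);
    }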
@@ -1157,9 +1170,6 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
 #define ZSTD_SRCSIZEHINT_MIN        0
 #define ZSTD_SRCSIZEHINT_MAX        INT_MAX
 
-/* internal */
-#define ZSTD_HASHLOG3_MAX           17
-
 
 /* ---  Advanced types  --- */
 
@@ -1302,10 +1312,14 @@ typedef enum {
 } ZSTD_literalCompressionMode_e;
 
 typedef enum {
-  ZSTD_urm_auto = 0,                   /* Automatically determine whether or not we use row matchfinder */
-  ZSTD_urm_disableRowMatchFinder = 1,  /* Never use row matchfinder */
-  ZSTD_urm_enableRowMatchFinder = 2    /* Always use row matchfinder when applicable */
-} ZSTD_useRowMatchFinderMode_e;
+  /* Note: This enum controls features which are conditionally beneficial. Zstd typically will make a final
+   * decision on whether or not to enable the feature (ZSTD_ps_auto), but setting the switch to ZSTD_ps_enable
+   * or ZSTD_ps_disable allow for a force enable/disable the feature.
+   */
+  ZSTD_ps_auto = 0,         /* Let the library automatically determine whether the feature shall be enabled */
+  ZSTD_ps_enable = 1,       /* Force-enable the feature */
+  ZSTD_ps_disable = 2       /* Do not use the feature */
+} ZSTD_paramSwitch_e;
 
 /***************************************
 *  Frame size functions
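A sketch of how such a switch is applied through the advanced parameter API (assumes ZSTD_STATIC_LINKING_ONLY, since ZSTD_c_useRowMatchFinder is an experimental parameter):

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_c_useRowMatchFinder is experimental */
    #include <zstd.h>

    static ZSTD_CCtx* make_cctx_with_row_mf(void)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        /* override the library's automatic choice (ZSTD_ps_auto is the default) */
        if (cctx != NULL)
            ZSTD_CCtx_setParameter(cctx, ZSTD_c_useRowMatchFinder, ZSTD_ps_enable);
        return cctx;
    }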
@ -1332,7 +1346,7 @@ typedef enum {
|
|||||||
* note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
|
* note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
|
||||||
* read each contained frame header. This is fast as most of the data is skipped,
|
* read each contained frame header. This is fast as most of the data is skipped,
|
||||||
* however it does mean that all frame data must be present and valid. */
|
* however it does mean that all frame data must be present and valid. */
|
||||||
ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
|
ZSTDLIB_STATIC_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
|
||||||
|
|
||||||
/*! ZSTD_decompressBound() :
|
/*! ZSTD_decompressBound() :
|
||||||
* `src` should point to the start of a series of ZSTD encoded and/or skippable frames
|
* `src` should point to the start of a series of ZSTD encoded and/or skippable frames
|
||||||
@ -1347,13 +1361,13 @@ ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t
|
|||||||
* note 3 : when the decompressed size field isn't available, the upper-bound for that frame is calculated by:
|
* note 3 : when the decompressed size field isn't available, the upper-bound for that frame is calculated by:
|
||||||
* upper-bound = # blocks * min(128 KB, Window_Size)
|
* upper-bound = # blocks * min(128 KB, Window_Size)
|
||||||
*/
|
*/
|
||||||
ZSTDLIB_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);
|
ZSTDLIB_STATIC_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);
|
||||||
|
|
||||||
/*! ZSTD_frameHeaderSize() :
|
/*! ZSTD_frameHeaderSize() :
|
||||||
* srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX.
|
* srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX.
|
||||||
* @return : size of the Frame Header,
|
* @return : size of the Frame Header,
|
||||||
* or an error code (if srcSize is too small) */
|
* or an error code (if srcSize is too small) */
|
||||||
ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
|
ZSTDLIB_STATIC_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
|
||||||
|
|
||||||
typedef enum {
|
typedef enum {
|
||||||
ZSTD_sf_noBlockDelimiters = 0, /* Representation of ZSTD_Sequence has no block delimiters, sequences only */
|
ZSTD_sf_noBlockDelimiters = 0, /* Representation of ZSTD_Sequence has no block delimiters, sequences only */
|
||||||
@ -1376,7 +1390,7 @@ typedef enum {
|
|||||||
* @return : number of sequences generated
|
* @return : number of sequences generated
|
||||||
*/
|
*/
|
||||||
|
|
||||||
ZSTDLIB_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
|
ZSTDLIB_STATIC_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
|
||||||
size_t outSeqsSize, const void* src, size_t srcSize);
|
size_t outSeqsSize, const void* src, size_t srcSize);
|
||||||
|
|
||||||
/*! ZSTD_mergeBlockDelimiters() :
|
/*! ZSTD_mergeBlockDelimiters() :
|
||||||
@ -1390,7 +1404,7 @@ ZSTDLIB_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
|
|||||||
* setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters
|
* setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters
|
||||||
* @return : number of sequences left after merging
|
* @return : number of sequences left after merging
|
||||||
*/
|
*/
|
||||||
ZSTDLIB_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize);
|
ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize);
|
||||||
|
|
||||||
/*! ZSTD_compressSequences() :
|
/*! ZSTD_compressSequences() :
|
||||||
* Compress an array of ZSTD_Sequence, generated from the original source buffer, into dst.
|
* Compress an array of ZSTD_Sequence, generated from the original source buffer, into dst.
|
||||||
@ -1420,7 +1434,7 @@ ZSTDLIB_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t se
|
|||||||
* and cannot emit an RLE block that disagrees with the repcode history
|
* and cannot emit an RLE block that disagrees with the repcode history
|
||||||
* @return : final compressed size or a ZSTD error.
|
* @return : final compressed size or a ZSTD error.
|
||||||
*/
|
*/
|
||||||
ZSTDLIB_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize,
|
ZSTDLIB_STATIC_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize,
|
||||||
const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
|
const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
|
||||||
const void* src, size_t srcSize);
|
const void* src, size_t srcSize);
|
||||||
|
|
||||||
@ -1438,9 +1452,29 @@ ZSTDLIB_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size
|
|||||||
*
|
*
|
||||||
* @return : number of bytes written or a ZSTD error.
|
* @return : number of bytes written or a ZSTD error.
|
||||||
*/
|
*/
|
||||||
ZSTDLIB_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
|
ZSTDLIB_STATIC_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
|
||||||
const void* src, size_t srcSize, unsigned magicVariant);
|
const void* src, size_t srcSize, unsigned magicVariant);
|
||||||
|
|
||||||
|
/*! ZSTD_readSkippableFrame() :
|
||||||
|
* Retrieves a zstd skippable frame containing data given by src, and writes it to dst buffer.
|
||||||
|
*
|
||||||
|
* The parameter magicVariant will receive the magicVariant that was supplied when the frame was written,
|
||||||
|
* i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested
|
||||||
|
* in the magicVariant.
|
||||||
|
*
|
||||||
|
* Returns an error if destination buffer is not large enough, or if the frame is not skippable.
|
||||||
|
*
|
||||||
|
* @return : number of bytes written or a ZSTD error.
|
||||||
|
*/
|
||||||
|
ZSTDLIB_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsigned* magicVariant,
|
||||||
|
const void* src, size_t srcSize);
|
||||||
|
|
||||||
|
/*! ZSTD_isSkippableFrame() :
|
||||||
|
* Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame.
|
||||||
|
*/
|
||||||
|
ZSTDLIB_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size);
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/***************************************
|
/***************************************
|
||||||
* Memory management
|
* Memory management
|
||||||
@ -1469,10 +1503,10 @@ ZSTDLIB_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
|
|||||||
* Note 2 : only single-threaded compression is supported.
|
* Note 2 : only single-threaded compression is supported.
|
||||||
* ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
|
* ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
|
||||||
*/
|
*/
|
||||||
ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
|
ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
|
||||||
ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
|
ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
|
||||||
ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
|
ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
|
||||||
ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
|
ZSTDLIB_STATIC_API size_t ZSTD_estimateDCtxSize(void);
|
||||||
|
|
||||||
/*! ZSTD_estimateCStreamSize() :
|
 /*! ZSTD_estimateCStreamSize() :
  *  ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.
@@ -1487,20 +1521,20 @@ ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
  *  Note : if streaming is init with function ZSTD_init?Stream_usingDict(),
  *  an internal ?Dict will be created, which additional size is not estimated here.
  *  In this case, get total size by adding ZSTD_estimate?DictSize */
-ZSTDLIB_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
-ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
-ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
-ZSTDLIB_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
-ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);

 /*! ZSTD_estimate?DictSize() :
  *  ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict().
  *  ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().
  *  Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller.
  */
-ZSTDLIB_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
-ZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
-ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);

 /*! ZSTD_initStatic*() :
  *  Initialize an object using a pre-allocated fixed-size buffer.
@@ -1523,20 +1557,20 @@ ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e
  *  Limitation 2 : static cctx currently not compatible with multi-threading.
  *  Limitation 3 : static dctx is incompatible with legacy support.
  */
-ZSTDLIB_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
+ZSTDLIB_STATIC_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
-ZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize);  /**< same as ZSTD_initStaticCCtx() */
+ZSTDLIB_STATIC_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize);  /**< same as ZSTD_initStaticCCtx() */

-ZSTDLIB_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);
+ZSTDLIB_STATIC_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);
-ZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize);  /**< same as ZSTD_initStaticDCtx() */
+ZSTDLIB_STATIC_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize);  /**< same as ZSTD_initStaticDCtx() */

-ZSTDLIB_API const ZSTD_CDict* ZSTD_initStaticCDict(
+ZSTDLIB_STATIC_API const ZSTD_CDict* ZSTD_initStaticCDict(
         void* workspace, size_t workspaceSize,
         const void* dict, size_t dictSize,
         ZSTD_dictLoadMethod_e dictLoadMethod,
         ZSTD_dictContentType_e dictContentType,
         ZSTD_compressionParameters cParams);

-ZSTDLIB_API const ZSTD_DDict* ZSTD_initStaticDDict(
+ZSTDLIB_STATIC_API const ZSTD_DDict* ZSTD_initStaticDDict(
         void* workspace, size_t workspaceSize,
         const void* dict, size_t dictSize,
         ZSTD_dictLoadMethod_e dictLoadMethod,
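All of the declarations above now carry ZSTDLIB_STATIC_API, which zstd.h only exposes when ZSTD_STATIC_LINKING_ONLY is defined before inclusion. As a minimal sketch of how the estimate/init-static pair is typically combined (the helper name make_static_cctx is illustrative, not from zstd):

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <stdlib.h>

ZSTD_CCtx* make_static_cctx(int level)
{
    /* budget large enough for any compression level up to `level` */
    size_t const wkspSize = ZSTD_estimateCStreamSize(level);
    void* const wksp = malloc(wkspSize);   /* could equally be a static buffer */
    if (wksp == NULL) return NULL;
    /* returns NULL if the workspace is too small */
    return ZSTD_initStaticCCtx(wksp, wkspSize);
}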
@@ -1557,12 +1591,12 @@ __attribute__((__unused__))
 #endif
 ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL };  /**< this constant defers to stdlib's functions */

-ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
+ZSTDLIB_STATIC_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
-ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
+ZSTDLIB_STATIC_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
-ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
+ZSTDLIB_STATIC_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
-ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
+ZSTDLIB_STATIC_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);

-ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
+ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
         ZSTD_dictLoadMethod_e dictLoadMethod,
         ZSTD_dictContentType_e dictContentType,
         ZSTD_compressionParameters cParams,
@@ -1579,22 +1613,22 @@ ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictS
  *  ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer.
  */
 typedef struct POOL_ctx_s ZSTD_threadPool;
-ZSTDLIB_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads);
+ZSTDLIB_STATIC_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads);
-ZSTDLIB_API void ZSTD_freeThreadPool (ZSTD_threadPool* pool);  /* accept NULL pointer */
+ZSTDLIB_STATIC_API void ZSTD_freeThreadPool (ZSTD_threadPool* pool);  /* accept NULL pointer */
-ZSTDLIB_API size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool);
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool);


 /*
  * This API is temporary and is expected to change or disappear in the future!
  */
-ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2(
+ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_advanced2(
         const void* dict, size_t dictSize,
         ZSTD_dictLoadMethod_e dictLoadMethod,
         ZSTD_dictContentType_e dictContentType,
         const ZSTD_CCtx_params* cctxParams,
         ZSTD_customMem customMem);

-ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(
+ZSTDLIB_STATIC_API ZSTD_DDict* ZSTD_createDDict_advanced(
         const void* dict, size_t dictSize,
         ZSTD_dictLoadMethod_e dictLoadMethod,
         ZSTD_dictContentType_e dictContentType,
@@ -1611,22 +1645,22 @@ ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(
  *  As a consequence, `dictBuffer` **must** outlive CDict,
  *  and its content must remain unmodified throughout the lifetime of CDict.
  *  note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */
-ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
+ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);

 /*! ZSTD_getCParams() :
  * @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
  * `estimatedSrcSize` value is optional, select 0 if not known */
-ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
+ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);

 /*! ZSTD_getParams() :
  *  same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.
  *  All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */
-ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
+ZSTDLIB_STATIC_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);

 /*! ZSTD_checkCParams() :
  *  Ensure param values remain within authorized range.
  * @return 0 on success, or an error code (can be checked with ZSTD_isError()) */
-ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
+ZSTDLIB_STATIC_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);

 /*! ZSTD_adjustCParams() :
  *  optimize params for a given `srcSize` and `dictSize`.
@@ -1634,7 +1668,7 @@ ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
  * `dictSize` must be `0` when there is no dictionary.
  *  cPar can be invalid : all parameters will be clamped within valid range in the @return struct.
  *  This function never fails (wide contract) */
-ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
+ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);

 /*! ZSTD_compress_advanced() :
  *  Note : this function is now DEPRECATED.
@@ -1662,18 +1696,18 @@ size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
 /*! ZSTD_CCtx_loadDictionary_byReference() :
  *  Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx.
  *  It saves some memory, but also requires that `dict` outlives its usage within `cctx` */
-ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);

 /*! ZSTD_CCtx_loadDictionary_advanced() :
  *  Same as ZSTD_CCtx_loadDictionary(), but gives finer control over
  *  how to load the dictionary (by copy ? by reference ?)
  *  and how to interpret it (automatic ? force raw mode ? full mode only ?) */
-ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);

 /*! ZSTD_CCtx_refPrefix_advanced() :
  *  Same as ZSTD_CCtx_refPrefix(), but gives finer control over
  *  how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
-ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);

 /* === experimental parameters === */
 /* these parameters can be used with ZSTD_setParameter()
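The parameter-selection functions above compose naturally: fetch defaults for a level, clamp them for the actual source and dictionary sizes, then (optionally) validate. A short sketch under those assumptions (the helper name pick_cparams is hypothetical, not from zstd):

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

ZSTD_compressionParameters pick_cparams(int level, unsigned long long srcSize, size_t dictSize)
{
    ZSTD_compressionParameters cp = ZSTD_getCParams(level, srcSize, dictSize);
    /* clamps any out-of-range field; documented above as never failing */
    cp = ZSTD_adjustCParams(cp, srcSize, dictSize);
    /* ZSTD_checkCParams(cp) would now return 0, since the range was clamped */
    return cp;
}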
@@ -1712,9 +1746,15 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* pre
  * See the comments on that enum for an explanation of the feature. */
 #define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4

-/* Controls how the literals are compressed (default is auto).
- * The value must be of type ZSTD_literalCompressionMode_e.
- * See ZSTD_literalCompressionMode_e enum definition for details.
+/* Controlled with ZSTD_paramSwitch_e enum.
+ * Default is ZSTD_ps_auto.
+ * Set to ZSTD_ps_disable to never compress literals.
+ * Set to ZSTD_ps_enable to always compress literals. (Note: uncompressed literals
+ * may still be emitted if huffman is not beneficial to use.)
+ *
+ * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use
+ * literals compression based on the compression parameters - specifically,
+ * negative compression levels do not use literal compression.
  */
 #define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5

@@ -1777,7 +1817,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* pre
  *
  * Note that this means that the CDict tables can no longer be copied into the
  * CCtx, so the dict attachment mode ZSTD_dictForceCopy will no longer be
- * useable. The dictionary can only be attached or reloaded.
+ * usable. The dictionary can only be attached or reloaded.
  *
  * In general, you should expect compression to be faster--sometimes very much
  * so--and CDict creation to be slightly slower. Eventually, we will probably
@@ -1866,23 +1906,26 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* pre
  */
 #define ZSTD_c_validateSequences ZSTD_c_experimentalParam12

-/* ZSTD_c_splitBlocks
- * Default is 0 == disabled. Set to 1 to enable block splitting.
+/* ZSTD_c_useBlockSplitter
+ * Controlled with ZSTD_paramSwitch_e enum.
+ * Default is ZSTD_ps_auto.
+ * Set to ZSTD_ps_disable to never use block splitter.
+ * Set to ZSTD_ps_enable to always use block splitter.
  *
- * Will attempt to split blocks in order to improve compression ratio at the cost of speed.
+ * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use
+ * block splitting based on the compression parameters.
  */
-#define ZSTD_c_splitBlocks ZSTD_c_experimentalParam13
+#define ZSTD_c_useBlockSplitter ZSTD_c_experimentalParam13

 /* ZSTD_c_useRowMatchFinder
- * Default is ZSTD_urm_auto.
- * Controlled with ZSTD_useRowMatchFinderMode_e enum.
+ * Controlled with ZSTD_paramSwitch_e enum.
+ * Default is ZSTD_ps_auto.
+ * Set to ZSTD_ps_disable to never use row-based matchfinder.
+ * Set to ZSTD_ps_enable to force usage of row-based matchfinder.
  *
- * By default, in ZSTD_urm_auto, when finalizing the compression parameters, the library
- * will decide at runtime whether to use the row-based matchfinder based on support for SIMD
- * instructions as well as the windowLog.
- *
- * Set to ZSTD_urm_disableRowMatchFinder to never use row-based matchfinder.
- * Set to ZSTD_urm_enableRowMatchFinder to force usage of row-based matchfinder.
+ * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use
+ * the row-based matchfinder based on support for SIMD instructions and the window log.
+ * Note that this only pertains to compression strategies: greedy, lazy, and lazy2
  */
 #define ZSTD_c_useRowMatchFinder ZSTD_c_experimentalParam14

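These three switches now share the ZSTD_paramSwitch_e values introduced by this zstd version, and they are passed as plain ints through the regular parameter-setting entry point. A hedged sketch (the function name configure is illustrative; return codes are omitted for brevity but are size_t error codes testable with ZSTD_isError()):

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

void configure(ZSTD_CCtx* cctx)
{
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_literalCompressionMode, ZSTD_ps_disable); /* never compress literals */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_useBlockSplitter,       ZSTD_ps_enable);  /* always use block splitter */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_useRowMatchFinder,      ZSTD_ps_auto);    /* library decides at runtime */
}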
@@ -1911,7 +1954,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* pre
  *  and store it into int* value.
  * @return : 0, or an error code (which can be tested with ZSTD_isError()).
  */
-ZSTDLIB_API size_t ZSTD_CCtx_getParameter(const ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value);
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_getParameter(const ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value);


 /*! ZSTD_CCtx_params :
@@ -1931,25 +1974,25 @@ ZSTDLIB_API size_t ZSTD_CCtx_getParameter(const ZSTD_CCtx* cctx, ZSTD_cParameter
  *  This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams()
  *  for static allocation of CCtx for single-threaded compression.
  */
-ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
+ZSTDLIB_STATIC_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
-ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params);  /* accept NULL pointer */
+ZSTDLIB_STATIC_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params);  /* accept NULL pointer */

 /*! ZSTD_CCtxParams_reset() :
  *  Reset params to default values.
  */
-ZSTDLIB_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params);
+ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params);

 /*! ZSTD_CCtxParams_init() :
  *  Initializes the compression parameters of cctxParams according to
  *  compression level. All other parameters are reset to their default values.
  */
-ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel);
+ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel);

 /*! ZSTD_CCtxParams_init_advanced() :
  *  Initializes the compression and frame parameters of cctxParams according to
  *  params. All other parameters are reset to their default values.
  */
-ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
+ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);

 /*! ZSTD_CCtxParams_setParameter() : Requires v1.4.0+
  *  Similar to ZSTD_CCtx_setParameter.
@@ -1959,14 +2002,14 @@ ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, Z
  * @result : a code representing success or failure (which can be tested with
  *           ZSTD_isError()).
  */
-ZSTDLIB_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value);
+ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value);

 /*! ZSTD_CCtxParams_getParameter() :
  *  Similar to ZSTD_CCtx_getParameter.
  *  Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.
  * @result : 0, or an error code (which can be tested with ZSTD_isError()).
  */
-ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(const ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value);
+ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_getParameter(const ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value);

 /*! ZSTD_CCtx_setParametersUsingCCtxParams() :
  *  Apply a set of ZSTD_CCtx_params to the compression context.
@@ -1975,7 +2018,7 @@ ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(const ZSTD_CCtx_params* params,
  *  if nbWorkers>=1, new parameters will be picked up at next job,
  *  with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated).
  */
-ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams(
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setParametersUsingCCtxParams(
         ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params);

 /*! ZSTD_compressStream2_simpleArgs() :
@@ -1984,7 +2027,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams(
  *  This variant might be helpful for binders from dynamic languages
  *  which have troubles handling structures containing memory pointers.
  */
-ZSTDLIB_API size_t ZSTD_compressStream2_simpleArgs (
+ZSTDLIB_STATIC_API size_t ZSTD_compressStream2_simpleArgs (
         ZSTD_CCtx* cctx,
         void* dst, size_t dstCapacity, size_t* dstPos,
         const void* src, size_t srcSize, size_t* srcPos,
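The ZSTD_CCtxParams_* family above follows a create / init / set / apply / free lifecycle. A minimal sketch of that flow, assuming the simplified error handling shown (apply_params is a hypothetical helper, not a zstd symbol):

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

size_t apply_params(ZSTD_CCtx* cctx, int level)
{
    ZSTD_CCtx_params* const p = ZSTD_createCCtxParams();
    if (p == NULL) return (size_t)-1;            /* simplified error handling for the sketch */
    ZSTD_CCtxParams_init(p, level);              /* set level; everything else reset to defaults */
    ZSTD_CCtxParams_setParameter(p, ZSTD_c_checksumFlag, 1);
    {   size_t const r = ZSTD_CCtx_setParametersUsingCCtxParams(cctx, p);
        ZSTD_freeCCtxParams(p);                  /* accepts NULL as documented above */
        return r;                                /* 0 or an error code; test with ZSTD_isError() */
    }
}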
@@ -2000,33 +2043,33 @@ ZSTDLIB_API size_t ZSTD_compressStream2_simpleArgs (
  *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
  *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
  *  Note 3 : Skippable Frame Identifiers are considered valid. */
-ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size);
+ZSTDLIB_STATIC_API unsigned ZSTD_isFrame(const void* buffer, size_t size);

 /*! ZSTD_createDDict_byReference() :
  *  Create a digested dictionary, ready to start decompression operation without startup delay.
  *  Dictionary content is referenced, and therefore stays in dictBuffer.
  *  It is important that dictBuffer outlives DDict,
  *  it must remain read accessible throughout the lifetime of DDict */
-ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
+ZSTDLIB_STATIC_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);

 /*! ZSTD_DCtx_loadDictionary_byReference() :
  *  Same as ZSTD_DCtx_loadDictionary(),
  *  but references `dict` content instead of copying it into `dctx`.
  *  This saves memory if `dict` remains around.,
  *  However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. */
-ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+ZSTDLIB_STATIC_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);

 /*! ZSTD_DCtx_loadDictionary_advanced() :
  *  Same as ZSTD_DCtx_loadDictionary(),
  *  but gives direct control over
  *  how to load the dictionary (by copy ? by reference ?)
  *  and how to interpret it (automatic ? force raw mode ? full mode only ?). */
-ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
+ZSTDLIB_STATIC_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);

 /*! ZSTD_DCtx_refPrefix_advanced() :
  *  Same as ZSTD_DCtx_refPrefix(), but gives finer control over
  *  how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
-ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
+ZSTDLIB_STATIC_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);

 /*! ZSTD_DCtx_setMaxWindowSize() :
  *  Refuses allocating internal buffers for frames requiring a window size larger than provided limit.
@@ -2035,14 +2078,14 @@ ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* pre
  *  By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT)
  * @return : 0, or an error code (which can be tested using ZSTD_isError()).
  */
-ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);
+ZSTDLIB_STATIC_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);

 /*! ZSTD_DCtx_getParameter() :
  *  Get the requested decompression parameter value, selected by enum ZSTD_dParameter,
  *  and store it into int* value.
  * @return : 0, or an error code (which can be tested with ZSTD_isError()).
  */
-ZSTDLIB_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value);
+ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value);

 /* ZSTD_d_format
  * experimental parameter,
@@ -2131,7 +2174,7 @@ size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
  *  This can be helpful for binders from dynamic languages
  *  which have troubles handling structures containing memory pointers.
  */
-ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs (
+ZSTDLIB_STATIC_API size_t ZSTD_decompressStream_simpleArgs (
         ZSTD_DCtx* dctx,
         void* dst, size_t dstCapacity, size_t* dstPos,
         const void* src, size_t srcSize, size_t* srcPos);
@@ -2270,7 +2313,7 @@ typedef struct {
  *  Note : (ingested - consumed) is amount of input data buffered internally, not yet compressed.
  *  Aggregates progression inside active worker threads.
  */
-ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx);
+ZSTDLIB_STATIC_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx);

 /*! ZSTD_toFlushNow() :
  *  Tell how many bytes are ready to be flushed immediately.
@@ -2285,7 +2328,7 @@ ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx
  *  therefore flush speed is limited by production speed of oldest job
  *  irrespective of the speed of concurrent (and newer) jobs.
  */
-ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
+ZSTDLIB_STATIC_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);


 /*===== Advanced Streaming decompression functions =====*/
@@ -2299,7 +2342,7 @@ ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
  * note: no dictionary will be used if dict == NULL or dictSize < 8
  * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
  */
-ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
+ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);

 /*!
  * This function is deprecated, and is equivalent to:
@@ -2310,7 +2353,7 @@ ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dic
  * note : ddict is referenced, it must outlive decompression session
  * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
  */
-ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
+ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);

 /*!
  * This function is deprecated, and is equivalent to:
@@ -2320,7 +2363,7 @@ ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDi
  * re-use decompression parameters from previous init; saves dictionary loading
  * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
  */
-ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
+ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);


 /*********************************************************************
@@ -2362,13 +2405,13 @@ ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
 */

 /*===== Buffer-less streaming compression functions =====*/
-ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
+ZSTDLIB_STATIC_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
-ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
+ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
-ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
+ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
-ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
+ZSTDLIB_STATIC_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */

-ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_STATIC_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
-ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_STATIC_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);

 /* The ZSTD_compressBegin_advanced() and ZSTD_compressBegin_usingCDict_advanced() are now DEPRECATED and will generate a compiler warning */
 ZSTD_DEPRECATED("use advanced API to access custom parameters")
@@ -2465,24 +2508,24 @@ typedef struct {
  * @return : 0, `zfhPtr` is correctly filled,
  *          >0, `srcSize` is too small, value is wanted `srcSize` amount,
  *           or an error code, which can be tested using ZSTD_isError() */
-ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
+ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
 /*! ZSTD_getFrameHeader_advanced() :
  *  same as ZSTD_getFrameHeader(),
  *  with added capability to select a format (like ZSTD_f_zstd1_magicless) */
-ZSTDLIB_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
+ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
-ZSTDLIB_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize);  /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
+ZSTDLIB_STATIC_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize);  /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */

-ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
+ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
-ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
-ZSTDLIB_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
+ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);

-ZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
+ZSTDLIB_STATIC_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
-ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_STATIC_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);

 /* misc */
-ZSTDLIB_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
+ZSTDLIB_STATIC_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
 typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
-ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
+ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);



@@ -2519,10 +2562,10 @@ ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
 */

 /*===== Raw zstd block functions =====*/
-ZSTDLIB_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx);
+ZSTDLIB_STATIC_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx);
-ZSTDLIB_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_STATIC_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
-ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_STATIC_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
-ZSTDLIB_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize);  /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
+ZSTDLIB_STATIC_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize);  /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */


 #endif  /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */
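As documented above, ZSTD_getFrameHeader() fills a ZSTD_frameHeader without consuming input, returning 0 on success, a positive byte count when srcSize is too small, or an error code. A hedged sketch of peeking at a frame (print_frame_info is a hypothetical helper; the fields used are the documented windowSize, dictID, and checksumFlag members):

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <stdio.h>

void print_frame_info(const void* src, size_t srcSize)
{
    ZSTD_frameHeader zfh;
    size_t const r = ZSTD_getFrameHeader(&zfh, src, srcSize);
    if (ZSTD_isError(r)) { printf("not a zstd frame\n"); return; }
    if (r > 0) { printf("need %zu bytes to read the header\n", r); return; }
    printf("windowSize=%llu dictID=%u checksum=%u\n",
           zfh.windowSize, zfh.dictID, zfh.checksumFlag);
}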