zstd: Update to upstream version 1.4.5

(cherry picked from commit 914591c9ae)
commit 64b0f5db40 (parent 8a97e65297)
Author: Rémi Verschelde
Date:   2020-09-18 21:38:36 +02:00
Signature: GPG Key ID C3336907360768E1 (no known key found for this signature in database)

57 changed files with 2914 additions and 1499 deletions


@@ -362,7 +362,7 @@ License: Zlib
Files: ./thirdparty/zstd/
Comment: Zstandard
Copyright: 2016-2019, Facebook, Inc.
Copyright: 2016-2020, Facebook, Inc.
License: BSD-3-clause


@@ -122,6 +122,7 @@ if env["builtin_zstd"]:
"compress/zstdmt_compress.c",
"compress/zstd_compress_literals.c",
"compress/zstd_compress_sequences.c",
"compress/zstd_compress_superblock.c",
"decompress/huf_decompress.c",
"decompress/zstd_ddict.c",
"decompress/zstd_decompress_block.c",


@@ -529,7 +529,7 @@ Files extracted from upstream source:
## zstd
- Upstream: https://github.com/facebook/zstd
- Version: 1.4.4 (2019)
- Version: 1.4.5 (2020)
- License: BSD-3-Clause
Files extracted from upstream source:


@@ -1,35 +1,15 @@
/* ******************************************************************
bitstream
Part of FSE library
Copyright (C) 2013-present, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
* bitstream
* Part of FSE library
* Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */
#ifndef BITSTREAM_H_MODULE
#define BITSTREAM_H_MODULE
@@ -48,6 +28,7 @@ extern "C" {
* Dependencies
******************************************/
#include "mem.h" /* unaligned access routines */
#include "compiler.h" /* UNLIKELY() */
#include "debug.h" /* assert(), DEBUGLOG(), RAWLOG() */
#include "error_private.h" /* error codes and messages */
@@ -161,8 +142,7 @@ MEM_STATIC unsigned BIT_highbit32 (U32 val)
{
# if defined(_MSC_VER) /* Visual */
unsigned long r=0;
_BitScanReverse ( &r, val );
return (unsigned) r;
return _BitScanReverse ( &r, val ) ? (unsigned)r : 0;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
return __builtin_clz (val) ^ 31;
# elif defined(__ICCARM__) /* IAR Intrinsic */
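The rewritten MSVC branch guards against val == 0: _BitScanReverse() returns zero in that case and leaves its index output unset, so the old code returned an indeterminate value while the new code returns 0. A minimal standalone sketch of that contract (a portable model, not upstream code):

#include <assert.h>

/* Model of BIT_highbit32: position of the highest set bit,
 * defined as 0 for val == 0 to match the new MSVC fallback. */
static unsigned highbit32_model(unsigned val)
{
    unsigned r = 0;
    while (val >>= 1) r++;
    return r;
}

int main(void)
{
    assert(highbit32_model(0) == 0);   /* undefined with the old MSVC code */
    assert(highbit32_model(1) == 0);
    assert(highbit32_model(32) == 5);
    assert(highbit32_model(0x80000000u) == 31);
    return 0;
}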
@@ -411,6 +391,23 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
return value;
}
/*! BIT_reloadDStreamFast() :
* Similar to BIT_reloadDStream(), but with two differences:
* 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!
* 2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this
* point you must use BIT_reloadDStream() to reload.
*/
MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD)
{
if (UNLIKELY(bitD->ptr < bitD->limitPtr))
return BIT_DStream_overflow;
assert(bitD->bitsConsumed <= sizeof(bitD->bitContainer)*8);
bitD->ptr -= bitD->bitsConsumed >> 3;
bitD->bitsConsumed &= 7;
bitD->bitContainer = MEM_readLEST(bitD->ptr);
return BIT_DStream_unfinished;
}
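A hedged sketch of the calling pattern this comment describes (a fragment, not upstream code): lean on the unchecked fast reload while the read pointer stays above limitPtr, and fall back to the fully checked variant once it reports overflow.

/* Assumes bitD was initialized earlier with BIT_initDStream(). */
static void example_decode_loop(BIT_DStream_t* bitD)
{
    BIT_DStream_status st = BIT_DStream_unfinished;
    while (st == BIT_DStream_unfinished) {
        /* ... consume bits with BIT_readBits() / BIT_readBitsFast() ... */
        st = BIT_reloadDStreamFast(bitD);
        if (st == BIT_DStream_overflow)
            st = BIT_reloadDStream(bitD);  /* safe, bounds-checked reload */
    }
}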
/*! BIT_reloadDStream() :
* Refill `bitD` from buffer previously set in BIT_initDStream() .
* This function is safe, it guarantees it will not read beyond src buffer.
@@ -422,10 +419,7 @@ MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
return BIT_DStream_overflow;
if (bitD->ptr >= bitD->limitPtr) {
bitD->ptr -= bitD->bitsConsumed >> 3;
bitD->bitsConsumed &= 7;
bitD->bitContainer = MEM_readLEST(bitD->ptr);
return BIT_DStream_unfinished;
return BIT_reloadDStreamFast(bitD);
}
if (bitD->ptr == bitD->start) {
if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -17,7 +17,7 @@
/* force inlining */
#if !defined(ZSTD_NO_INLINE)
#if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
# define INLINE_KEYWORD inline
#else
# define INLINE_KEYWORD
@@ -114,6 +114,9 @@
# include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
# define PREFETCH_L1(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
# define PREFETCH_L2(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T1)
# elif defined(__aarch64__)
# define PREFETCH_L1(ptr) __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr)))
# define PREFETCH_L2(ptr) __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr)))
# elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
# define PREFETCH_L1(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
# define PREFETCH_L2(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)
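The new __aarch64__ branch emits prfm hints directly; the other branches map to _mm_prefetch() or __builtin_prefetch(). A hedged illustration of how such hints are typically used (not upstream code; the 512-byte distance is an arbitrary example value):

/* Prefetch well ahead of a linear scan so data is in cache on arrival. */
static unsigned long sum_bytes(const unsigned char* p, size_t n)
{
    unsigned long s = 0;
    size_t i;
    for (i = 0; i < n; i++) {
        if (i + 512 < n)
            PREFETCH_L2(p + i + 512);  /* a hint only; does not fault */
        s += p[i];
    }
    return s;
}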
@@ -136,7 +139,7 @@
/* vectorization
* older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax */
#if !defined(__clang__) && defined(__GNUC__)
#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__)
# if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5)
# define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
# else
@@ -146,6 +149,19 @@
# define DONT_VECTORIZE
#endif
/* Tell the compiler that a branch is likely or unlikely.
* Only use these macros if it causes the compiler to generate better code.
* If you can remove a LIKELY/UNLIKELY annotation without speed changes in gcc
* and clang, please do.
*/
#if defined(__GNUC__)
#define LIKELY(x) (__builtin_expect((x), 1))
#define UNLIKELY(x) (__builtin_expect((x), 0))
#else
#define LIKELY(x) (x)
#define UNLIKELY(x) (x)
#endif
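A short hedged example of the intended use (hypothetical copy_checked(), not upstream code): annotate a branch only when it is overwhelmingly one-sided, such as an error check on a hot path.

#include <string.h>  /* memcpy, size_t */

static size_t copy_checked(char* dst, size_t cap, const char* src, size_t len)
{
    if (UNLIKELY(len > cap))  /* rare path; compiler can move it out of line */
        return (size_t)-1;
    memcpy(dst, src, len);
    return len;
}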
/* disable warnings */
#ifdef _MSC_VER /* Visual Studio */
# include <intrin.h> /* For Visual 2005 */


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-present, Facebook, Inc.
* Copyright (c) 2018-2020, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the


@@ -1,35 +1,15 @@
/* ******************************************************************
debug
Part of FSE library
Copyright (C) 2013-present, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
* debug
* Part of FSE library
* Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */


@@ -1,35 +1,15 @@
/* ******************************************************************
debug
Part of FSE library
Copyright (C) 2013-present, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
* debug
* Part of FSE library
* Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */


@@ -1,36 +1,16 @@
/*
Common functions of New Generation Entropy library
Copyright (C) 2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
*************************************************************************** */
/* ******************************************************************
* Common functions of New Generation Entropy library
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
* - Public forum : https://groups.google.com/forum/#!forum/lz4c
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */
/* *************************************
* Dependencies


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -47,6 +47,7 @@ const char* ERR_getErrorString(ERR_enum code)
/* following error codes are not stable and may be removed or changed in a future version */
case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong";
case PREFIX(maxCode):
default: return notErrorCode;
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -49,7 +49,7 @@ typedef ZSTD_ErrorCode ERR_enum;
/*-****************************************
* Error codes handling
******************************************/
#undef ERROR /* reported already defined on VS 2015 (Rich Geldreich) */
#undef ERROR /* already defined on Visual Studio */
#define ERROR(name) ZSTD_ERROR(name)
#define ZSTD_ERROR(name) ((size_t)-PREFIX(name))
@@ -57,6 +57,10 @@ ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); }
/* check and forward error code */
#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
#define CHECK_F(f) { CHECK_V_F(_var_err__, f); }
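These CHECK_V_F()/CHECK_F() helpers were previously duplicated in fse_decompress.c, fse_compress.c and huf_compress.c (all three copies are removed later in this commit) and are now centralized here. A hedged usage sketch, with hypothetical step_one()/step_two() helpers returning size_t error codes:

static size_t run_pipeline(void* dst, size_t dstCapacity)
{
    CHECK_F( step_one(dst, dstCapacity) );              /* forward any error */
    { CHECK_V_F(written, step_two(dst, dstCapacity));   /* bind result, check it */
      return written; }
}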
/*-****************************************
* Error Strings


@@ -1,35 +1,15 @@
/* ******************************************************************
FSE : Finite State Entropy codec
Public Prototypes declaration
Copyright (C) 2013-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
* FSE : Finite State Entropy codec
* Public Prototypes declaration
* Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */
#if defined (__cplusplus)


@@ -1,35 +1,15 @@
/* ******************************************************************
FSE : Finite State Entropy decoder
Copyright (C) 2013-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
* FSE : Finite State Entropy decoder
* Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
* - Public forum : https://groups.google.com/forum/#!forum/lz4c
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */
@@ -51,11 +31,6 @@
#define FSE_isError ERR_isError
#define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */
/* check and forward error code */
#ifndef CHECK_F
#define CHECK_F(f) { size_t const e = f; if (FSE_isError(e)) return e; }
#endif
/* **************************************************************
* Templates
@@ -287,7 +262,7 @@ size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size
/* normal FSE decoding mode */
size_t const NCountLength = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
if (FSE_isError(NCountLength)) return NCountLength;
//if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size; supposed to be already checked in NCountLength, only remaining case : NCountLength==cSrcSize */
/* if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); */ /* too small input size; supposed to be already checked in NCountLength, only remaining case : NCountLength==cSrcSize */
if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
ip += NCountLength;
cSrcSize -= NCountLength;


@@ -1,35 +1,15 @@
/* ******************************************************************
huff0 huffman codec,
part of Finite State Entropy library
Copyright (C) 2013-present, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
* huff0 huffman codec,
* part of Finite State Entropy library
* Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */
#if defined (__cplusplus)
@@ -110,7 +90,7 @@ HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity,
/** HUF_compress4X_wksp() :
* Same as HUF_compress2(), but uses externally allocated `workSpace`.
* `workspace` must have minimum alignment of 4, and be at least as large as HUF_WORKSPACE_SIZE */
#define HUF_WORKSPACE_SIZE (6 << 10)
#define HUF_WORKSPACE_SIZE ((6 << 10) + 256)
#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))
HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
@@ -208,6 +188,8 @@ typedef struct HUF_CElt_s HUF_CElt; /* incomplete type */
size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits); /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */
size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
typedef enum {
HUF_repeat_none, /**< Cannot use the previous table */
@@ -246,7 +228,7 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize,
/** HUF_readCTable() :
* Loading a CTable saved with HUF_writeCTable() */
size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights);
/** HUF_getNbBits() :
* Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -18,7 +18,7 @@ extern "C" {
#include <stddef.h> /* size_t */
#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_customMem */
#include "zstd.h"
#include "../zstd.h"
typedef struct POOL_ctx_s POOL_ctx;


@@ -2,12 +2,13 @@
* Copyright (c) 2016 Tino Reichardt
* All rights reserved.
*
* You can contact the author at:
* - zstdmt source repository: https://github.com/mcmilk/zstdmt
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
*
* You can contact the author at:
* - zstdmt source repository: https://github.com/mcmilk/zstdmt
* You may select, at your option, one of the above-listed licenses.
*/
/**


@@ -2,12 +2,13 @@
* Copyright (c) 2016 Tino Reichardt
* All rights reserved.
*
* You can contact the author at:
* - zstdmt source repository: https://github.com/mcmilk/zstdmt
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
*
* You can contact the author at:
* - zstdmt source repository: https://github.com/mcmilk/zstdmt
* You may select, at your option, one of the above-listed licenses.
*/
#ifndef THREADING_H_938743


@@ -1,35 +1,15 @@
/*
* xxHash - Fast Hash algorithm
* Copyright (C) 2012-2016, Yann Collet
*
* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You can contact the author at :
* - xxHash homepage: http://www.xxhash.com
* - xxHash source repository : https://github.com/Cyan4973/xxHash
* xxHash - Fast Hash algorithm
* Copyright (c) 2012-2020, Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - xxHash homepage: http://www.xxhash.com
* - xxHash source repository : https://github.com/Cyan4973/xxHash
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
@@ -115,7 +95,7 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcp
/* *************************************
* Compiler Specific Options
***************************************/
#if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
# define INLINE_KEYWORD inline
#else
# define INLINE_KEYWORD
@@ -729,7 +709,9 @@ FORCE_INLINE_TEMPLATE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, c
state->total_len += len;
if (state->memsize + len < 32) { /* fill in tmp buffer */
if (input != NULL) {
XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
}
state->memsize += (U32)len;
return XXH_OK;
}


@@ -1,35 +1,15 @@
/*
xxHash - Extremely Fast Hash algorithm
Header File
Copyright (C) 2012-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- xxHash source repository : https://github.com/Cyan4973/xxHash
* xxHash - Extremely Fast Hash algorithm
* Header File
* Copyright (c) 2012-2020, Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - xxHash source repository : https://github.com/Cyan4973/xxHash
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
/* Notice extracted from xxHash homepage :


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -76,6 +76,7 @@ typedef enum {
/* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
ZSTD_error_frameIndex_tooLarge = 100,
ZSTD_error_seekableIO = 102,
ZSTD_error_dstBuffer_wrong = 104,
ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
} ZSTD_ErrorCode;


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -19,12 +19,15 @@
/*-*************************************
* Dependencies
***************************************/
#ifdef __aarch64__
#include <arm_neon.h>
#endif
#include "compiler.h"
#include "mem.h"
#include "debug.h" /* assert, DEBUGLOG, RAWLOG, g_debuglevel */
#include "error_private.h"
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"
#include "../zstd.h"
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#define HUF_STATIC_LINKING_ONLY
@@ -53,6 +56,31 @@ extern "C" {
#define MIN(a,b) ((a)<(b) ? (a) : (b))
#define MAX(a,b) ((a)>(b) ? (a) : (b))
/**
* Ignore: this is an internal helper.
*
* This is a helper function to help force C99-correctness during compilation.
* Under strict compilation modes, variadic macro arguments can't be empty.
* However, variadic function arguments can be. Using a function therefore lets
* us statically check that at least one (string) argument was passed,
* independent of the compilation flags.
*/
static INLINE_KEYWORD UNUSED_ATTR
void _force_has_format_string(const char *format, ...) {
(void)format;
}
/**
* Ignore: this is an internal helper.
*
* We want to force this function invocation to be syntactically correct, but
* we don't want to force runtime evaluation of its arguments.
*/
#define _FORCE_HAS_FORMAT_STRING(...) \
if (0) { \
_force_has_format_string(__VA_ARGS__); \
}
/**
* Return the specified error if the condition evaluates to true.
*
@@ -62,7 +90,9 @@ extern "C" {
*/
#define RETURN_ERROR_IF(cond, err, ...) \
if (cond) { \
RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", __FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \
RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \
__FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \
_FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
RAWLOG(3, ": " __VA_ARGS__); \
RAWLOG(3, "\n"); \
return ERROR(err); \
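A hedged illustration of what the added _FORCE_HAS_FORMAT_STRING() gains (hypothetical parse_header(), not upstream code): the never-executed call is still type-checked, so invoking the macro with no message argument becomes a compile error under strict C modes, where an empty __VA_ARGS__ is itself non-conforming.

static size_t parse_header(const void* src, size_t srcSize)
{
    (void)src;
    RETURN_ERROR_IF(srcSize < 4, srcSize_wrong,
                    "need at least 4 bytes, got %u", (unsigned)srcSize);
    /* RETURN_ERROR_IF(srcSize < 4, srcSize_wrong);  -- would not compile:
     * _force_has_format_string() would receive zero arguments. */
    return 4;
}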
@@ -75,7 +105,9 @@ extern "C" {
*/
#define RETURN_ERROR(err, ...) \
do { \
RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", __FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \
RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \
__FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \
_FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
RAWLOG(3, ": " __VA_ARGS__); \
RAWLOG(3, "\n"); \
return ERROR(err); \
@@ -90,7 +122,9 @@ extern "C" {
do { \
size_t const err_code = (err); \
if (ERR_isError(err_code)) { \
RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", __FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \
RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \
__FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \
_FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
RAWLOG(3, ": " __VA_ARGS__); \
RAWLOG(3, "\n"); \
return err_code; \
@@ -128,6 +162,8 @@ static const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
#define ZSTD_FRAMECHECKSUMSIZE 4
#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */
@@ -191,10 +227,22 @@ static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
/*-*******************************************
* Shared functions to include for inlining
*********************************************/
static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
static void ZSTD_copy8(void* dst, const void* src) {
#ifdef __aarch64__
vst1_u8((uint8_t*)dst, vld1_u8((const uint8_t*)src));
#else
memcpy(dst, src, 8);
#endif
}
#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
static void ZSTD_copy16(void* dst, const void* src) { memcpy(dst, src, 16); }
static void ZSTD_copy16(void* dst, const void* src) {
#ifdef __aarch64__
vst1q_u8((uint8_t*)dst, vld1q_u8((const uint8_t*)src));
#else
memcpy(dst, src, 16);
#endif
}
#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }
#define WILDCOPY_OVERLENGTH 32
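On aarch64 these copies now go through explicit NEON intrinsics instead of memcpy(). A minimal standalone demo of the same 128-bit load/store pair (assumes an aarch64 toolchain; not upstream code):

#ifdef __aarch64__
#include <arm_neon.h>
#include <stdio.h>

int main(void)
{
    unsigned char src[16], dst[16];
    int i;
    for (i = 0; i < 16; i++) src[i] = (unsigned char)i;
    vst1q_u8(dst, vld1q_u8(src));  /* one 128-bit load + one 128-bit store */
    for (i = 0; i < 16; i++) printf("%u ", dst[i]);
    printf("\n");
    return 0;
}
#endif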
@@ -213,7 +261,7 @@ typedef enum {
* - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.
* The src buffer must be before the dst buffer.
*/
MEM_STATIC FORCE_INLINE_ATTR DONT_VECTORIZE
MEM_STATIC FORCE_INLINE_ATTR
void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype)
{
ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
@@ -230,13 +278,18 @@ void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e
} while (op < oend);
} else {
assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
/* Separate out the first two COPY16() calls because the copy length is
/* Separate out the first COPY16() call because the copy length is
* almost certain to be short, so the branches have different
* probabilities.
* On gcc-9 unrolling once is +1.6%, twice is +2%, thrice is +1.8%.
* On clang-8 unrolling once is +1.4%, twice is +3.3%, thrice is +3%.
* probabilities. Since it is almost certain to be short, only do
* one COPY16() in the first call. Then, do two calls per loop since
* at that point it is more likely to have a high trip count.
*/
#ifndef __aarch64__
do {
COPY16(op, ip);
}
while (op < oend);
#else
COPY16(op, ip);
if (op >= oend) return;
do {
@@ -244,9 +297,29 @@ void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e
COPY16(op, ip);
}
while (op < oend);
#endif
}
}
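A hedged reminder of the contract implied above (a fragment; oend stands for the true end of the output buffer): since the loop copies in whole 16-byte chunks, callers must leave WILDCOPY_OVERLENGTH bytes of slack past the nominal end of dst.

assert((BYTE*)dst + length + WILDCOPY_OVERLENGTH <= oend);
ZSTD_wildcopy(dst, src, length, ZSTD_no_overlap);
/* may write up to WILDCOPY_OVERLENGTH bytes past dst + length */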
MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
size_t const length = MIN(dstCapacity, srcSize);
if (length > 0) {
memcpy(dst, src, length);
}
return length;
}
/* define "workspace is too large" as this number of times larger than needed */
#define ZSTD_WORKSPACETOOLARGE_FACTOR 3
/* when workspace is continuously too large
* during at least this number of times,
* context's memory usage is considered wasteful,
* because it's sized to handle a worst case scenario which rarely happens.
* In which case, resize it down to free some memory */
#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128
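A hedged sketch of the heuristic these two constants drive (not the actual implementation; tooLargeCount is a hypothetical counter living in the context): a workspace that stays more than ZSTD_WORKSPACETOOLARGE_FACTOR times larger than needed for ZSTD_WORKSPACETOOLARGE_MAXDURATION consecutive uses gets resized down.

if (workspaceSize > neededSize * ZSTD_WORKSPACETOOLARGE_FACTOR) {
    if (++tooLargeCount >= ZSTD_WORKSPACETOOLARGE_MAXDURATION) {
        /* free and reallocate the workspace at neededSize */
        tooLargeCount = 0;
    }
} else {
    tooLargeCount = 0;  /* streak broken: keep the current workspace */
}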
/*-*******************************************
* Private declarations
@@ -271,6 +344,31 @@ typedef struct {
U32 longLengthPos;
} seqStore_t;
typedef struct {
U32 litLength;
U32 matchLength;
} ZSTD_sequenceLength;
/**
* Returns the ZSTD_sequenceLength for the given sequences. It handles the decoding of long sequences
* indicated by longLengthPos and longLengthID, and adds MINMATCH back to matchLength.
*/
MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore, seqDef const* seq)
{
ZSTD_sequenceLength seqLen;
seqLen.litLength = seq->litLength;
seqLen.matchLength = seq->matchLength + MINMATCH;
if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) {
if (seqStore->longLengthID == 1) {
seqLen.litLength += 0xFFFF;
}
if (seqStore->longLengthID == 2) {
seqLen.matchLength += 0xFFFF;
}
}
return seqLen;
}
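A hedged usage sketch (a fragment; assumes, as in upstream, that seqStore->sequences points one past the last stored sequence): summing the number of bytes the stored sequences will reproduce.

static size_t total_sequence_bytes(const seqStore_t* seqStore)
{
    size_t total = 0;
    const seqDef* seq;
    for (seq = seqStore->sequencesStart; seq < seqStore->sequences; seq++) {
        ZSTD_sequenceLength const sl = ZSTD_getSequenceLength(seqStore, seq);
        total += sl.litLength + sl.matchLength;
    }
    return total;
}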
/**
* Contains the compressed frame size and an upper-bound for the decompressed frame size.
* Note: before using `compressedSize`, check for errors using ZSTD_isError().
@@ -297,8 +395,7 @@ MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus
{
# if defined(_MSC_VER) /* Visual */
unsigned long r=0;
_BitScanReverse(&r, val);
return (unsigned)r;
return _BitScanReverse(&r, val) ? (unsigned)r : 0;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */
return __builtin_clz (val) ^ 31;
# elif defined(__ICCARM__) /* IAR Intrinsic */


@@ -1,35 +1,15 @@
/* ******************************************************************
FSE : Finite State Entropy encoder
Copyright (C) 2013-present, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
* FSE : Finite State Entropy encoder
* Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
* - Public forum : https://groups.google.com/forum/#!forum/lz4c
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */
/* **************************************************************
@@ -37,14 +17,14 @@
****************************************************************/
#include <stdlib.h> /* malloc, free, qsort */
#include <string.h> /* memcpy, memset */
#include "compiler.h"
#include "mem.h" /* U32, U16, etc. */
#include "debug.h" /* assert, DEBUGLOG */
#include "../common/compiler.h"
#include "../common/mem.h" /* U32, U16, etc. */
#include "../common/debug.h" /* assert, DEBUGLOG */
#include "hist.h" /* HIST_count_wksp */
#include "bitstream.h"
#include "../common/bitstream.h"
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#include "error_private.h"
#include "../common/fse.h"
#include "../common/error_private.h"
/* **************************************************************
@@ -645,9 +625,6 @@ size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
#define CHECK_F(f) { CHECK_V_F(_var_err__, f); }
/* FSE_compress_wksp() :
* Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
* `wkspSize` size must be `(1<<tableLog)`.

View File

@@ -1,42 +1,22 @@
/* ******************************************************************
hist : Histogram functions
part of Finite State Entropy project
Copyright (C) 2013-present, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
* hist : Histogram functions
* part of Finite State Entropy project
* Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
* - Public forum : https://groups.google.com/forum/#!forum/lz4c
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */
/* --- dependencies --- */
#include "mem.h" /* U32, BYTE, etc. */
#include "debug.h" /* assert, DEBUGLOG */
#include "error_private.h" /* ERROR */
#include "../common/mem.h" /* U32, BYTE, etc. */
#include "../common/debug.h" /* assert, DEBUGLOG */
#include "../common/error_private.h" /* ERROR */
#include "hist.h"


@@ -1,36 +1,16 @@
/* ******************************************************************
hist : Histogram functions
part of Finite State Entropy project
Copyright (C) 2013-present, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
* hist : Histogram functions
* part of Finite State Entropy project
* Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
* - Public forum : https://groups.google.com/forum/#!forum/lz4c
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */
/* --- dependencies --- */


@@ -1,35 +1,15 @@
/* ******************************************************************
Huffman encoder, part of New Generation Entropy library
Copyright (C) 2013-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
* Huffman encoder, part of New Generation Entropy library
* Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
* - Public forum : https://groups.google.com/forum/#!forum/lz4c
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */
/* **************************************************************
@@ -45,14 +25,14 @@
****************************************************************/
#include <string.h> /* memcpy, memset */
#include <stdio.h> /* printf (debug) */
#include "compiler.h"
#include "bitstream.h"
#include "../common/compiler.h"
#include "../common/bitstream.h"
#include "hist.h"
#define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */
#include "fse.h" /* header compression */
#include "../common/fse.h" /* header compression */
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
#include "error_private.h"
#include "../common/huf.h"
#include "../common/error_private.h"
/* **************************************************************
@@ -60,8 +40,6 @@
****************************************************************/
#define HUF_isError ERR_isError
#define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */
#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
#define CHECK_F(f) { CHECK_V_F(_var_err__, f); }
/* **************************************************************
@@ -110,18 +88,18 @@ static size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weight
CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) );
/* Write table description header */
{ CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
{ CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), norm, maxSymbolValue, tableLog) );
op += hSize;
}
/* Compress */
CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) );
{ CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable) );
{ CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, CTable) );
if (cSize == 0) return 0; /* not enough space for compressed data */
op += cSize;
}
return op-ostart;
return (size_t)(op-ostart);
}
@@ -169,7 +147,7 @@
}
size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize)
size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights)
{
BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */
U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
@@ -192,9 +170,11 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void
} }
/* fill nbBits */
*hasZeroWeights = 0;
{ U32 n; for (n=0; n<nbSymbols; n++) {
const U32 w = huffWeight[n];
CTable[n].nbBits = (BYTE)(tableLog + 1 - w);
*hasZeroWeights |= (w == 0);
CTable[n].nbBits = (BYTE)(tableLog + 1 - w) & -(w != 0);
} }
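The new nbBits assignment is a branchless select: -(w != 0) is all-ones when w is nonzero and zero otherwise, so zero-weight symbols now get nbBits == 0 rather than the spurious tableLog + 1 the old expression produced. A standalone check of the idiom (not upstream code):

#include <assert.h>

int main(void)
{
    unsigned const tableLog = 11;
    unsigned w;
    for (w = 0; w <= tableLog + 1; w++) {
        unsigned const nbBits = (tableLog + 1 - w) & -(unsigned)(w != 0);
        assert(nbBits == (w ? tableLog + 1 - w : 0));
    }
    return 0;
}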
/* fill val */
@@ -240,7 +220,7 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
/* there are several too large elements (at least >= 2) */
{ int totalCost = 0;
const U32 baseCost = 1 << (largestBits - maxNbBits);
U32 n = lastNonNull;
int n = (int)lastNonNull;
while (huffNode[n].nbBits > maxNbBits) {
totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
@@ -255,22 +235,22 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
/* repay normalized cost */
{ U32 const noSymbol = 0xF0F0F0F0;
U32 rankLast[HUF_TABLELOG_MAX+2];
int pos;
/* Get pos of last (smallest) symbol per rank */
memset(rankLast, 0xF0, sizeof(rankLast));
{ U32 currentNbBits = maxNbBits;
int pos;
for (pos=n ; pos >= 0; pos--) {
if (huffNode[pos].nbBits >= currentNbBits) continue;
currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */
rankLast[maxNbBits-currentNbBits] = pos;
rankLast[maxNbBits-currentNbBits] = (U32)pos;
} }
while (totalCost > 0) {
U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1;
U32 nBitsToDecrease = BIT_highbit32((U32)totalCost) + 1;
for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
U32 highPos = rankLast[nBitsToDecrease];
U32 lowPos = rankLast[nBitsToDecrease-1];
U32 const highPos = rankLast[nBitsToDecrease];
U32 const lowPos = rankLast[nBitsToDecrease-1];
if (highPos == noSymbol) continue;
if (lowPos == noSymbol) break;
{ U32 const highTotal = huffNode[highPos].count;
@@ -297,7 +277,8 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
while (huffNode[n].nbBits == maxNbBits) n--;
huffNode[n+1].nbBits--;
rankLast[1] = n+1;
assert(n >= 0);
rankLast[1] = (U32)(n+1);
totalCost++;
continue;
}
@ -309,29 +290,36 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
return maxNbBits;
}
typedef struct {
U32 base;
U32 current;
} rankPos;
static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue)
typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
#define RANK_POSITION_TABLE_SIZE 32
typedef struct {
huffNodeTable huffNodeTbl;
rankPos rankPosition[RANK_POSITION_TABLE_SIZE];
} HUF_buildCTable_wksp_tables;
static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue, rankPos* rankPosition)
{
rankPos rank[32];
U32 n;
memset(rank, 0, sizeof(rank));
memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE);
for (n=0; n<=maxSymbolValue; n++) {
U32 r = BIT_highbit32(count[n] + 1);
rank[r].base ++;
rankPosition[r].base ++;
}
for (n=30; n>0; n--) rank[n-1].base += rank[n].base;
for (n=0; n<32; n++) rank[n].current = rank[n].base;
for (n=30; n>0; n--) rankPosition[n-1].base += rankPosition[n].base;
for (n=0; n<32; n++) rankPosition[n].current = rankPosition[n].base;
for (n=0; n<=maxSymbolValue; n++) {
U32 const c = count[n];
U32 const r = BIT_highbit32(c+1) + 1;
U32 pos = rank[r].current++;
while ((pos > rank[r].base) && (c > huffNode[pos-1].count)) {
U32 pos = rankPosition[r].current++;
while ((pos > rankPosition[r].base) && (c > huffNode[pos-1].count)) {
huffNode[pos] = huffNode[pos-1];
pos--;
}
@ -343,45 +331,48 @@ static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValu
/** HUF_buildCTable_wksp() :
* Same as HUF_buildCTable(), but using externally allocated scratch buffer.
* `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of HUF_CTABLE_WORKSPACE_SIZE_U32 unsigned.
* `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables).
*/
#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
{
nodeElt* const huffNode0 = (nodeElt*)workSpace;
HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)workSpace;
nodeElt* const huffNode0 = wksp_tables->huffNodeTbl;
nodeElt* const huffNode = huffNode0+1;
U32 n, nonNullRank;
int nonNullRank;
int lowS, lowN;
U16 nodeNb = STARTNODE;
U32 nodeRoot;
int nodeNb = STARTNODE;
int n, nodeRoot;
/* safety checks */
if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
if (wkspSize < sizeof(huffNodeTable)) return ERROR(workSpace_tooSmall);
if (wkspSize < sizeof(HUF_buildCTable_wksp_tables))
return ERROR(workSpace_tooSmall);
if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
return ERROR(maxSymbolValue_tooLarge);
memset(huffNode0, 0, sizeof(huffNodeTable));
/* sort, decreasing order */
HUF_sort(huffNode, count, maxSymbolValue);
HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition);
/* init for parents */
nonNullRank = maxSymbolValue;
nonNullRank = (int)maxSymbolValue;
while(huffNode[nonNullRank].count == 0) nonNullRank--;
lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
huffNode[lowS].parent = huffNode[lowS-1].parent = nodeNb;
huffNode[lowS].parent = huffNode[lowS-1].parent = (U16)nodeNb;
nodeNb++; lowS-=2;
for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */
/* create parents */
while (nodeNb <= nodeRoot) {
U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
int const n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
int const n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
huffNode[n1].parent = huffNode[n2].parent = nodeNb;
huffNode[n1].parent = huffNode[n2].parent = (U16)nodeNb;
nodeNb++;
}
@ -393,24 +384,25 @@ size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbo
huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
/* enforce maxTableLog */
maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits);
maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits);
/* fill result into tree (val, nbBits) */
{ U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
int const alphabetSize = (int)(maxSymbolValue + 1);
if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */
for (n=0; n<=nonNullRank; n++)
nbPerRank[huffNode[n].nbBits]++;
/* determine starting value per rank */
{ U16 min = 0;
for (n=maxNbBits; n>0; n--) {
for (n=(int)maxNbBits; n>0; n--) {
valPerRank[n] = min; /* get starting value within each rank */
min += nbPerRank[n];
min >>= 1;
} }
for (n=0; n<=maxSymbolValue; n++)
for (n=0; n<alphabetSize; n++)
tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */
for (n=0; n<=maxSymbolValue; n++)
for (n=0; n<alphabetSize; n++)
tree[n].val = valPerRank[tree[n].nbBits]++; /* assign value within rank, symbol order */
}
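The valPerRank loop above is the canonical-code assignment step: ranks are walked from longest to shortest, and each rank's starting value is half the running total of codes at longer lengths. A minimal standalone sketch of just that loop, with assumed counts (two 2-bit symbols and four 3-bit symbols, maxNbBits = 3); the values are illustrative only:

#include <stdio.h>

int main(void)
{
    unsigned short nbPerRank[4]  = { 0, 0, 2, 4 };  /* two 2-bit, four 3-bit symbols */
    unsigned short valPerRank[4] = { 0 };
    unsigned short min = 0;
    int n;
    for (n = 3; n > 0; n--) {
        valPerRank[n] = min;     /* starting value within each rank */
        min += nbPerRank[n];
        min >>= 1;
    }
    /* 3-bit codes start at 0 (000..011), 2-bit codes start at 2 (10, 11) :
     * a prefix-free canonical code */
    printf("valPerRank[2]=%u valPerRank[3]=%u\n", valPerRank[2], valPerRank[3]);
    return 0;
}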
@ -423,11 +415,11 @@ size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbo
*/
size_t HUF_buildCTable (HUF_CElt* tree, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits)
{
huffNodeTable nodeTable;
return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, nodeTable, sizeof(nodeTable));
HUF_buildCTable_wksp_tables workspace;
return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, &workspace, sizeof(workspace));
}
static size_t HUF_estimateCompressedSize(HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
{
size_t nbBits = 0;
int s;
@ -437,7 +429,7 @@ static size_t HUF_estimateCompressedSize(HUF_CElt* CTable, const unsigned* count
return nbBits >> 3;
}
static int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
int bad = 0;
int s;
for (s = 0; s <= (int)maxSymbolValue; ++s) {
@ -476,7 +468,7 @@ HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
/* init */
if (dstSize < 8) return 0; /* not enough space to compress */
{ size_t const initErr = BIT_initCStream(&bitC, op, oend-op);
{ size_t const initErr = BIT_initCStream(&bitC, op, (size_t)(oend-op));
if (HUF_isError(initErr)) return 0; }
n = srcSize & ~3; /* join to mod 4 */
@ -573,7 +565,8 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
if (srcSize < 12) return 0; /* no saving possible : too small input */
op += 6; /* jumpTable */
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) );
assert(op <= oend);
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
if (cSize==0) return 0;
assert(cSize <= 65535);
MEM_writeLE16(ostart, (U16)cSize);
@ -581,7 +574,8 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
}
ip += segmentSize;
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) );
assert(op <= oend);
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
if (cSize==0) return 0;
assert(cSize <= 65535);
MEM_writeLE16(ostart+2, (U16)cSize);
@ -589,7 +583,8 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
}
ip += segmentSize;
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) );
assert(op <= oend);
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
if (cSize==0) return 0;
assert(cSize <= 65535);
MEM_writeLE16(ostart+4, (U16)cSize);
@ -597,12 +592,14 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
}
ip += segmentSize;
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, iend-ip, CTable, bmi2) );
assert(op <= oend);
assert(ip <= iend);
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) );
if (cSize==0) return 0;
op += cSize;
}
return op-ostart;
return (size_t)(op-ostart);
}
size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
@ -618,20 +615,21 @@ static size_t HUF_compressCTable_internal(
HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2)
{
size_t const cSize = (nbStreams==HUF_singleStream) ?
HUF_compress1X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2) :
HUF_compress4X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2);
HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2) :
HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2);
if (HUF_isError(cSize)) { return cSize; }
if (cSize==0) { return 0; } /* uncompressible */
op += cSize;
/* check compressibility */
assert(op >= ostart);
if ((size_t)(op-ostart) >= srcSize-1) { return 0; }
return op-ostart;
return (size_t)(op-ostart);
}
typedef struct {
unsigned count[HUF_SYMBOLVALUE_MAX + 1];
HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1];
huffNodeTable nodeTable;
HUF_buildCTable_wksp_tables buildCTable_wksp;
} HUF_compress_tables_t;
/* HUF_compress_internal() :
@ -650,6 +648,8 @@ HUF_compress_internal (void* dst, size_t dstSize,
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart;
HUF_STATIC_ASSERT(sizeof(*table) <= HUF_WORKSPACE_SIZE);
/* checks & inits */
if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall);
@ -691,7 +691,7 @@ HUF_compress_internal (void* dst, size_t dstSize,
huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
{ size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
maxSymbolValue, huffLog,
table->nodeTable, sizeof(table->nodeTable));
&table->buildCTable_wksp, sizeof(table->buildCTable_wksp));
CHECK_F(maxBits);
huffLog = (U32)maxBits;
/* Zero unused symbols in CTable, so we can check it for validity */

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@ -18,7 +18,7 @@
/*-*************************************
* Dependencies
***************************************/
#include "zstd_internal.h"
#include "../common/zstd_internal.h"
#include "zstd_cwksp.h"
#ifdef ZSTD_MULTITHREAD
# include "zstdmt_compress.h"
@ -166,6 +166,7 @@ typedef struct {
typedef struct {
ZSTD_window_t window; /* State for the window round buffer management */
ldmEntry_t* hashTable;
U32 loadedDictEnd;
BYTE* bucketOffsets; /* Next position in bucket to insert entry */
U64 hashPower; /* Used to compute the rolling hash.
* Depends on ldmParams.minMatchLength */
@ -249,6 +250,7 @@ struct ZSTD_CCtx_s {
size_t staticSize;
SeqCollector seqCollector;
int isFirstBlock;
int initialized;
seqStore_t seqStore; /* sequences storage ptrs */
ldmState_t ldmState; /* long distance matching state */
@ -324,6 +326,31 @@ MEM_STATIC U32 ZSTD_MLcode(U32 mlBase)
return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
}
typedef struct repcodes_s {
U32 rep[3];
} repcodes_t;
MEM_STATIC repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
{
repcodes_t newReps;
if (offset >= ZSTD_REP_NUM) { /* full offset */
newReps.rep[2] = rep[1];
newReps.rep[1] = rep[0];
newReps.rep[0] = offset - ZSTD_REP_MOVE;
} else { /* repcode */
U32 const repCode = offset + ll0;
if (repCode > 0) { /* note : if repCode==0, no change */
U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
newReps.rep[2] = (repCode >= 2) ? rep[1] : rep[2];
newReps.rep[1] = rep[0];
newReps.rep[0] = currentOffset;
} else { /* repCode == 0 */
memcpy(&newReps, rep, sizeof(newReps));
}
}
return newReps;
}
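ZSTD_updateRep above encodes the repcode history rule: a full offset pushes onto the 3-entry stack, while a repcode rotates the matched entry to the front (with repCode == 3 meaning rep[0] - 1). A minimal standalone sketch, with ZSTD_REP_NUM (3) and ZSTD_REP_MOVE (2) hard-coded so it compiles on its own:

#include <stdio.h>

#define REP_NUM  3
#define REP_MOVE (REP_NUM - 1)

static void updateRep(unsigned rep[3], unsigned offset, unsigned ll0)
{
    if (offset >= REP_NUM) {            /* full offset : shift the history */
        rep[2] = rep[1];
        rep[1] = rep[0];
        rep[0] = offset - REP_MOVE;
    } else {                            /* repcode */
        unsigned const repCode = offset + ll0;
        if (repCode > 0) {              /* repCode == 0 : history unchanged */
            unsigned const cur = (repCode == REP_NUM) ? rep[0] - 1 : rep[repCode];
            rep[2] = (repCode >= 2) ? rep[1] : rep[2];
            rep[1] = rep[0];
            rep[0] = cur;
        }
    }
}

int main(void)
{
    unsigned rep[3] = { 1, 4, 8 };
    updateRep(rep, 2, 0);               /* repcode 2 promotes rep[2] to front */
    printf("%u %u %u\n", rep[0], rep[1], rep[2]);   /* prints: 8 1 4 */
    return 0;
}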
/* ZSTD_cParam_withinBounds:
* @return 1 if value is within cParam bounds,
* 0 otherwise */
@ -336,6 +363,30 @@ MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
return 1;
}
/* ZSTD_noCompressBlock() :
* Writes uncompressed block to dst buffer from given src.
* Returns the size of the block */
MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
{
U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
dstSize_tooSmall, "dst buf too small for uncompressed block");
MEM_writeLE24(dst, cBlockHeader24);
memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
return ZSTD_blockHeaderSize + srcSize;
}
MEM_STATIC size_t ZSTD_rleCompressBlock (void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock)
{
BYTE* const op = (BYTE*)dst;
U32 const cBlockHeader = lastBlock + (((U32)bt_rle)<<1) + (U32)(srcSize << 3);
RETURN_ERROR_IF(dstCapacity < 4, dstSize_tooSmall, "");
MEM_writeLE24(op, cBlockHeader);
op[3] = src;
return 4;
}
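Both helpers above write the standard 3-byte zstd block header: bit 0 is the last-block flag, bits 1-2 the block type (raw = 0, RLE = 1, compressed = 2), and bits 3-23 the size. A worked computation with assumed values:

#include <stdio.h>

int main(void)
{
    unsigned const lastBlock = 1;
    unsigned const bt_rle    = 1;                    /* raw=0, rle=1, compressed=2 */
    unsigned const srcSize   = 100;                  /* regenerated size of the RLE block */
    unsigned const cBlockHeader24 = lastBlock + (bt_rle << 1) + (srcSize << 3);
    printf("header = 0x%06X\n", cBlockHeader24);     /* (100<<3) + 2 + 1 = 0x000323 */
    return 0;
}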
/* ZSTD_minGain() :
* minimum compression required
* to generate a compress block or a compressed literals section.
@ -348,6 +399,21 @@ MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
return (srcSize >> minlog) + 2;
}
MEM_STATIC int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams)
{
switch (cctxParams->literalCompressionMode) {
case ZSTD_lcm_huffman:
return 0;
case ZSTD_lcm_uncompressed:
return 1;
default:
assert(0 /* impossible: pre-validated */);
/* fall-through */
case ZSTD_lcm_auto:
return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
}
}
/*! ZSTD_safecopyLiterals() :
* memcpy() function that won't read beyond more than WILDCOPY_OVERLENGTH bytes past ilimit_w.
* Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single
@ -433,8 +499,7 @@ static unsigned ZSTD_NbCommonBytes (size_t val)
if (MEM_64bits()) {
# if defined(_MSC_VER) && defined(_WIN64)
unsigned long r = 0;
_BitScanForward64( &r, (U64)val );
return (unsigned)(r>>3);
return _BitScanForward64( &r, (U64)val ) ? (unsigned)(r >> 3) : 0;
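        /* note : when val == 0, _BitScanForward64 returns 0 and leaves r
         * undefined; the ternary avoids reading r in that case */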
# elif defined(__GNUC__) && (__GNUC__ >= 4)
return (__builtin_ctzll((U64)val) >> 3);
# else
@ -451,8 +516,7 @@ static unsigned ZSTD_NbCommonBytes (size_t val)
} else { /* 32 bits */
# if defined(_MSC_VER)
unsigned long r=0;
_BitScanForward( &r, (U32)val );
return (unsigned)(r>>3);
return _BitScanForward( &r, (U32)val ) ? (unsigned)(r >> 3) : 0;
# elif defined(__GNUC__) && (__GNUC__ >= 3)
return (__builtin_ctz((U32)val) >> 3);
# else
@ -467,8 +531,7 @@ static unsigned ZSTD_NbCommonBytes (size_t val)
if (MEM_64bits()) {
# if defined(_MSC_VER) && defined(_WIN64)
unsigned long r = 0;
_BitScanReverse64( &r, val );
return (unsigned)(r>>3);
return _BitScanReverse64( &r, val ) ? (unsigned)(r >> 3) : 0;
# elif defined(__GNUC__) && (__GNUC__ >= 4)
return (__builtin_clzll(val) >> 3);
# else
@ -482,8 +545,7 @@ static unsigned ZSTD_NbCommonBytes (size_t val)
} else { /* 32 bits */
# if defined(_MSC_VER)
unsigned long r = 0;
_BitScanReverse( &r, (unsigned long)val );
return (unsigned)(r>>3);
return _BitScanReverse( &r, (unsigned long)val ) ? (unsigned)(r >> 3) : 0;
# elif defined(__GNUC__) && (__GNUC__ >= 3)
return (__builtin_clz((U32)val) >> 3);
# else
@ -730,7 +792,10 @@ MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
*/
U32 const cycleMask = (1U << cycleLog) - 1;
U32 const current = (U32)((BYTE const*)src - window->base);
U32 const newCurrent = (current & cycleMask) + maxDist;
U32 const currentCycle0 = current & cycleMask;
/* Exclude zero so that newCurrent - maxDist >= 1. */
U32 const currentCycle1 = currentCycle0 == 0 ? (1U << cycleLog) : currentCycle0;
U32 const newCurrent = currentCycle1 + maxDist;
U32 const correction = current - newCurrent;
assert((maxDist & cycleMask) == 0);
assert(current > newCurrent);
@ -739,8 +804,17 @@ MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
window->base += correction;
window->dictBase += correction;
window->lowLimit -= correction;
window->dictLimit -= correction;
if (window->lowLimit <= correction) window->lowLimit = 1;
else window->lowLimit -= correction;
if (window->dictLimit <= correction) window->dictLimit = 1;
else window->dictLimit -= correction;
/* Ensure we can still reference the full window. */
assert(newCurrent >= maxDist);
assert(newCurrent - maxDist >= 1);
/* Ensure that lowLimit and dictLimit didn't underflow. */
assert(window->lowLimit <= newCurrent);
assert(window->dictLimit <= newCurrent);
DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction,
window->lowLimit);
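The net effect of the correction arithmetic above can be checked with a toy computation; the values below are assumed for illustration (cycleLog = 2, so maxDist = 4, with an index about to overflow):

#include <stdio.h>

int main(void)
{
    unsigned const cycleLog  = 2;                   /* toy cycle of 4 */
    unsigned const maxDist   = 1u << cycleLog;
    unsigned const cycleMask = (1u << cycleLog) - 1;
    unsigned const current   = 0x80000001u;
    unsigned const currentCycle0 = current & cycleMask;                           /* 1 */
    unsigned const currentCycle1 = (currentCycle0 == 0) ? (1u << cycleLog) : currentCycle0;
    unsigned const newCurrent  = currentCycle1 + maxDist;                         /* 5 */
    unsigned const correction  = current - newCurrent;
    printf("newCurrent=%u correction=0x%X\n", newCurrent, correction);  /* 5, 0x7FFFFFFC */
    return 0;
}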
@ -844,6 +918,15 @@ ZSTD_checkDictValidity(const ZSTD_window_t* window,
} } }
}
MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
memset(window, 0, sizeof(*window));
window->base = (BYTE const*)"";
window->dictBase = (BYTE const*)"";
window->dictLimit = 1; /* start from 1, so that 1st position is valid */
window->lowLimit = 1; /* it ensures first and later CCtx usages compress the same */
window->nextSrc = window->base + 1; /* see issue #1241 */
}
/**
* ZSTD_window_update():
* Updates the window by appending [src, src + srcSize) to the window.
@ -857,6 +940,10 @@ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
BYTE const* const ip = (BYTE const*)src;
U32 contiguous = 1;
DEBUGLOG(5, "ZSTD_window_update");
if (srcSize == 0)
return contiguous;
assert(window->base != NULL);
assert(window->dictBase != NULL);
/* Check if blocks follow each other */
if (src != window->nextSrc) {
/* not contiguous */
@ -867,7 +954,7 @@ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
window->dictLimit = (U32)distanceFromBase;
window->dictBase = window->base;
window->base = ip - distanceFromBase;
// ms->nextToUpdate = window->dictLimit;
/* ms->nextToUpdate = window->dictLimit; */
if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit; /* too small extDict */
contiguous = 0;
}
@ -883,6 +970,9 @@ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
return contiguous;
}
/**
* Returns the lowest allowed match index. It may either be in the ext-dict or the prefix.
*/
MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 current, unsigned windowLog)
{
U32 const maxDistance = 1U << windowLog;
@ -893,6 +983,19 @@ MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 current
return matchLowest;
}
/**
* Returns the lowest allowed match index in the prefix.
*/
MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 current, unsigned windowLog)
{
U32 const maxDistance = 1U << windowLog;
U32 const lowestValid = ms->window.dictLimit;
U32 const withinWindow = (current - lowestValid > maxDistance) ? current - maxDistance : lowestValid;
U32 const isDictionary = (ms->loadedDictEnd != 0);
U32 const matchLowest = isDictionary ? lowestValid : withinWindow;
return matchLowest;
}
/* debug functions */
@ -931,6 +1034,21 @@ MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
}
#endif
/* ===============================================================
* Shared internal declarations
* These prototypes may be called from sources not in lib/compress
* =============================================================== */
/* ZSTD_loadCEntropy() :
* dict : must point at beginning of a valid zstd dictionary.
* return : size of dictionary header (size of magic number + dict ID + entropy tables)
* assumptions : magic number supposed already checked
* and dictSize >= 8 */
size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
short* offcodeNCount, unsigned* offcodeMaxValue,
const void* const dict, size_t dictSize);
void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs);
/* ==============================================================
* Private declarations
@ -940,6 +1058,7 @@ MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
/* ZSTD_getCParamsFromCCtxParams() :
* cParams are built depending on compressionLevel, src size hints,
* LDM and manually set compression parameters.
* Note: srcSizeHint == 0 means 0!
*/
ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize);
@ -999,5 +1118,8 @@ size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
*/
size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);
/** ZSTD_cycleLog() :
* condition for correct operation : hashLog > 1 */
U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat);
#endif /* ZSTD_COMPRESS_H */

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@ -18,7 +18,7 @@ size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src,
BYTE* const ostart = (BYTE* const)dst;
U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall);
RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, "");
switch(flSize)
{
@ -36,6 +36,7 @@ size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src,
}
memcpy(ostart + flSize, src, srcSize);
DEBUGLOG(5, "Raw literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
return srcSize + flSize;
}
@ -62,6 +63,7 @@ size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void*
}
ostart[flSize] = *(const BYTE*)src;
DEBUGLOG(5, "RLE literals: %u -> %u", (U32)srcSize, (U32)flSize + 1);
return flSize+1;
}
@ -80,8 +82,8 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
symbolEncodingType_e hType = set_compressed;
size_t cLitSize;
DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i)",
disableLiteralCompression);
DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i srcSize=%u)",
disableLiteralCompression, (U32)srcSize);
/* Prepare nextEntropy assuming reusing the existing table */
memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
@ -102,14 +104,15 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
cLitSize = singleStream ?
HUF_compress1X_repeat(
ostart+lhSize, dstCapacity-lhSize, src, srcSize,
255, 11, entropyWorkspace, entropyWorkspaceSize,
HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
(HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2) :
HUF_compress4X_repeat(
ostart+lhSize, dstCapacity-lhSize, src, srcSize,
255, 11, entropyWorkspace, entropyWorkspaceSize,
HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
(HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
if (repeat != HUF_repeat_none) {
/* reused the existing table */
DEBUGLOG(5, "Reusing previous huffman table");
hType = set_repeat;
}
}
@ -150,5 +153,6 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
default: /* not possible : lhSize is {3,4,5} */
assert(0);
}
DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)srcSize, (U32)(lhSize+cLitSize));
return lhSize+cLitSize;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@ -60,7 +60,7 @@ static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
BYTE wksp[FSE_NCOUNTBOUND];
S16 norm[MaxSeq + 1];
const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max));
FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max), "");
return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
}
@ -86,7 +86,7 @@ static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t
* Returns the cost in bits of encoding the distribution in count using ctable.
* Returns an error if ctable cannot represent all the symbols in count.
*/
static size_t ZSTD_fseBitCost(
size_t ZSTD_fseBitCost(
FSE_CTable const* ctable,
unsigned const* count,
unsigned const max)
@ -96,18 +96,22 @@ static size_t ZSTD_fseBitCost(
unsigned s;
FSE_CState_t cstate;
FSE_initCState(&cstate, ctable);
RETURN_ERROR_IF(ZSTD_getFSEMaxSymbolValue(ctable) < max, GENERIC,
"Repeat FSE_CTable has maxSymbolValue %u < %u",
if (ZSTD_getFSEMaxSymbolValue(ctable) < max) {
DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u",
ZSTD_getFSEMaxSymbolValue(ctable), max);
return ERROR(GENERIC);
}
for (s = 0; s <= max; ++s) {
unsigned const tableLog = cstate.stateLog;
unsigned const badCost = (tableLog + 1) << kAccuracyLog;
unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
if (count[s] == 0)
continue;
RETURN_ERROR_IF(bitCost >= badCost, GENERIC,
"Repeat FSE_CTable has Prob[%u] == 0", s);
cost += count[s] * bitCost;
if (bitCost >= badCost) {
DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s);
return ERROR(GENERIC);
}
cost += (size_t)count[s] * bitCost;
}
return cost >> kAccuracyLog;
}
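ZSTD_fseBitCost accumulates per-symbol costs in fixed point with kAccuracyLog fractional bits, then shifts back to whole bits on return. A sketch of that arithmetic, assuming kAccuracyLog = 8 as used in this function:

#include <stdio.h>

int main(void)
{
    unsigned const kAccuracyLog = 8;                    /* 8 fractional bits */
    size_t const bitCost = (2u << kAccuracyLog)
                         + (1u << (kAccuracyLog - 1));  /* 2.5 bits in Q8 */
    size_t const cost = 100 * bitCost;                  /* symbol seen 100 times */
    printf("%u bits\n", (unsigned)(cost >> kAccuracyLog));  /* prints: 250 */
    return 0;
}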
@ -117,7 +121,7 @@ static size_t ZSTD_fseBitCost(
* table described by norm. The max symbol support by norm is assumed >= max.
* norm must be valid for every symbol with non-zero probability in count.
*/
static size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
unsigned const* count, unsigned const max)
{
unsigned const shift = 8 - accuracyLog;
@ -125,7 +129,7 @@ static size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
unsigned s;
assert(accuracyLog <= 8);
for (s = 0; s <= max; ++s) {
unsigned const normAcc = norm[s] != -1 ? norm[s] : 1;
unsigned const normAcc = (norm[s] != -1) ? (unsigned)norm[s] : 1;
unsigned const norm256 = normAcc << shift;
assert(norm256 > 0);
assert(norm256 < 256);
@ -230,15 +234,15 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
switch (type) {
case set_rle:
FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max));
RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall);
FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max), "");
RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall, "not enough space");
*op = codeTable[0];
return 1;
case set_repeat:
memcpy(nextCTable, prevCTable, prevCTableSize);
return 0;
case set_basic:
FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize)); /* note : could be pre-calculated */
FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize), ""); /* note : could be pre-calculated */
return 0;
case set_compressed: {
S16 norm[MaxSeq + 1];
@ -249,14 +253,14 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
nbSeq_1--;
}
assert(nbSeq_1 > 1);
FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max), "");
{ size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
FORWARD_IF_ERROR(NCountSize);
FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, entropyWorkspace, entropyWorkspaceSize));
FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed");
FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, entropyWorkspace, entropyWorkspaceSize), "");
return NCountSize;
}
}
default: assert(0); RETURN_ERROR(GENERIC);
default: assert(0); RETURN_ERROR(GENERIC, "impossible to reach");
}
}
@ -290,7 +294,7 @@ ZSTD_encodeSequences_body(
if (MEM_32bits()) BIT_flushBits(&blockStream);
if (longOffsets) {
U32 const ofBits = ofCodeTable[nbSeq-1];
int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
if (extraBits) {
BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
BIT_flushBits(&blockStream);
@ -327,7 +331,7 @@ ZSTD_encodeSequences_body(
BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
if (longOffsets) {
int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
if (extraBits) {
BIT_addBits(&blockStream, sequences[n].offset, extraBits);
BIT_flushBits(&blockStream); /* (7)*/

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@ -11,8 +11,8 @@
#ifndef ZSTD_COMPRESS_SEQUENCES_H
#define ZSTD_COMPRESS_SEQUENCES_H
#include "fse.h" /* FSE_repeat, FSE_CTable */
#include "zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */
#include "../common/fse.h" /* FSE_repeat, FSE_CTable */
#include "../common/zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */
typedef enum {
ZSTD_defaultDisallowed = 0,
@ -44,4 +44,11 @@ size_t ZSTD_encodeSequences(
FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2);
size_t ZSTD_fseBitCost(
FSE_CTable const* ctable,
unsigned const* count,
unsigned const max);
size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
unsigned const* count, unsigned const max);
#endif /* ZSTD_COMPRESS_SEQUENCES_H */

View File

@ -0,0 +1,845 @@
/*
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
/*-*************************************
* Dependencies
***************************************/
#include "zstd_compress_superblock.h"
#include "../common/zstd_internal.h" /* ZSTD_getSequenceLength */
#include "hist.h" /* HIST_countFast_wksp */
#include "zstd_compress_internal.h"
#include "zstd_compress_sequences.h"
#include "zstd_compress_literals.h"
/*-*************************************
* Superblock entropy buffer structs
***************************************/
/** ZSTD_hufCTablesMetadata_t :
* Stores Literals Block Type for a super-block in hType, and
* huffman tree description in hufDesBuffer.
* hufDesSize refers to the size of huffman tree description in bytes.
* This metadata is populated in ZSTD_buildSuperBlockEntropy_literal() */
typedef struct {
symbolEncodingType_e hType;
BYTE hufDesBuffer[500]; /* TODO give name to this value */
size_t hufDesSize;
} ZSTD_hufCTablesMetadata_t;
/** ZSTD_fseCTablesMetadata_t :
* Stores symbol compression modes for a super-block in {ll, ol, ml}Type, and
* fse tables in fseTablesBuffer.
* fseTablesSize refers to the size of fse tables in bytes.
* This metadata is populated in ZSTD_buildSuperBlockEntropy_sequences() */
typedef struct {
symbolEncodingType_e llType;
symbolEncodingType_e ofType;
symbolEncodingType_e mlType;
BYTE fseTablesBuffer[500]; /* TODO give name to this value */
size_t fseTablesSize;
size_t lastCountSize; /* This is to account for bug in 1.3.4. More detail in ZSTD_compressSubBlock_sequences() */
} ZSTD_fseCTablesMetadata_t;
typedef struct {
ZSTD_hufCTablesMetadata_t hufMetadata;
ZSTD_fseCTablesMetadata_t fseMetadata;
} ZSTD_entropyCTablesMetadata_t;
/** ZSTD_buildSuperBlockEntropy_literal() :
* Builds entropy for the super-block literals.
* Stores literals block type (raw, rle, compressed, repeat) and
* huffman description table to hufMetadata.
* @return : size of huffman description table or error code */
static size_t ZSTD_buildSuperBlockEntropy_literal(void* const src, size_t srcSize,
const ZSTD_hufCTables_t* prevHuf,
ZSTD_hufCTables_t* nextHuf,
ZSTD_hufCTablesMetadata_t* hufMetadata,
const int disableLiteralsCompression,
void* workspace, size_t wkspSize)
{
BYTE* const wkspStart = (BYTE*)workspace;
BYTE* const wkspEnd = wkspStart + wkspSize;
BYTE* const countWkspStart = wkspStart;
unsigned* const countWksp = (unsigned*)workspace;
const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);
BYTE* const nodeWksp = countWkspStart + countWkspSize;
const size_t nodeWkspSize = wkspEnd-nodeWksp;
unsigned maxSymbolValue = 255;
unsigned huffLog = HUF_TABLELOG_DEFAULT;
HUF_repeat repeat = prevHuf->repeatMode;
DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_literal (srcSize=%zu)", srcSize);
/* Prepare nextEntropy assuming reusing the existing table */
memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
if (disableLiteralsCompression) {
DEBUGLOG(5, "set_basic - disabled");
hufMetadata->hType = set_basic;
return 0;
}
/* small ? don't even attempt compression (speed opt) */
# define COMPRESS_LITERALS_SIZE_MIN 63
{ size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
if (srcSize <= minLitSize) {
DEBUGLOG(5, "set_basic - too small");
hufMetadata->hType = set_basic;
return 0;
}
}
/* Scan input and build symbol stats */
{ size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize);
FORWARD_IF_ERROR(largest, "HIST_count_wksp failed");
if (largest == srcSize) {
DEBUGLOG(5, "set_rle");
hufMetadata->hType = set_rle;
return 0;
}
if (largest <= (srcSize >> 7)+4) {
DEBUGLOG(5, "set_basic - no gain");
hufMetadata->hType = set_basic;
return 0;
}
}
/* Validate the previous Huffman table */
if (repeat == HUF_repeat_check && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) {
repeat = HUF_repeat_none;
}
/* Build Huffman Tree */
memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable));
huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
{ size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp,
maxSymbolValue, huffLog,
nodeWksp, nodeWkspSize);
FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp");
huffLog = (U32)maxBits;
{ /* Build and write the CTable */
size_t const newCSize = HUF_estimateCompressedSize(
(HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue);
size_t const hSize = HUF_writeCTable(
hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer),
(HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog);
/* Check against repeating the previous CTable */
if (repeat != HUF_repeat_none) {
size_t const oldCSize = HUF_estimateCompressedSize(
(HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue);
if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) {
DEBUGLOG(5, "set_repeat - smaller");
memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
hufMetadata->hType = set_repeat;
return 0;
}
}
if (newCSize + hSize >= srcSize) {
DEBUGLOG(5, "set_basic - no gains");
memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
hufMetadata->hType = set_basic;
return 0;
}
DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize);
hufMetadata->hType = set_compressed;
nextHuf->repeatMode = HUF_repeat_check;
return hSize;
}
}
}
/** ZSTD_buildSuperBlockEntropy_sequences() :
* Builds entropy for the super-block sequences.
* Stores symbol compression modes and fse table to fseMetadata.
* @return : size of fse tables or error code */
static size_t ZSTD_buildSuperBlockEntropy_sequences(seqStore_t* seqStorePtr,
const ZSTD_fseCTables_t* prevEntropy,
ZSTD_fseCTables_t* nextEntropy,
const ZSTD_CCtx_params* cctxParams,
ZSTD_fseCTablesMetadata_t* fseMetadata,
void* workspace, size_t wkspSize)
{
BYTE* const wkspStart = (BYTE*)workspace;
BYTE* const wkspEnd = wkspStart + wkspSize;
BYTE* const countWkspStart = wkspStart;
unsigned* const countWksp = (unsigned*)workspace;
const size_t countWkspSize = (MaxSeq + 1) * sizeof(unsigned);
BYTE* const cTableWksp = countWkspStart + countWkspSize;
const size_t cTableWkspSize = wkspEnd-cTableWksp;
ZSTD_strategy const strategy = cctxParams->cParams.strategy;
FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
const BYTE* const ofCodeTable = seqStorePtr->ofCode;
const BYTE* const llCodeTable = seqStorePtr->llCode;
const BYTE* const mlCodeTable = seqStorePtr->mlCode;
size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
BYTE* const ostart = fseMetadata->fseTablesBuffer;
BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer);
BYTE* op = ostart;
assert(cTableWkspSize >= (1 << MaxFSELog) * sizeof(FSE_FUNCTION_TYPE));
DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_sequences (nbSeq=%zu)", nbSeq);
memset(workspace, 0, wkspSize);
fseMetadata->lastCountSize = 0;
/* convert length/distances into codes */
ZSTD_seqToCodes(seqStorePtr);
/* build CTable for Literal Lengths */
{ U32 LLtype;
unsigned max = MaxLL;
size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, llCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
DEBUGLOG(5, "Building LL table");
nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode,
countWksp, max, mostFrequent, nbSeq,
LLFSELog, prevEntropy->litlengthCTable,
LL_defaultNorm, LL_defaultNormLog,
ZSTD_defaultAllowed, strategy);
assert(set_basic < set_compressed && set_rle < set_compressed);
assert(!(LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
countWksp, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
prevEntropy->litlengthCTable, sizeof(prevEntropy->litlengthCTable),
cTableWksp, cTableWkspSize);
FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed");
if (LLtype == set_compressed)
fseMetadata->lastCountSize = countSize;
op += countSize;
fseMetadata->llType = (symbolEncodingType_e) LLtype;
} }
/* build CTable for Offsets */
{ U32 Offtype;
unsigned max = MaxOff;
size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, ofCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
/* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
DEBUGLOG(5, "Building OF table");
nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode,
countWksp, max, mostFrequent, nbSeq,
OffFSELog, prevEntropy->offcodeCTable,
OF_defaultNorm, OF_defaultNormLog,
defaultPolicy, strategy);
assert(!(Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
countWksp, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
prevEntropy->offcodeCTable, sizeof(prevEntropy->offcodeCTable),
cTableWksp, cTableWkspSize);
FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed");
if (Offtype == set_compressed)
fseMetadata->lastCountSize = countSize;
op += countSize;
fseMetadata->ofType = (symbolEncodingType_e) Offtype;
} }
/* build CTable for MatchLengths */
{ U32 MLtype;
unsigned max = MaxML;
size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, mlCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode,
countWksp, max, mostFrequent, nbSeq,
MLFSELog, prevEntropy->matchlengthCTable,
ML_defaultNorm, ML_defaultNormLog,
ZSTD_defaultAllowed, strategy);
assert(!(MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
countWksp, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
prevEntropy->matchlengthCTable, sizeof(prevEntropy->matchlengthCTable),
cTableWksp, cTableWkspSize);
FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed");
if (MLtype == set_compressed)
fseMetadata->lastCountSize = countSize;
op += countSize;
fseMetadata->mlType = (symbolEncodingType_e) MLtype;
} }
assert((size_t) (op-ostart) <= sizeof(fseMetadata->fseTablesBuffer));
return op-ostart;
}
/** ZSTD_buildSuperBlockEntropy() :
* Builds entropy for the super-block.
* @return : 0 on success or error code */
static size_t
ZSTD_buildSuperBlockEntropy(seqStore_t* seqStorePtr,
const ZSTD_entropyCTables_t* prevEntropy,
ZSTD_entropyCTables_t* nextEntropy,
const ZSTD_CCtx_params* cctxParams,
ZSTD_entropyCTablesMetadata_t* entropyMetadata,
void* workspace, size_t wkspSize)
{
size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart;
DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy");
entropyMetadata->hufMetadata.hufDesSize =
ZSTD_buildSuperBlockEntropy_literal(seqStorePtr->litStart, litSize,
&prevEntropy->huf, &nextEntropy->huf,
&entropyMetadata->hufMetadata,
ZSTD_disableLiteralsCompression(cctxParams),
workspace, wkspSize);
FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildSuperBlockEntropy_literal failed");
entropyMetadata->fseMetadata.fseTablesSize =
ZSTD_buildSuperBlockEntropy_sequences(seqStorePtr,
&prevEntropy->fse, &nextEntropy->fse,
cctxParams,
&entropyMetadata->fseMetadata,
workspace, wkspSize);
FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildSuperBlockEntropy_sequences failed");
return 0;
}
/** ZSTD_compressSubBlock_literal() :
* Compresses literals section for a sub-block.
* When we have to write the Huffman table we will sometimes choose a header
* size larger than necessary. This is because we have to pick the header size
* before we know the table size + compressed size, so we have a bound on the
* table size. If we guessed incorrectly, we fall back to uncompressed literals.
*
* We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded
* in writing the header, otherwise it is set to 0.
*
* hufMetadata->hType has literals block type info.
* If it is set_basic, all sub-blocks literals section will be Raw_Literals_Block.
* If it is set_rle, all sub-blocks literals section will be RLE_Literals_Block.
* If it is set_compressed, the first sub-block's literals section will be Compressed_Literals_Block,
* and the following sub-blocks' literals sections will be Treeless_Literals_Block.
* @return : compressed size of literals section of a sub-block
* Or 0 if it is unable to compress.
* Or error code */
static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
const ZSTD_hufCTablesMetadata_t* hufMetadata,
const BYTE* literals, size_t litSize,
void* dst, size_t dstSize,
const int bmi2, int writeEntropy, int* entropyWritten)
{
size_t const header = writeEntropy ? 200 : 0;
size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header));
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart + lhSize;
U32 const singleStream = lhSize == 3;
symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
size_t cLitSize = 0;
(void)bmi2; /* TODO bmi2... */
DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);
*entropyWritten = 0;
if (litSize == 0 || hufMetadata->hType == set_basic) {
DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal");
return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
} else if (hufMetadata->hType == set_rle) {
DEBUGLOG(5, "ZSTD_compressSubBlock_literal using rle literal");
return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize);
}
assert(litSize > 0);
assert(hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat);
if (writeEntropy && hufMetadata->hType == set_compressed) {
memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize);
op += hufMetadata->hufDesSize;
cLitSize += hufMetadata->hufDesSize;
DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize);
}
/* TODO bmi2 */
{ const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable)
: HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable);
op += cSize;
cLitSize += cSize;
if (cSize == 0 || ERR_isError(cSize)) {
DEBUGLOG(5, "Failed to write entropy tables %s", ZSTD_getErrorName(cSize));
return 0;
}
/* If we expand and we aren't writing a header then emit uncompressed */
if (!writeEntropy && cLitSize >= litSize) {
DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal because uncompressible");
return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
}
/* If we are writing headers then allow expansion that doesn't change our header size. */
if (lhSize < (size_t)(3 + (cLitSize >= 1 KB) + (cLitSize >= 16 KB))) {
assert(cLitSize > litSize);
DEBUGLOG(5, "Literals expanded beyond allowed header size");
return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
}
DEBUGLOG(5, "ZSTD_compressSubBlock_literal (cSize=%zu)", cSize);
}
/* Build header */
switch(lhSize)
{
case 3: /* 2 - 2 - 10 - 10 */
{ U32 const lhc = hType + ((!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);
MEM_writeLE24(ostart, lhc);
break;
}
case 4: /* 2 - 2 - 14 - 14 */
{ U32 const lhc = hType + (2 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<18);
MEM_writeLE32(ostart, lhc);
break;
}
case 5: /* 2 - 2 - 18 - 18 */
{ U32 const lhc = hType + (3 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<22);
MEM_writeLE32(ostart, lhc);
ostart[4] = (BYTE)(cLitSize >> 10);
break;
}
default: /* not possible : lhSize is {3,4,5} */
assert(0);
}
*entropyWritten = 1;
DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart));
return op-ostart;
}
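The case-3 header built above packs its fields as 2-2-10-10 bits: literals type, size format, regenerated size, and compressed size. A worked computation with assumed values (single stream, set_compressed, litSize = 200, cLitSize = 120):

#include <stdio.h>

int main(void)
{
    unsigned const hType = 2;                       /* set_compressed */
    unsigned const singleStream = 1;
    unsigned const litSize = 200, cLitSize = 120;   /* assumed sizes */
    unsigned const lhc = hType + ((!singleStream) << 2)
                       + (litSize << 4) + (cLitSize << 14);
    printf("lhc = 0x%06X\n", lhc);                  /* 120<<14 | 200<<4 | 2 = 0x1E0C82 */
    return 0;
}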
static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* sequences, size_t nbSeq, size_t litSize, int lastSequence) {
const seqDef* const sstart = sequences;
const seqDef* const send = sequences + nbSeq;
const seqDef* sp = sstart;
size_t matchLengthSum = 0;
size_t litLengthSum = 0;
while (send-sp > 0) {
ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp);
litLengthSum += seqLen.litLength;
matchLengthSum += seqLen.matchLength;
sp++;
}
assert(litLengthSum <= litSize);
if (!lastSequence) {
assert(litLengthSum == litSize);
}
return matchLengthSum + litSize;
}
/** ZSTD_compressSubBlock_sequences() :
* Compresses sequences section for a sub-block.
* fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have
* symbol compression modes for the super-block.
* The first successfully compressed block will have these in its header.
* We set entropyWritten=1 when we succeed in compressing the sequences.
* The following sub-blocks will always have repeat mode.
* @return : compressed size of sequences section of a sub-block
* Or 0 if it is unable to compress
* Or error code. */
static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
const ZSTD_fseCTablesMetadata_t* fseMetadata,
const seqDef* sequences, size_t nbSeq,
const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
const ZSTD_CCtx_params* cctxParams,
void* dst, size_t dstCapacity,
const int bmi2, int writeEntropy, int* entropyWritten)
{
const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstCapacity;
BYTE* op = ostart;
BYTE* seqHead;
DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (nbSeq=%zu, writeEntropy=%d, longOffsets=%d)", nbSeq, writeEntropy, longOffsets);
*entropyWritten = 0;
/* Sequences Header */
RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
dstSize_tooSmall, "");
if (nbSeq < 0x7F)
*op++ = (BYTE)nbSeq;
else if (nbSeq < LONGNBSEQ)
op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
else
op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
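    /* note : the sequence-count header is 1 byte when nbSeq < 0x7F ;
     * 2 bytes when nbSeq < LONGNBSEQ (0x7F00), written as (0x80 | nbSeq>>8)
     * then (nbSeq & 0xFF), e.g. nbSeq=300 (0x12C) => bytes 0x81 0x2C ;
     * 3 bytes otherwise : 0xFF marker followed by LE16(nbSeq - LONGNBSEQ) */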
if (nbSeq==0) {
return op - ostart;
}
/* seqHead : flags for FSE encoding type */
seqHead = op++;
DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (seqHeadSize=%u)", (unsigned)(op-ostart));
if (writeEntropy) {
const U32 LLtype = fseMetadata->llType;
const U32 Offtype = fseMetadata->ofType;
const U32 MLtype = fseMetadata->mlType;
DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)", fseMetadata->fseTablesSize);
*seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize);
op += fseMetadata->fseTablesSize;
} else {
const U32 repeat = set_repeat;
*seqHead = (BYTE)((repeat<<6) + (repeat<<4) + (repeat<<2));
}
{ size_t const bitstreamSize = ZSTD_encodeSequences(
op, oend - op,
fseTables->matchlengthCTable, mlCode,
fseTables->offcodeCTable, ofCode,
fseTables->litlengthCTable, llCode,
sequences, nbSeq,
longOffsets, bmi2);
FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
op += bitstreamSize;
/* zstd versions <= 1.3.4 mistakenly report corruption when
* FSE_readNCount() receives a buffer < 4 bytes.
* Fixed by https://github.com/facebook/zstd/pull/1146.
* This can happen when the last set_compressed table present is 2
* bytes and the bitstream is only one byte.
* In this exceedingly rare case, we will simply emit an uncompressed
* block, since it isn't worth optimizing.
*/
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
if (writeEntropy && fseMetadata->lastCountSize && fseMetadata->lastCountSize + bitstreamSize < 4) {
/* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
assert(fseMetadata->lastCountSize + bitstreamSize == 3);
DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
"emitting an uncompressed block.");
return 0;
}
#endif
DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (bitstreamSize=%zu)", bitstreamSize);
}
/* zstd versions <= 1.4.0 mistakenly report error when
* sequences section body size is less than 3 bytes.
* Fixed by https://github.com/facebook/zstd/pull/1664.
* This can happen when the previous sequences section block is compressed
* with rle mode and the current block's sequences section is compressed
* with repeat mode where sequences section body size can be 1 byte.
*/
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
if (op-seqHead < 4) {
DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.4.0 by emitting "
"an uncompressed block when sequences are < 4 bytes");
return 0;
}
#endif
*entropyWritten = 1;
return op - ostart;
}
/** ZSTD_compressSubBlock() :
* Compresses a single sub-block.
* @return : compressed size of the sub-block
* Or 0 if it failed to compress. */
static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,
const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
const seqDef* sequences, size_t nbSeq,
const BYTE* literals, size_t litSize,
const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
const ZSTD_CCtx_params* cctxParams,
void* dst, size_t dstCapacity,
const int bmi2,
int writeLitEntropy, int writeSeqEntropy,
int* litEntropyWritten, int* seqEntropyWritten,
U32 lastBlock)
{
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstCapacity;
BYTE* op = ostart + ZSTD_blockHeaderSize;
DEBUGLOG(5, "ZSTD_compressSubBlock (litSize=%zu, nbSeq=%zu, writeLitEntropy=%d, writeSeqEntropy=%d, lastBlock=%d)",
litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock);
{ size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable,
&entropyMetadata->hufMetadata, literals, litSize,
op, oend-op, bmi2, writeLitEntropy, litEntropyWritten);
FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed");
if (cLitSize == 0) return 0;
op += cLitSize;
}
{ size_t cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse,
&entropyMetadata->fseMetadata,
sequences, nbSeq,
llCode, mlCode, ofCode,
cctxParams,
op, oend-op,
bmi2, writeSeqEntropy, seqEntropyWritten);
FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed");
if (cSeqSize == 0) return 0;
op += cSeqSize;
}
/* Write block header */
{ size_t cSize = (op-ostart)-ZSTD_blockHeaderSize;
U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
MEM_writeLE24(ostart, cBlockHeader24);
}
return op-ostart;
}
static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize,
const ZSTD_hufCTables_t* huf,
const ZSTD_hufCTablesMetadata_t* hufMetadata,
void* workspace, size_t wkspSize,
int writeEntropy)
{
unsigned* const countWksp = (unsigned*)workspace;
unsigned maxSymbolValue = 255;
size_t literalSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
if (hufMetadata->hType == set_basic) return litSize;
else if (hufMetadata->hType == set_rle) return 1;
else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
if (ZSTD_isError(largest)) return litSize;
{ size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
return cLitSizeEstimate + literalSectionHeaderSize;
} }
assert(0); /* impossible */
return 0;
}
static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type,
const BYTE* codeTable, unsigned maxCode,
size_t nbSeq, const FSE_CTable* fseCTable,
const U32* additionalBits,
short const* defaultNorm, U32 defaultNormLog,
void* workspace, size_t wkspSize)
{
unsigned* const countWksp = (unsigned*)workspace;
const BYTE* ctp = codeTable;
const BYTE* const ctStart = ctp;
const BYTE* const ctEnd = ctStart + nbSeq;
size_t cSymbolTypeSizeEstimateInBits = 0;
unsigned max = maxCode;
HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */
if (type == set_basic) {
cSymbolTypeSizeEstimateInBits = ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max);
} else if (type == set_rle) {
cSymbolTypeSizeEstimateInBits = 0;
} else if (type == set_compressed || type == set_repeat) {
cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
}
if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) return nbSeq * 10;
while (ctp < ctEnd) {
if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
ctp++;
}
return cSymbolTypeSizeEstimateInBits / 8;
}
static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable,
const BYTE* llCodeTable,
const BYTE* mlCodeTable,
size_t nbSeq,
const ZSTD_fseCTables_t* fseTables,
const ZSTD_fseCTablesMetadata_t* fseMetadata,
void* workspace, size_t wkspSize,
int writeEntropy)
{
size_t sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
size_t cSeqSizeEstimate = 0;
cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff,
nbSeq, fseTables->offcodeCTable, NULL,
OF_defaultNorm, OF_defaultNormLog,
workspace, wkspSize);
cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL,
nbSeq, fseTables->litlengthCTable, LL_bits,
LL_defaultNorm, LL_defaultNormLog,
workspace, wkspSize);
cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML,
nbSeq, fseTables->matchlengthCTable, ML_bits,
ML_defaultNorm, ML_defaultNormLog,
workspace, wkspSize);
if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
return cSeqSizeEstimate + sequencesSectionHeaderSize;
}
static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
const BYTE* ofCodeTable,
const BYTE* llCodeTable,
const BYTE* mlCodeTable,
size_t nbSeq,
const ZSTD_entropyCTables_t* entropy,
const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
void* workspace, size_t wkspSize,
int writeLitEntropy, int writeSeqEntropy) {
size_t cSizeEstimate = 0;
cSizeEstimate += ZSTD_estimateSubBlockSize_literal(literals, litSize,
&entropy->huf, &entropyMetadata->hufMetadata,
workspace, wkspSize, writeLitEntropy);
cSizeEstimate += ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
workspace, wkspSize, writeSeqEntropy);
return cSizeEstimate + ZSTD_blockHeaderSize;
}
static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata)
{
if (fseMetadata->llType == set_compressed || fseMetadata->llType == set_rle)
return 1;
if (fseMetadata->mlType == set_compressed || fseMetadata->mlType == set_rle)
return 1;
if (fseMetadata->ofType == set_compressed || fseMetadata->ofType == set_rle)
return 1;
return 0;
}
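/* Editor's note: only set_compressed and set_rle place table data in the block
 * (set_rle costs a single byte per table); set_basic decodes with the
 * predefined distributions and set_repeat reuses the previous tables, so
 * neither requires the entropy section to be rewritten. */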
/** ZSTD_compressSubBlock_multi() :
* Breaks the super-block into multiple sub-blocks and compresses them.
* Entropy is written to the first sub-block only;
* the following sub-blocks reuse it through repeat mode.
* All sub-blocks are compressed blocks (no raw or RLE blocks).
* @return : compressed size of the super-block (made of multiple ZSTD blocks),
* or 0 if it failed to compress. */
static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
const ZSTD_compressedBlockState_t* prevCBlock,
ZSTD_compressedBlockState_t* nextCBlock,
const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
const ZSTD_CCtx_params* cctxParams,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const int bmi2, U32 lastBlock,
void* workspace, size_t wkspSize)
{
const seqDef* const sstart = seqStorePtr->sequencesStart;
const seqDef* const send = seqStorePtr->sequences;
const seqDef* sp = sstart;
const BYTE* const lstart = seqStorePtr->litStart;
const BYTE* const lend = seqStorePtr->lit;
const BYTE* lp = lstart;
BYTE const* ip = (BYTE const*)src;
BYTE const* const iend = ip + srcSize;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstCapacity;
BYTE* op = ostart;
const BYTE* llCodePtr = seqStorePtr->llCode;
const BYTE* mlCodePtr = seqStorePtr->mlCode;
const BYTE* ofCodePtr = seqStorePtr->ofCode;
size_t targetCBlockSize = cctxParams->targetCBlockSize;
size_t litSize, seqCount;
int writeLitEntropy = entropyMetadata->hufMetadata.hType == set_compressed;
int writeSeqEntropy = 1;
int lastSequence = 0;
DEBUGLOG(5, "ZSTD_compressSubBlock_multi (litSize=%u, nbSeq=%u)",
(unsigned)(lend-lp), (unsigned)(send-sstart));
litSize = 0;
seqCount = 0;
do {
size_t cBlockSizeEstimate = 0;
if (sstart == send) {
lastSequence = 1;
} else {
const seqDef* const sequence = sp + seqCount;
lastSequence = sequence == send - 1;
litSize += ZSTD_getSequenceLength(seqStorePtr, sequence).litLength;
seqCount++;
}
if (lastSequence) {
assert(lp <= lend);
assert(litSize <= (size_t)(lend - lp));
litSize = (size_t)(lend - lp);
}
/* I think there is an optimization opportunity here.
* Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful
* since it recalculates estimate from scratch.
* For example, it would recount literal distribution and symbol codes every time.
*/
cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount,
&nextCBlock->entropy, entropyMetadata,
workspace, wkspSize, writeLitEntropy, writeSeqEntropy);
if (cBlockSizeEstimate > targetCBlockSize || lastSequence) {
int litEntropyWritten = 0;
int seqEntropyWritten = 0;
const size_t decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, lastSequence);
const size_t cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
sp, seqCount,
lp, litSize,
llCodePtr, mlCodePtr, ofCodePtr,
cctxParams,
op, oend-op,
bmi2, writeLitEntropy, writeSeqEntropy,
&litEntropyWritten, &seqEntropyWritten,
lastBlock && lastSequence);
FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
if (cSize > 0 && cSize < decompressedSize) {
DEBUGLOG(5, "Committed the sub-block");
assert(ip + decompressedSize <= iend);
ip += decompressedSize;
sp += seqCount;
lp += litSize;
op += cSize;
llCodePtr += seqCount;
mlCodePtr += seqCount;
ofCodePtr += seqCount;
litSize = 0;
seqCount = 0;
/* Entropy only needs to be written once */
if (litEntropyWritten) {
writeLitEntropy = 0;
}
if (seqEntropyWritten) {
writeSeqEntropy = 0;
}
}
}
} while (!lastSequence);
if (writeLitEntropy) {
DEBUGLOG(5, "ZSTD_compressSubBlock_multi has literal entropy tables unwritten");
memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf));
}
if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) {
/* If we haven't written our entropy tables, then we've violated our contract and
* must emit an uncompressed block.
*/
DEBUGLOG(5, "ZSTD_compressSubBlock_multi has sequence entropy tables unwritten");
return 0;
}
if (ip < iend) {
size_t const cSize = ZSTD_noCompressBlock(op, oend - op, ip, iend - ip, lastBlock);
DEBUGLOG(5, "ZSTD_compressSubBlock_multi last sub-block uncompressed, %zu bytes", (size_t)(iend - ip));
FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
assert(cSize != 0);
op += cSize;
/* We have to regenerate the repcodes because we've skipped some sequences */
if (sp < send) {
seqDef const* seq;
repcodes_t rep;
memcpy(&rep, prevCBlock->rep, sizeof(rep));
for (seq = sstart; seq < sp; ++seq) {
rep = ZSTD_updateRep(rep.rep, seq->offset - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
}
memcpy(nextCBlock->rep, &rep, sizeof(rep));
}
}
DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed");
return op-ostart;
}
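/* Editor's sketch of the loop above: accumulate sequences, re-estimate the
 * sub-block size, and emit a sub-block once the estimate exceeds
 * targetCBlockSize (or the input is exhausted). Entropy tables travel with the
 * first emitted sub-block only, and any leftover input falls back to an
 * uncompressed block with the repcodes regenerated. */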
size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
void* dst, size_t dstCapacity,
void const* src, size_t srcSize,
unsigned lastBlock) {
ZSTD_entropyCTablesMetadata_t entropyMetadata;
FORWARD_IF_ERROR(ZSTD_buildSuperBlockEntropy(&zc->seqStore,
&zc->blockState.prevCBlock->entropy,
&zc->blockState.nextCBlock->entropy,
&zc->appliedParams,
&entropyMetadata,
zc->entropyWorkspace, HUF_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");
return ZSTD_compressSubBlock_multi(&zc->seqStore,
zc->blockState.prevCBlock,
zc->blockState.nextCBlock,
&entropyMetadata,
&zc->appliedParams,
dst, dstCapacity,
src, srcSize,
zc->bmi2, lastBlock,
zc->entropyWorkspace, HUF_WORKSPACE_SIZE /* statically allocated in resetCCtx */);
}

View File

@@ -0,0 +1,32 @@
/*
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_COMPRESS_ADVANCED_H
#define ZSTD_COMPRESS_ADVANCED_H
/*-*************************************
* Dependencies
***************************************/
#include "../zstd.h" /* ZSTD_CCtx */
/*-*************************************
* Target Compressed Block Size
***************************************/
/* ZSTD_compressSuperBlock() :
* Used to compress a super-block when targetCBlockSize is set.
* The given block is compressed into multiple sub-blocks, each around targetCBlockSize. */
size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
void* dst, size_t dstCapacity,
void const* src, size_t srcSize,
unsigned lastBlock);
#endif /* ZSTD_COMPRESS_ADVANCED_H */
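/* Editor's usage sketch (assumes the public parameter ZSTD_c_targetCBlockSize;
 * not part of this header). Callers never invoke ZSTD_compressSuperBlock()
 * directly; it is selected internally once a non-zero target is set: */
#if 0 /* illustrative only */
#include <zstd.h>
static size_t compress_with_target_cblock(void* dst, size_t dstCapacity,
                                          const void* src, size_t srcSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t cSize;
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetCBlockSize, 1300); /* aim for ~1300-byte blocks */
    cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
    ZSTD_freeCCtx(cctx);
    return cSize; /* check with ZSTD_isError() */
}
#endif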

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -14,7 +14,7 @@
/*-*************************************
* Dependencies
***************************************/
#include "zstd_internal.h"
#include "../common/zstd_internal.h"
#if defined (__cplusplus)
extern "C" {
@@ -24,16 +24,6 @@ extern "C" {
* Constants
***************************************/
/* define "workspace is too large" as this number of times larger than needed */
#define ZSTD_WORKSPACETOOLARGE_FACTOR 3
/* when workspace is continuously too large
* during at least this number of times,
* context's memory usage is considered wasteful,
* because it's sized to handle a worst case scenario which rarely happens.
* In which case, resize it down to free some memory */
#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128
/* Since the workspace is effectively its own little malloc implementation /
* arena, when we run under ASAN, we should similarly insert redzones between
* each internal element of the workspace, so ASAN will catch overruns that
@@ -468,7 +458,7 @@ MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
void* workspace = ZSTD_malloc(size, customMem);
DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
RETURN_ERROR_IF(workspace == NULL, memory_allocation);
RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
ZSTD_cwksp_init(ws, workspace, size);
return 0;
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -63,10 +63,8 @@ size_t ZSTD_compressBlock_doubleFast_generic(
const BYTE* ip = istart;
const BYTE* anchor = istart;
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
const U32 lowestValid = ms->window.dictLimit;
const U32 maxDistance = 1U << cParams->windowLog;
/* presumes that, if there is a dictionary, it must be using Attach mode */
const U32 prefixLowestIndex = (endIndex - lowestValid > maxDistance) ? endIndex - maxDistance : lowestValid;
const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
const BYTE* const prefixLowest = base + prefixLowestIndex;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - HASH_READ_SIZE;
@@ -96,7 +94,7 @@ size_t ZSTD_compressBlock_doubleFast_generic(
dictCParams->hashLog : hBitsL;
const U32 dictHBitsS = dictMode == ZSTD_dictMatchState ?
dictCParams->chainLog : hBitsS;
const U32 dictAndPrefixLength = (U32)(ip - prefixLowest + dictEnd - dictStart);
const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictStart));
DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_generic");
@@ -104,13 +102,15 @@ size_t ZSTD_compressBlock_doubleFast_generic(
/* if a dictionary is attached, it must be within window range */
if (dictMode == ZSTD_dictMatchState) {
assert(lowestValid + maxDistance >= endIndex);
assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex);
}
/* init */
ip += (dictAndPrefixLength == 0);
if (dictMode == ZSTD_noDict) {
U32 const maxRep = (U32)(ip - prefixLowest);
U32 const current = (U32)(ip - base);
U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog);
U32 const maxRep = current - windowLow;
if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
}
@@ -198,6 +198,9 @@ size_t ZSTD_compressBlock_doubleFast_generic(
} }
ip += ((ip-anchor) >> kSearchStrength) + 1;
#if defined(__aarch64__)
PREFETCH_L1(ip+256);
#endif
continue;
_search_next_long:
@@ -271,7 +274,7 @@ _match_stored:
U32 const repIndex2 = current2 - offset_2;
const BYTE* repMatch2 = dictMode == ZSTD_dictMatchState
&& repIndex2 < prefixLowestIndex ?
dictBase - dictIndexDelta + repIndex2 :
dictBase + repIndex2 - dictIndexDelta :
base + repIndex2;
if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
&& (MEM_read32(repMatch2) == MEM_read32(ip)) ) {

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -15,7 +15,7 @@
extern "C" {
#endif
#include "mem.h" /* U32 */
#include "../common/mem.h" /* U32 */
#include "zstd_compress_internal.h" /* ZSTD_CCtx, size_t */
void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -61,9 +61,7 @@ ZSTD_compressBlock_fast_generic(
const BYTE* ip1;
const BYTE* anchor = istart;
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
const U32 maxDistance = 1U << cParams->windowLog;
const U32 validStartIndex = ms->window.dictLimit;
const U32 prefixStartIndex = (endIndex - validStartIndex > maxDistance) ? endIndex - maxDistance : validStartIndex;
const U32 prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
const BYTE* const prefixStart = base + prefixStartIndex;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - HASH_READ_SIZE;
@@ -74,12 +72,21 @@ ZSTD_compressBlock_fast_generic(
DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
ip0 += (ip0 == prefixStart);
ip1 = ip0 + 1;
{ U32 const maxRep = (U32)(ip0 - prefixStart);
{ U32 const current = (U32)(ip0 - base);
U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog);
U32 const maxRep = current - windowLow;
if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
}
/* Main Search Loop */
#ifdef __INTEL_COMPILER
/* From Intel: 'The vector pragma indicates that the loop should be
* vectorized if it is legal to do so'. It can be used together with
* #pragma ivdep, but we have opted to exclude that because Intel
* warns against using it. */
#pragma vector always
#endif
while (ip1 < ilimit) { /* < instead of <=, because check at ip0+2 */
size_t mLength;
BYTE const* ip2 = ip0 + 2;
@@ -91,19 +98,25 @@ ZSTD_compressBlock_fast_generic(
U32 const current1 = (U32)(ip1-base);
U32 const matchIndex0 = hashTable[h0];
U32 const matchIndex1 = hashTable[h1];
BYTE const* repMatch = ip2-offset_1;
BYTE const* repMatch = ip2 - offset_1;
const BYTE* match0 = base + matchIndex0;
const BYTE* match1 = base + matchIndex1;
U32 offcode;
#if defined(__aarch64__)
PREFETCH_L1(ip0+256);
#endif
hashTable[h0] = current0; /* update hash table */
hashTable[h1] = current1; /* update hash table */
assert(ip0 + 1 == ip1);
if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) {
mLength = ip2[-1] == repMatch[-1] ? 1 : 0;
mLength = (ip2[-1] == repMatch[-1]) ? 1 : 0;
ip0 = ip2 - mLength;
match0 = repMatch - mLength;
mLength += 4;
offcode = 0;
goto _match;
}
@@ -128,19 +141,18 @@ _offset: /* Requires: ip0, match0 */
offset_2 = offset_1;
offset_1 = (U32)(ip0-match0);
offcode = offset_1 + ZSTD_REP_MOVE;
mLength = 0;
mLength = 4;
/* Count the backwards match length */
while (((ip0>anchor) & (match0>prefixStart))
&& (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */
_match: /* Requires: ip0, match0, offcode */
/* Count the forward length */
mLength += ZSTD_count(ip0+mLength+4, match0+mLength+4, iend) + 4;
mLength += ZSTD_count(ip0+mLength, match0+mLength, iend);
ZSTD_storeSeq(seqStore, (size_t)(ip0-anchor), anchor, iend, offcode, mLength-MINMATCH);
/* match found */
ip0 += mLength;
anchor = ip0;
ip1 = ip0 + 1;
if (ip0 <= ilimit) {
/* Fill Table */
@@ -148,19 +160,18 @@ _match: /* Requires: ip0, match0, offcode */
hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */
hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
while ( ((ip0 <= ilimit) & (offset_2>0)) /* offset_2==0 means offset_2 is invalidated */
&& (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) {
if (offset_2 > 0) { /* offset_2==0 means offset_2 is invalidated */
while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) {
/* store sequence */
size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
{ U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
ip0 += rLength;
ip1 = ip0 + 1;
ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH);
anchor = ip0;
continue; /* faster when present (confirmed on gcc-8) ... (?) */
}
}
} } }
ip1 = ip0 + 1;
}
/* save reps for next block */
@@ -387,7 +398,7 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
const BYTE* const ilimit = iend - 8;
U32 offset_1=rep[0], offset_2=rep[1];
DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic");
DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);
/* switch to "regular" variant if extDict is invalidated due to maxDistance */
if (prefixStartIndex == dictStartIndex)
@@ -404,6 +415,7 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
hashTable[h] = current; /* update hash table */
DEBUGLOG(7, "offset_1 = %u , current = %u", offset_1, current);
assert(offset_1 <= current +1); /* check repIndex */
if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -15,7 +15,7 @@
extern "C" {
#endif
#include "mem.h" /* U32 */
#include "../common/mem.h" /* U32 */
#include "zstd_compress_internal.h"
void ZSTD_fillHashTable(ZSTD_matchState_t* ms,

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -660,12 +660,16 @@ ZSTD_compressBlock_lazy_generic(
const U32 dictIndexDelta = dictMode == ZSTD_dictMatchState ?
prefixLowestIndex - (U32)(dictEnd - dictBase) :
0;
const U32 dictAndPrefixLength = (U32)(ip - prefixLowest + dictEnd - dictLowest);
const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest));
DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u)", (U32)dictMode);
/* init */
ip += (dictAndPrefixLength == 0);
if (dictMode == ZSTD_noDict) {
U32 const maxRep = (U32)(ip - prefixLowest);
U32 const current = (U32)(ip - base);
U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, ms->cParams.windowLog);
U32 const maxRep = current - windowLow;
if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
}
@@ -677,6 +681,12 @@ ZSTD_compressBlock_lazy_generic(
}
/* Match Loop */
#if defined(__GNUC__) && defined(__x86_64__)
/* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the
* code alignment is perturbed. To fix the instability, align the loop on 32 bytes.
*/
__asm__(".p2align 5");
#endif
while (ip < ilimit) {
size_t matchLength=0;
size_t offset=0;
@@ -929,11 +939,11 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
const BYTE* const ilimit = iend - 8;
const BYTE* const base = ms->window.base;
const U32 dictLimit = ms->window.dictLimit;
const U32 lowestIndex = ms->window.lowLimit;
const BYTE* const prefixStart = base + dictLimit;
const BYTE* const dictBase = ms->window.dictBase;
const BYTE* const dictEnd = dictBase + dictLimit;
const BYTE* const dictStart = dictBase + lowestIndex;
const BYTE* const dictStart = dictBase + ms->window.lowLimit;
const U32 windowLog = ms->cParams.windowLog;
typedef size_t (*searchMax_f)(
ZSTD_matchState_t* ms,
@@ -942,10 +952,18 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
U32 offset_1 = rep[0], offset_2 = rep[1];
DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic");
/* init */
ip += (ip == prefixStart);
/* Match Loop */
#if defined(__GNUC__) && defined(__x86_64__)
/* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the
* code alignment is perturbed. To fix the instability, align the loop on 32 bytes.
*/
__asm__(".p2align 5");
#endif
while (ip < ilimit) {
size_t matchLength=0;
size_t offset=0;
@@ -953,10 +971,11 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
U32 current = (U32)(ip-base);
/* check repCode */
{ const U32 repIndex = (U32)(current+1 - offset_1);
{ const U32 windowLow = ZSTD_getLowestMatchIndex(ms, current+1, windowLog);
const U32 repIndex = (U32)(current+1 - offset_1);
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */
if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
/* repcode detected we should take it */
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
@@ -983,10 +1002,11 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
current++;
/* check repCode */
if (offset) {
const U32 windowLow = ZSTD_getLowestMatchIndex(ms, current, windowLog);
const U32 repIndex = (U32)(current - offset_1);
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */
if (MEM_read32(ip) == MEM_read32(repMatch)) {
/* repcode detected */
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
@@ -1013,10 +1033,11 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
current++;
/* check repCode */
if (offset) {
const U32 windowLow = ZSTD_getLowestMatchIndex(ms, current, windowLog);
const U32 repIndex = (U32)(current - offset_1);
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */
if (MEM_read32(ip) == MEM_read32(repMatch)) {
/* repcode detected */
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
@@ -1057,10 +1078,12 @@ _storeSequence:
/* check immediate repcode */
while (ip <= ilimit) {
const U32 repIndex = (U32)((ip-base) - offset_2);
const U32 repCurrent = (U32)(ip-base);
const U32 windowLow = ZSTD_getLowestMatchIndex(ms, repCurrent, windowLog);
const U32 repIndex = repCurrent - offset_2;
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */
if (MEM_read32(ip) == MEM_read32(repMatch)) {
/* repcode detected we should take it */
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the

View File

@@ -1,15 +1,16 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#include "zstd_ldm.h"
#include "debug.h"
#include "../common/debug.h"
#include "zstd_fast.h" /* ZSTD_fillHashTable() */
#include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */
@@ -223,6 +224,20 @@ static U64 ZSTD_ldm_fillLdmHashTable(ldmState_t* state,
return rollingHash;
}
void ZSTD_ldm_fillHashTable(
ldmState_t* state, const BYTE* ip,
const BYTE* iend, ldmParams_t const* params)
{
DEBUGLOG(5, "ZSTD_ldm_fillHashTable");
if ((size_t)(iend - ip) >= params->minMatchLength) {
U64 startingHash = ZSTD_rollingHash_compute(ip, params->minMatchLength);
ZSTD_ldm_fillLdmHashTable(
state, startingHash, ip, iend - params->minMatchLength, state->window.base,
params->hashLog - params->bucketSizeLog,
*params);
}
}
/** ZSTD_ldm_limitTableUpdate() :
*
@@ -449,6 +464,8 @@ size_t ZSTD_ldm_generateSequences(
U32 const correction = ZSTD_window_correctOverflow(
&ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
/* invalidate dictionaries on overflow correction */
ldmState->loadedDictEnd = 0;
}
/* 2. We enforce the maximum offset allowed.
*
@@ -457,8 +474,14 @@ size_t ZSTD_ldm_generateSequences(
* TODO: * Test the chunk size.
* * Try invalidation after the sequence generation and test the
* offset against maxDist directly.
*
* NOTE: Because of dictionaries + sequence splitting we MUST make sure
* that any offset used is valid at the END of the sequence, since it may
* be split into two sequences. This condition holds when using
* ZSTD_window_enforceMaxDist(), but if we move to checking offsets
* against maxDist directly, we'll have to carefully handle that case.
*/
ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, NULL, NULL);
ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL);
/* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
newLeftoverSize = ZSTD_ldm_generateSequences_internal(
ldmState, sequences, params, chunkStart, chunkSize);
@@ -566,14 +589,13 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
if (sequence.offset == 0)
break;
assert(sequence.offset <= (1U << cParams->windowLog));
assert(ip + sequence.litLength + sequence.matchLength <= iend);
/* Fill tables for block compressor */
ZSTD_ldm_limitTableUpdate(ms, ip);
ZSTD_ldm_fillFastTables(ms, ip);
/* Run the block compressor */
DEBUGLOG(5, "calling block compressor on segment of size %u", sequence.litLength);
DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength);
{
size_t const newLitLength =
blockCompressor(ms, seqStore, rep, ip, sequence.litLength);

View File

@@ -1,10 +1,11 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_LDM_H
@@ -15,7 +16,7 @@ extern "C" {
#endif
#include "zstd_compress_internal.h" /* ldmParams_t, U32 */
#include "zstd.h" /* ZSTD_CCtx, size_t */
#include "../zstd.h" /* ZSTD_CCtx, size_t */
/*-*************************************
* Long distance matching
@@ -23,6 +24,10 @@ extern "C" {
#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_LIMIT_DEFAULT
void ZSTD_ldm_fillHashTable(
ldmState_t* state, const BYTE* ip,
const BYTE* iend, ldmParams_t const* params);
/**
* ZSTD_ldm_generateSequences():
*

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -249,40 +249,6 @@ static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optP
}
}
/* ZSTD_litLengthContribution() :
* @return ( cost(litlength) - cost(0) )
* this value can then be added to rawLiteralsCost()
* to provide a cost which is directly comparable to a match ending at same position */
static int ZSTD_litLengthContribution(U32 const litLength, const optState_t* const optPtr, int optLevel)
{
if (optPtr->priceType >= zop_predef) return (int)WEIGHT(litLength, optLevel);
/* dynamic statistics */
{ U32 const llCode = ZSTD_LLcode(litLength);
int const contribution = (int)(LL_bits[llCode] * BITCOST_MULTIPLIER)
+ (int)WEIGHT(optPtr->litLengthFreq[0], optLevel) /* note: log2litLengthSum cancel out */
- (int)WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
#if 1
return contribution;
#else
return MAX(0, contribution); /* sometimes better, sometimes not ... */
#endif
}
}
/* ZSTD_literalsContribution() :
* creates a fake cost for the literals part of a sequence
* which can be compared to the ending cost of a match
* should a new match start at this position */
static int ZSTD_literalsContribution(const BYTE* const literals, U32 const litLength,
const optState_t* const optPtr,
int optLevel)
{
int const contribution = (int)ZSTD_rawLiteralsCost(literals, litLength, optPtr, optLevel)
+ ZSTD_litLengthContribution(litLength, optPtr, optLevel);
return contribution;
}
/* ZSTD_getMatchPrice() :
* Provides the cost of the match part (offset + matchLength) of a sequence
* Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
@@ -603,7 +569,10 @@ U32 ZSTD_insertBtAndGetAllMatches (
U32 repLen = 0;
assert(current >= dictLimit);
if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < current-dictLimit) { /* equivalent to `current > repIndex >= dictLimit` */
if (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch)) {
/* We must validate the repcode offset because when we're using a dictionary the
* valid offset range shrinks when the dictionary goes out of bounds.
*/
if ((repIndex >= windowLow) & (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch))) {
repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
}
} else { /* repIndex < dictLimit || repIndex >= current */
@@ -799,30 +768,6 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (
/*-*******************************
* Optimal parser
*********************************/
typedef struct repcodes_s {
U32 rep[3];
} repcodes_t;
static repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
{
repcodes_t newReps;
if (offset >= ZSTD_REP_NUM) { /* full offset */
newReps.rep[2] = rep[1];
newReps.rep[1] = rep[0];
newReps.rep[0] = offset - ZSTD_REP_MOVE;
} else { /* repcode */
U32 const repCode = offset + ll0;
if (repCode > 0) { /* note : if repCode==0, no change */
U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
newReps.rep[2] = (repCode >= 2) ? rep[1] : rep[2];
newReps.rep[1] = rep[0];
newReps.rep[0] = currentOffset;
} else { /* repCode == 0 */
memcpy(&newReps, rep, sizeof(newReps));
}
}
return newReps;
}
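/* Editor's note (worked example of the relocated ZSTD_updateRep): with history
 * {8, 4, 2}, taking repcode offset 2 with ll0 == 0 gives repCode == 2, so the
 * match offset is rep[2] == 2 and the history rotates to {2, 8, 4}; a full
 * offset instead stores offset - ZSTD_REP_MOVE in rep[0] and shifts the old
 * rep[0]/rep[1] down. */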
static U32 ZSTD_totalLen(ZSTD_optimal_t sol)
@@ -839,7 +784,7 @@ listStats(const U32* table, int lastEltID)
int enb;
for (enb=0; enb < nbElts; enb++) {
(void)table;
//RAWLOG(2, "%3i:%3i, ", enb, table[enb]);
/* RAWLOG(2, "%3i:%3i, ", enb, table[enb]); */
RAWLOG(2, "%4i,", table[enb]);
}
RAWLOG(2, " \n");
@@ -894,7 +839,12 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
{ U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
opt[0].mlen = 0; /* means is_a_literal */
opt[0].litlen = litlen;
opt[0].price = ZSTD_literalsContribution(anchor, litlen, optStatePtr, optLevel);
/* We don't need to include the actual price of the literals because
* it is static for the duration of the forward pass, and is included
* in every price. We include the literal length to avoid negative
* prices when we subtract the previous literal length.
*/
opt[0].price = ZSTD_litLengthPrice(litlen, optStatePtr, optLevel);
/* large match -> immediate encoding */
{ U32 const maxML = matches[nbMatches-1].len;
@@ -923,7 +873,6 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
for (matchNb = 0; matchNb < nbMatches; matchNb++) {
U32 const offset = matches[matchNb].off;
U32 const end = matches[matchNb].len;
repcodes_t const repHistory = ZSTD_updateRep(rep, offset, ll0);
for ( ; pos <= end ; pos++ ) {
U32 const matchPrice = ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel);
U32 const sequencePrice = literalsPrice + matchPrice;
@@ -933,8 +882,6 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
opt[pos].off = offset;
opt[pos].litlen = litlen;
opt[pos].price = sequencePrice;
ZSTD_STATIC_ASSERT(sizeof(opt[pos].rep) == sizeof(repHistory));
memcpy(opt[pos].rep, &repHistory, sizeof(repHistory));
} }
last_pos = pos-1;
}
@@ -961,7 +908,6 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
opt[cur].off = 0;
opt[cur].litlen = litlen;
opt[cur].price = price;
memcpy(opt[cur].rep, opt[cur-1].rep, sizeof(opt[cur].rep));
} else {
DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)",
inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),
@@ -969,6 +915,21 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
}
}
/* Set the repcodes of the current position. We must do it here
* because we rely on the repcodes of the 2nd to last sequence being
* correct to set the next chunk's repcodes during the backward
* traversal.
*/
ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t));
assert(cur >= opt[cur].mlen);
if (opt[cur].mlen != 0) {
U32 const prev = cur - opt[cur].mlen;
repcodes_t newReps = ZSTD_updateRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0);
memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t));
} else {
memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t));
}
/* last match must start at a minimum distance of 8 from oend */
if (inr > ilimit) continue;
@@ -1009,7 +970,6 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
/* set prices using matches found at position == cur */
for (matchNb = 0; matchNb < nbMatches; matchNb++) {
U32 const offset = matches[matchNb].off;
repcodes_t const repHistory = ZSTD_updateRep(opt[cur].rep, offset, ll0);
U32 const lastML = matches[matchNb].len;
U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
U32 mlen;
@ -1029,8 +989,6 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
opt[pos].off = offset;
opt[pos].litlen = litlen;
opt[pos].price = price;
ZSTD_STATIC_ASSERT(sizeof(opt[pos].rep) == sizeof(repHistory));
memcpy(opt[pos].rep, &repHistory, sizeof(repHistory));
} else {
DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
@@ -1046,6 +1004,17 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
_shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
assert(opt[0].mlen == 0);
/* Set the next chunk's repcodes based on the repcodes of the beginning
* of the last match, and the last sequence. This avoids us having to
* update them while traversing the sequences.
*/
if (lastSequence.mlen != 0) {
repcodes_t reps = ZSTD_updateRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0);
memcpy(rep, &reps, sizeof(reps));
} else {
memcpy(rep, opt[cur].rep, sizeof(repcodes_t));
}
{ U32 const storeEnd = cur + 1;
U32 storeStart = storeEnd;
U32 seqPos = cur;
@@ -1082,20 +1051,6 @@ _shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
continue; /* will finish */
}
/* repcodes update : like ZSTD_updateRep(), but update in place */
if (offCode >= ZSTD_REP_NUM) { /* full offset */
rep[2] = rep[1];
rep[1] = rep[0];
rep[0] = offCode - ZSTD_REP_MOVE;
} else { /* repcode */
U32 const repCode = offCode + (llen==0);
if (repCode) { /* note : if repCode==0, no change */
U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
if (repCode >= 2) rep[2] = rep[1];
rep[1] = rep[0];
rep[0] = currentOffset;
} }
assert(anchor + llen <= iend);
ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen-MINMATCH);
@@ -1104,7 +1059,6 @@ _shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
} }
ZSTD_setBasePrices(optStatePtr, optLevel);
}
} /* while (ip < ilimit) */
/* Return the last literals size */

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -22,9 +22,9 @@
/* ====== Dependencies ====== */
#include <string.h> /* memcpy, memset */
#include <limits.h> /* INT_MAX, UINT_MAX */
#include "mem.h" /* MEM_STATIC */
#include "pool.h" /* threadpool */
#include "threading.h" /* mutex */
#include "../common/mem.h" /* MEM_STATIC */
#include "../common/pool.h" /* threadpool */
#include "../common/threading.h" /* mutex */
#include "zstd_compress_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
#include "zstd_ldm.h"
#include "zstdmt_compress.h"
@@ -461,7 +461,13 @@ typedef struct {
ZSTD_window_t ldmWindow; /* A thread-safe copy of ldmState.window */
} serialState_t;
static int ZSTDMT_serialState_reset(serialState_t* serialState, ZSTDMT_seqPool* seqPool, ZSTD_CCtx_params params, size_t jobSize)
static int
ZSTDMT_serialState_reset(serialState_t* serialState,
ZSTDMT_seqPool* seqPool,
ZSTD_CCtx_params params,
size_t jobSize,
const void* dict, size_t const dictSize,
ZSTD_dictContentType_e dictContentType)
{
/* Adjust parameters */
if (params.ldmParams.enableLdm) {
@@ -490,8 +496,7 @@ static int ZSTDMT_serialState_reset(serialState_t* serialState, ZSTDMT_seqPool*
/* Size the seq pool tables */
ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, jobSize));
/* Reset the window */
ZSTD_window_clear(&serialState->ldmState.window);
serialState->ldmWindow = serialState->ldmState.window;
ZSTD_window_init(&serialState->ldmState.window);
/* Resize tables and output space if necessary. */
if (serialState->ldmState.hashTable == NULL || serialState->params.ldmParams.hashLog < hashLog) {
ZSTD_free(serialState->ldmState.hashTable, cMem);
@@ -506,7 +511,24 @@ static int ZSTDMT_serialState_reset(serialState_t* serialState, ZSTDMT_seqPool*
/* Zero the tables */
memset(serialState->ldmState.hashTable, 0, hashSize);
memset(serialState->ldmState.bucketOffsets, 0, bucketSize);
/* Update window state and fill hash table with dict */
serialState->ldmState.loadedDictEnd = 0;
if (dictSize > 0) {
if (dictContentType == ZSTD_dct_rawContent) {
BYTE const* const dictEnd = (const BYTE*)dict + dictSize;
ZSTD_window_update(&serialState->ldmState.window, dict, dictSize);
ZSTD_ldm_fillHashTable(&serialState->ldmState, (const BYTE*)dict, dictEnd, &params.ldmParams);
serialState->ldmState.loadedDictEnd = params.forceWindow ? 0 : (U32)(dictEnd - serialState->ldmState.window.base);
} else {
/* don't even load anything */
}
}
/* Initialize serialState's copy of ldmWindow. */
serialState->ldmWindow = serialState->ldmState.window;
}
serialState->params = params;
serialState->params.jobSize = (U32)jobSize;
return 0;
@@ -1054,7 +1076,7 @@ static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(const ZSTD_CCtx_params* params)
static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers)
{
if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation);
FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) );
FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) , "");
mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, nbWorkers);
if (mtctx->bufPool == NULL) return ERROR(memory_allocation);
mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers);
@@ -1076,7 +1098,7 @@ void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_p
DEBUGLOG(5, "ZSTDMT_updateCParams_whileCompressing (level:%i)",
compressionLevel);
mtctx->params.compressionLevel = compressionLevel;
{ ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, 0, 0);
{ ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, 0);
cParams.windowLog = saved_wlog;
mtctx->params.cParams = cParams;
}
@@ -1235,7 +1257,8 @@ ZSTDMT_computeNbJobs(const ZSTD_CCtx_params* params, size_t srcSize, unsigned nb
/* ZSTDMT_compress_advanced_internal() :
* This is a blocking function : it will only give back control to caller after finishing its compression job.
*/
static size_t ZSTDMT_compress_advanced_internal(
static size_t
ZSTDMT_compress_advanced_internal(
ZSTDMT_CCtx* mtctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
@@ -1267,10 +1290,11 @@ static size_t ZSTDMT_compress_advanced_internal(
assert(avgJobSize >= 256 KB); /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), required to compress directly into Dst (no additional buffer) */
ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(avgJobSize) );
if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, avgJobSize))
/* LDM doesn't even try to load the dictionary in single-ingestion mode */
if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, avgJobSize, NULL, 0, ZSTD_dct_auto))
return ERROR(memory_allocation);
FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbJobs) ); /* only expands if necessary */
FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbJobs) , ""); /* only expands if necessary */
{ unsigned u;
for (u=0; u<nbJobs; u++) {
@@ -1403,7 +1427,7 @@ size_t ZSTDMT_initCStream_internal(
/* init */
if (params.nbWorkers != mtctx->params.nbWorkers)
FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) );
FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) , "");
if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN;
if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = (size_t)ZSTDMT_JOBSIZE_MAX;
@@ -1500,7 +1524,8 @@ size_t ZSTDMT_initCStream_internal(
mtctx->allJobsCompleted = 0;
mtctx->consumed = 0;
mtctx->produced = 0;
if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize))
if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize,
dict, dictSize, dictContentType))
return ERROR(memory_allocation);
return 0;
}
@@ -1714,9 +1739,11 @@ static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, u
assert(mtctx->doneJobID < mtctx->nextJobID);
assert(cSize >= mtctx->jobs[wJobID].dstFlushed);
assert(mtctx->jobs[wJobID].dstBuff.start != NULL);
if (toFlush > 0) {
memcpy((char*)output->dst + output->pos,
(const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed,
toFlush);
}
output->pos += toFlush;
mtctx->jobs[wJobID].dstFlushed += toFlush; /* can write : this value is only used by mtctx */
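/* Editor's note: the new toFlush > 0 guard matters because memcpy requires
 * valid pointers even when the size is 0; calling it with an invalid source
 * and a zero length is undefined behavior in C. */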
@@ -1786,7 +1813,7 @@ static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range)
BYTE const* const bufferStart = (BYTE const*)buffer.start;
BYTE const* const bufferEnd = bufferStart + buffer.capacity;
BYTE const* const rangeStart = (BYTE const*)range.start;
BYTE const* const rangeEnd = rangeStart + range.size;
BYTE const* const rangeEnd = range.size != 0 ? rangeStart + range.size : rangeStart;
if (rangeStart == NULL || bufferStart == NULL)
return 0;
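/* Editor's note: computing rangeEnd only when range.size != 0 avoids
 * evaluating NULL + 0, which is undefined behavior in C pointer arithmetic. */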
@@ -2060,7 +2087,7 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
|| ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) { /* must finish the frame with a zero-size block */
size_t const jobSize = mtctx->inBuff.filled;
assert(mtctx->inBuff.filled <= mtctx->targetSectionSize);
FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) );
FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) , "");
}
/* check for potential compressed data ready to be flushed */
@@ -2074,7 +2101,7 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
FORWARD_IF_ERROR( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) );
FORWARD_IF_ERROR( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) , "");
/* recommended next input size : fill current input buffer */
return mtctx->targetSectionSize - mtctx->inBuff.filled; /* note : could be zero when input buffer is fully filled and no more availability to create new job */
@@ -2091,7 +2118,7 @@ static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* ou
|| ((endFrame==ZSTD_e_end) && !mtctx->frameEnded)) { /* need a last 0-size block to end frame */
DEBUGLOG(5, "ZSTDMT_flushStream_internal : create a new job (%u bytes, end:%u)",
(U32)srcSize, (U32)endFrame);
FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) );
FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) , "");
}
/* check if there is any data available to flush */

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -40,7 +40,7 @@
/* === Dependencies === */
#include <stddef.h> /* size_t */
#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters */
#include "zstd.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */
#include "../zstd.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */
/* === Constants === */

View File

@@ -1,47 +1,27 @@
/* ******************************************************************
huff0 huffman decoder,
part of Finite State Entropy library
Copyright (C) 2013-present, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
* huff0 huffman decoder,
* part of Finite State Entropy library
* Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */
/* **************************************************************
* Dependencies
****************************************************************/
#include <string.h> /* memcpy, memset */
#include "compiler.h"
#include "bitstream.h" /* BIT_* */
#include "fse.h" /* to compress headers */
#include "../common/compiler.h"
#include "../common/bitstream.h" /* BIT_* */
#include "../common/fse.h" /* to compress headers */
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
#include "error_private.h"
#include "../common/huf.h"
#include "../common/error_private.h"
/* **************************************************************
* Macros
@@ -61,9 +41,6 @@
* Error Management
****************************************************************/
#define HUF_isError ERR_isError
#ifndef CHECK_F
#define CHECK_F(f) { size_t const err_ = (f); if (HUF_isError(err_)) return err_; }
#endif
/* **************************************************************
@@ -181,17 +158,29 @@ size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize
/* fill DTable */
{ U32 n;
for (n=0; n<nbSymbols; n++) {
U32 const w = huffWeight[n];
U32 const length = (1 << w) >> 1;
U32 u;
size_t const nEnd = nbSymbols;
for (n=0; n<nEnd; n++) {
size_t const w = huffWeight[n];
size_t const length = (1 << w) >> 1;
size_t const uStart = rankVal[w];
size_t const uEnd = uStart + length;
size_t u;
HUF_DEltX1 D;
D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
for (u = rankVal[w]; u < rankVal[w] + length; u++)
dt[u] = D;
rankVal[w] += length;
} }
D.byte = (BYTE)n;
D.nbBits = (BYTE)(tableLog + 1 - w);
rankVal[w] = (U32)uEnd;
if (length < 4) {
/* Use length in the loop bound so the compiler knows it is short. */
for (u = 0; u < length; ++u)
dt[uStart + u] = D;
} else {
/* Unroll the loop 4 times; length is a power of 2 and a multiple of 4 here. */
for (u = uStart; u < uEnd; u += 4) {
dt[u + 0] = D;
dt[u + 1] = D;
dt[u + 2] = D;
dt[u + 3] = D;
} } } }
return iSize;
}
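/* Editor's note (worked example): a symbol of weight w occupies (1 << w) >> 1
 * consecutive DTable cells and decodes in tableLog + 1 - w bits; with
 * tableLog == 11, a weight-8 symbol fills 128 cells of 4 bits each, which is
 * why the fill loop above is worth unrolling. */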
@@ -282,6 +271,7 @@ HUF_decompress4X1_usingDTable_internal_body(
{ const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
BYTE* const olimit = oend - 3;
const void* const dtPtr = DTable + 1;
const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
@@ -306,9 +296,9 @@ HUF_decompress4X1_usingDTable_internal_body(
BYTE* op2 = opStart2;
BYTE* op3 = opStart3;
BYTE* op4 = opStart4;
U32 endSignal = BIT_DStream_unfinished;
DTableDesc const dtd = HUF_getDTableDesc(DTable);
U32 const dtLog = dtd.tableLog;
U32 endSignal = 1;
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
@ -317,8 +307,7 @@ HUF_decompress4X1_usingDTable_internal_body(
CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
/* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
while ( (endSignal==BIT_DStream_unfinished) && (op4<(oend-3)) ) {
for ( ; (endSignal) & (op4 < olimit) ; ) {
HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
@@ -335,10 +324,10 @@ HUF_decompress4X1_usingDTable_internal_body(
HUF_DECODE_SYMBOLX1_0(op2, &bitD2);
HUF_DECODE_SYMBOLX1_0(op3, &bitD3);
HUF_DECODE_SYMBOLX1_0(op4, &bitD4);
BIT_reloadDStream(&bitD1);
BIT_reloadDStream(&bitD2);
BIT_reloadDStream(&bitD3);
BIT_reloadDStream(&bitD4);
endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
}
/* check corruption */
@@ -757,7 +746,6 @@ HUF_decompress1X2_usingDTable_internal_body(
return dstSize;
}
FORCE_INLINE_TEMPLATE size_t
HUF_decompress4X2_usingDTable_internal_body(
void* dst, size_t dstSize,
@@ -769,6 +757,7 @@ HUF_decompress4X2_usingDTable_internal_body(
{ const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
BYTE* const olimit = oend - (sizeof(size_t)-1);
const void* const dtPtr = DTable+1;
const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
@@ -793,7 +782,7 @@ HUF_decompress4X2_usingDTable_internal_body(
BYTE* op2 = opStart2;
BYTE* op3 = opStart3;
BYTE* op4 = opStart4;
U32 endSignal;
U32 endSignal = 1;
DTableDesc const dtd = HUF_getDTableDesc(DTable);
U32 const dtLog = dtd.tableLog;
@@ -804,8 +793,29 @@ HUF_decompress4X2_usingDTable_internal_body(
CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
/* 16-32 symbols per loop (4-8 symbols per stream) */
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
for ( ; (endSignal==BIT_DStream_unfinished) & (op4<(oend-(sizeof(bitD4.bitContainer)-1))) ; ) {
for ( ; (endSignal) & (op4 < olimit); ) {
#if defined(__clang__) && (defined(__x86_64__) || defined(__i386__))
HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
#else
HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
@@ -822,8 +832,12 @@ HUF_decompress4X2_usingDTable_internal_body(
HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
endSignal = (U32)LIKELY(
(BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished)
& (BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished)
& (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished)
& (BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished));
#endif
}
/* check corruption */

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -15,17 +15,17 @@
* Dependencies
*********************************************************/
#include <string.h> /* memcpy, memmove, memset */
#include "cpu.h" /* bmi2 */
#include "mem.h" /* low level memory routines */
#include "../common/cpu.h" /* bmi2 */
#include "../common/mem.h" /* low level memory routines */
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#include "../common/fse.h"
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
#include "../common/huf.h"
#include "zstd_decompress_internal.h"
#include "zstd_ddict.h"
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
# include "zstd_legacy.h"
# include "../legacy/zstd_legacy.h"
#endif
@@ -65,6 +65,10 @@ void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
dctx->virtualStart = ddict->dictContent;
dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
dctx->previousDstEnd = dctx->dictEnd;
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
dctx->dictContentBeginForFuzzing = dctx->prefixStart;
dctx->dictContentEndForFuzzing = dctx->previousDstEnd;
#endif
if (ddict->entropyPresent) {
dctx->litEntropy = 1;
dctx->fseEntropy = 1;
@@ -107,7 +111,7 @@ ZSTD_loadEntropy_intoDDict(ZSTD_DDict* ddict,
/* load entropy tables */
RETURN_ERROR_IF(ZSTD_isError(ZSTD_loadDEntropy(
&ddict->entropy, ddict->dictContent, ddict->dictSize)),
dictionary_corrupted);
dictionary_corrupted, "");
ddict->entropyPresent = 1;
return 0;
}
@@ -133,7 +137,7 @@ static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
/* parse dictionary content */
FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) );
FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) , "");
return 0;
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@ -16,7 +16,7 @@
* Dependencies
*********************************************************/
#include <stddef.h> /* size_t */
#include "zstd.h" /* ZSTD_DDict, and several public functions */
#include "../zstd.h" /* ZSTD_DDict, and several public functions */
/*-*******************************************************


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@ -56,19 +56,19 @@
* Dependencies
*********************************************************/
#include <string.h> /* memcpy, memmove, memset */
#include "cpu.h" /* bmi2 */
#include "mem.h" /* low level memory routines */
#include "../common/cpu.h" /* bmi2 */
#include "../common/mem.h" /* low level memory routines */
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#include "../common/fse.h"
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
#include "zstd_internal.h" /* blockProperties_t */
#include "../common/huf.h"
#include "../common/zstd_internal.h" /* blockProperties_t */
#include "zstd_decompress_internal.h" /* ZSTD_DCtx */
#include "zstd_ddict.h" /* ZSTD_DDictDictContent */
#include "zstd_decompress_block.h" /* ZSTD_decompressBlock_internal */
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
# include "zstd_legacy.h"
# include "../legacy/zstd_legacy.h"
#endif
@ -111,7 +111,12 @@ static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
dctx->legacyContext = NULL;
dctx->previousLegacyVersion = 0;
dctx->noForwardProgress = 0;
dctx->oversizedDuration = 0;
dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
dctx->outBufferMode = ZSTD_obm_buffered;
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
dctx->dictContentEndForFuzzing = NULL;
#endif
}
ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize)
@ -208,7 +213,7 @@ unsigned ZSTD_isFrame(const void* buffer, size_t size)
static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)
{
size_t const minInputSize = ZSTD_startingInputLength(format);
RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong);
RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong, "");
{ BYTE const fhd = ((const BYTE*)src)[minInputSize-1];
U32 const dictID= fhd & 3;
@ -256,7 +261,7 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s
zfhPtr->frameType = ZSTD_skippableFrame;
return 0;
}
RETURN_ERROR(prefix_unknown);
RETURN_ERROR(prefix_unknown, "");
}
/* ensure there is enough `srcSize` to fully read/decode frame header */
@ -280,7 +285,7 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s
if (!singleSegment) {
BYTE const wlByte = ip[pos++];
U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge);
RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge, "");
windowSize = (1ULL << windowLog);
windowSize += (windowSize >> 3) * (wlByte&7);
}
@ -352,14 +357,14 @@ static size_t readSkippableFrameSize(void const* src, size_t srcSize)
size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE;
U32 sizeU32;
RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong);
RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong, "");
sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE);
RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32,
frameParameter_unsupported);
frameParameter_unsupported, "");
{
size_t const skippableSize = skippableHeaderSize + sizeU32;
RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong);
RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong, "");
return skippableSize;
}
}
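/* Usage sketch (illustrative, not part of the upstream diff) : building the
 * minimal 8-byte skippable frame that readSkippableFrameSize() accepts. The
 * helper name is hypothetical; the 4-byte magic plus 4-byte little-endian
 * content size is the documented skippable-frame header layout. */
static size_t writeEmptySkippableFrame(BYTE* dst, size_t dstCapacity)
{
    if (dstCapacity < ZSTD_SKIPPABLEHEADERSIZE) return 0;  /* need all 8 header bytes */
    MEM_writeLE32(dst, ZSTD_MAGIC_SKIPPABLE_START);        /* frame magic */
    MEM_writeLE32(dst + ZSTD_FRAMEIDSIZE, 0);              /* content size = 0 */
    return ZSTD_SKIPPABLEHEADERSIZE;                       /* readSkippableFrameSize() returns 8 */
}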
@ -439,7 +444,7 @@ static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t he
* harder.
*/
RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID),
dictionary_wrong);
dictionary_wrong, "");
#endif
if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0);
return 0;
@ -559,17 +564,6 @@ unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
* Frame decoding
***************************************************************/
void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)
{
if (dst != dctx->previousDstEnd) { /* not contiguous */
dctx->dictEnd = dctx->previousDstEnd;
dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
dctx->prefixStart = dst;
dctx->previousDstEnd = dst;
}
}
/** ZSTD_insertBlock() :
* insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
@ -587,9 +581,9 @@ static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
DEBUGLOG(5, "ZSTD_copyRawBlock");
if (dst == NULL) {
if (srcSize == 0) return 0;
RETURN_ERROR(dstBuffer_null);
RETURN_ERROR(dstBuffer_null, "");
}
RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall);
RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall, "");
memcpy(dst, src, srcSize);
return srcSize;
}
@ -600,9 +594,9 @@ static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
{
if (dst == NULL) {
if (regenSize == 0) return 0;
RETURN_ERROR(dstBuffer_null);
RETURN_ERROR(dstBuffer_null, "");
}
RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall);
RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall, "");
memset(dst, b, regenSize);
return regenSize;
}
@ -618,7 +612,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
{
const BYTE* ip = (const BYTE*)(*srcPtr);
BYTE* const ostart = (BYTE* const)dst;
BYTE* const oend = ostart + dstCapacity;
BYTE* const oend = dstCapacity != 0 ? ostart + dstCapacity : ostart;
BYTE* op = ostart;
size_t remainingSrcSize = *srcSizePtr;
@ -627,15 +621,15 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
/* check */
RETURN_ERROR_IF(
remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN(dctx->format)+ZSTD_blockHeaderSize,
srcSize_wrong);
srcSize_wrong, "");
/* Frame Header */
{ size_t const frameHeaderSize = ZSTD_frameHeaderSize_internal(
ip, ZSTD_FRAMEHEADERSIZE_PREFIX(dctx->format), dctx->format);
if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize,
srcSize_wrong);
FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) );
srcSize_wrong, "");
FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) , "");
ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize;
}
@ -648,7 +642,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
ip += ZSTD_blockHeaderSize;
remainingSrcSize -= ZSTD_blockHeaderSize;
RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong);
RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong, "");
switch(blockProperties.blockType)
{
@ -663,13 +657,15 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
break;
case bt_reserved :
default:
RETURN_ERROR(corruption_detected);
RETURN_ERROR(corruption_detected, "invalid block type");
}
if (ZSTD_isError(decodedSize)) return decodedSize;
if (dctx->fParams.checksumFlag)
XXH64_update(&dctx->xxhState, op, decodedSize);
if (decodedSize != 0)
op += decodedSize;
assert(ip != NULL);
ip += cBlockSize;
remainingSrcSize -= cBlockSize;
if (blockProperties.lastBlock) break;
@ -677,14 +673,14 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
RETURN_ERROR_IF((U64)(op-ostart) != dctx->fParams.frameContentSize,
corruption_detected);
corruption_detected, "");
}
if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState);
U32 checkRead;
RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong);
RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong, "");
checkRead = MEM_readLE32(ip);
RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong);
RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong, "");
ip += 4;
remainingSrcSize -= 4;
}
@ -741,7 +737,7 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
(unsigned)magicNumber, ZSTD_MAGICNUMBER);
if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
size_t const skippableSize = readSkippableFrameSize(src, srcSize);
FORWARD_IF_ERROR(skippableSize);
FORWARD_IF_ERROR(skippableSize, "readSkippableFrameSize failed");
assert(skippableSize <= srcSize);
src = (const BYTE *)src + skippableSize;
@ -751,11 +747,11 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
if (ddict) {
/* we were called from ZSTD_decompress_usingDDict */
FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict));
FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict), "");
} else {
/* this will initialize correctly with no dict if dict == NULL, so
* use this in all cases but ddict */
FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize), "");
}
ZSTD_checkContinuity(dctx, dst);
@ -776,6 +772,7 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
"error.");
if (ZSTD_isError(res)) return res;
assert(res <= dstCapacity);
if (res != 0)
dst = (BYTE*)dst + res;
dstCapacity -= res;
}
@ -824,7 +821,7 @@ size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t sr
#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)
size_t regenSize;
ZSTD_DCtx* const dctx = ZSTD_createDCtx();
RETURN_ERROR_IF(dctx==NULL, memory_allocation);
RETURN_ERROR_IF(dctx==NULL, memory_allocation, "NULL pointer!");
regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
ZSTD_freeDCtx(dctx);
return regenSize;
@ -842,6 +839,24 @@ size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t sr
****************************************/
size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; }
/**
* Similar to ZSTD_nextSrcSizeToDecompress(), but when a block input can be streamed,
* we allow taking a partial block as the input. Currently only raw uncompressed blocks can
* be streamed.
*
* For blocks that can be streamed, this allows us to reduce the latency until we produce
* output, and avoid copying the input.
*
* @param inputSize - The total amount of input that the caller currently has.
*/
static size_t ZSTD_nextSrcSizeToDecompressWithInputSize(ZSTD_DCtx* dctx, size_t inputSize) {
if (!(dctx->stage == ZSTDds_decompressBlock || dctx->stage == ZSTDds_decompressLastBlock))
return dctx->expected;
if (dctx->bType != bt_raw)
return dctx->expected;
return MIN(MAX(inputSize, 1), dctx->expected);
}
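/* Worked example (illustrative, not part of the upstream diff) : with a raw
 * block of 100 bytes pending (dctx->expected == 100), the caller may feed the
 * block in arbitrary chunks :
 *   ZSTD_nextSrcSizeToDecompressWithInputSize(dctx, 30)  -> 30   (partial chunk accepted)
 *   ZSTD_nextSrcSizeToDecompressWithInputSize(dctx, 500) -> 100  (capped at the block size)
 *   ZSTD_nextSrcSizeToDecompressWithInputSize(dctx, 0)   -> 1    (always requests progress)
 * For compressed and RLE blocks, the full dctx->expected is still required. */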
ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) {
switch(dctx->stage)
{
@ -874,7 +889,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
{
DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize);
/* Sanity check */
RETURN_ERROR_IF(srcSize != dctx->expected, srcSize_wrong, "not allowed");
RETURN_ERROR_IF(srcSize != ZSTD_nextSrcSizeToDecompressWithInputSize(dctx, srcSize), srcSize_wrong, "not allowed");
if (dstCapacity) ZSTD_checkContinuity(dctx, dst);
switch (dctx->stage)
@ -899,7 +914,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
case ZSTDds_decodeFrameHeader:
assert(src != NULL);
memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize), "");
dctx->expected = ZSTD_blockHeaderSize;
dctx->stage = ZSTDds_decodeBlockHeader;
return 0;
@ -941,29 +956,41 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
case bt_compressed:
DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed");
rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1);
dctx->expected = 0; /* Streaming not supported */
break;
case bt_raw :
assert(srcSize <= dctx->expected);
rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize);
FORWARD_IF_ERROR(rSize, "ZSTD_copyRawBlock failed");
assert(rSize == srcSize);
dctx->expected -= rSize;
break;
case bt_rle :
rSize = ZSTD_setRleBlock(dst, dstCapacity, *(const BYTE*)src, dctx->rleSize);
dctx->expected = 0; /* Streaming not supported */
break;
case bt_reserved : /* should never happen */
default:
RETURN_ERROR(corruption_detected);
RETURN_ERROR(corruption_detected, "invalid block type");
}
if (ZSTD_isError(rSize)) return rSize;
FORWARD_IF_ERROR(rSize, "");
RETURN_ERROR_IF(rSize > dctx->fParams.blockSizeMax, corruption_detected, "Decompressed Block Size Exceeds Maximum");
DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize);
dctx->decodedSize += rSize;
if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize);
dctx->previousDstEnd = (char*)dst + rSize;
/* Stay on the same stage until we are finished streaming the block. */
if (dctx->expected > 0) {
return rSize;
}
if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */
DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize);
RETURN_ERROR_IF(
dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
&& dctx->decodedSize != dctx->fParams.frameContentSize,
corruption_detected);
corruption_detected, "");
if (dctx->fParams.checksumFlag) { /* another round for frame checksum */
dctx->expected = 4;
dctx->stage = ZSTDds_checkChecksum;
@ -974,7 +1001,6 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
} else {
dctx->stage = ZSTDds_decodeBlockHeader;
dctx->expected = ZSTD_blockHeaderSize;
dctx->previousDstEnd = (char*)dst + rSize;
}
return rSize;
}
@ -984,7 +1010,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
{ U32 const h32 = (U32)XXH64_digest(&dctx->xxhState);
U32 const check32 = MEM_readLE32(src);
DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32);
RETURN_ERROR_IF(check32 != h32, checksum_wrong);
RETURN_ERROR_IF(check32 != h32, checksum_wrong, "");
dctx->expected = 0;
dctx->stage = ZSTDds_getFrameHeaderSize;
return 0;
@ -1005,7 +1031,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
default:
assert(0); /* impossible */
RETURN_ERROR(GENERIC); /* some compilers require default to do something */
RETURN_ERROR(GENERIC, "impossible to reach"); /* some compilers require default to do something */
}
}
@ -1016,6 +1042,10 @@ static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dict
dctx->virtualStart = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
dctx->prefixStart = dict;
dctx->previousDstEnd = (const char*)dict + dictSize;
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
dctx->dictContentBeginForFuzzing = dctx->prefixStart;
dctx->dictContentEndForFuzzing = dctx->previousDstEnd;
#endif
return 0;
}
@ -1029,7 +1059,7 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
const BYTE* dictPtr = (const BYTE*)dict;
const BYTE* const dictEnd = dictPtr + dictSize;
RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted);
RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted, "dict is too small");
assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY); /* dict must be valid */
dictPtr += 8; /* skip header = magic + dictID */
@ -1048,16 +1078,16 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
dictPtr, dictEnd - dictPtr,
workspace, workspaceSize);
#endif
RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted);
RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted, "");
dictPtr += hSize;
}
{ short offcodeNCount[MaxOff+1];
unsigned offcodeMaxValue = MaxOff, offcodeLog;
size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted);
RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted);
RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted);
RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted, "");
RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
ZSTD_buildFSETable( entropy->OFTable,
offcodeNCount, offcodeMaxValue,
OF_base, OF_bits,
@ -1068,9 +1098,9 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
{ short matchlengthNCount[MaxML+1];
unsigned matchlengthMaxValue = MaxML, matchlengthLog;
size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted);
RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted);
RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted);
RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted, "");
RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
ZSTD_buildFSETable( entropy->MLTable,
matchlengthNCount, matchlengthMaxValue,
ML_base, ML_bits,
@ -1081,9 +1111,9 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
{ short litlengthNCount[MaxLL+1];
unsigned litlengthMaxValue = MaxLL, litlengthLog;
size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted);
RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted);
RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted);
RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted, "");
RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
ZSTD_buildFSETable( entropy->LLTable,
litlengthNCount, litlengthMaxValue,
LL_base, LL_bits,
@ -1091,13 +1121,13 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
dictPtr += litlengthHeaderSize;
}
RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted);
RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
{ int i;
size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
for (i=0; i<3; i++) {
U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
RETURN_ERROR_IF(rep==0 || rep > dictContentSize,
dictionary_corrupted);
dictionary_corrupted, "");
entropy->rep[i] = rep;
} }
@ -1115,7 +1145,7 @@ static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict
/* load entropy tables */
{ size_t const eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize);
RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted);
RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted, "");
dict = (const char*)dict + eSize;
dictSize -= eSize;
}
@ -1138,6 +1168,7 @@ size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
dctx->litEntropy = dctx->fseEntropy = 0;
dctx->dictID = 0;
dctx->bType = bt_reserved;
ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
dctx->LLTptr = dctx->entropy.LLTable;
@ -1149,11 +1180,11 @@ size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
{
FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) );
FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , "");
if (dict && dictSize)
RETURN_ERROR_IF(
ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)),
dictionary_corrupted);
dictionary_corrupted, "");
return 0;
}
@ -1172,7 +1203,7 @@ size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
DEBUGLOG(4, "DDict is %s",
dctx->ddictIsCold ? "~cold~" : "hot!");
}
FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) );
FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , "");
if (ddict) { /* NULL ddict is equivalent to no dictionary */
ZSTD_copyDDictParameters(dctx, ddict);
}
@ -1263,11 +1294,11 @@ size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType)
{
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
ZSTD_clearDict(dctx);
if (dict && dictSize != 0) {
dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation);
RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation, "NULL pointer!");
dctx->ddict = dctx->ddictLocal;
dctx->dictUses = ZSTD_use_indefinitely;
}
@ -1286,7 +1317,7 @@ size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSi
size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
{
FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType));
FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType), "");
dctx->dictUses = ZSTD_use_once;
return 0;
}
@ -1303,8 +1334,8 @@ size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSiz
size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
{
DEBUGLOG(4, "ZSTD_initDStream_usingDict");
FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) );
FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) );
FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) , "");
return ZSTD_startingInputLength(zds->format);
}
@ -1320,8 +1351,8 @@ size_t ZSTD_initDStream(ZSTD_DStream* zds)
* this function cannot fail */
size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
{
FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) );
FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) );
FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) , "");
return ZSTD_startingInputLength(dctx->format);
}
@ -1330,14 +1361,14 @@ size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
* this function cannot fail */
size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
{
FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only));
FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), "");
return ZSTD_startingInputLength(dctx->format);
}
size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
{
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
ZSTD_clearDict(dctx);
if (ddict) {
dctx->ddict = ddict;
@ -1354,9 +1385,9 @@ size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax);
size_t const min = (size_t)1 << bounds.lowerBound;
size_t const max = (size_t)1 << bounds.upperBound;
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound);
RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound);
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound, "");
RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound, "");
dctx->maxWindowSize = maxWindowSize;
return 0;
}
@ -1379,6 +1410,10 @@ ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam)
bounds.upperBound = (int)ZSTD_f_zstd1_magicless;
ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
return bounds;
case ZSTD_d_stableOutBuffer:
bounds.lowerBound = (int)ZSTD_obm_buffered;
bounds.upperBound = (int)ZSTD_obm_stable;
return bounds;
default:;
}
bounds.error = ERROR(parameter_unsupported);
@ -1398,12 +1433,12 @@ static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value)
}
#define CHECK_DBOUNDS(p,v) { \
RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound); \
RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound, ""); \
}
size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value)
{
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
switch(dParam) {
case ZSTD_d_windowLogMax:
if (value == 0) value = ZSTD_WINDOWLOG_LIMIT_DEFAULT;
@ -1414,9 +1449,13 @@ size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value
CHECK_DBOUNDS(ZSTD_d_format, value);
dctx->format = (ZSTD_format_e)value;
return 0;
case ZSTD_d_stableOutBuffer:
CHECK_DBOUNDS(ZSTD_d_stableOutBuffer, value);
dctx->outBufferMode = (ZSTD_outBufferMode_e)value;
return 0;
default:;
}
RETURN_ERROR(parameter_unsupported);
RETURN_ERROR(parameter_unsupported, "");
}
size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset)
@ -1428,7 +1467,7 @@ size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset)
}
if ( (reset == ZSTD_reset_parameters)
|| (reset == ZSTD_reset_session_and_parameters) ) {
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
ZSTD_clearDict(dctx);
dctx->format = ZSTD_f_zstd1;
dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
@ -1449,7 +1488,7 @@ size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long
unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
size_t const minRBSize = (size_t) neededSize;
RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize,
frameParameter_windowTooLarge);
frameParameter_windowTooLarge, "");
return minRBSize;
}
@ -1467,30 +1506,94 @@ size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
ZSTD_frameHeader zfh;
size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
if (ZSTD_isError(err)) return err;
RETURN_ERROR_IF(err>0, srcSize_wrong);
RETURN_ERROR_IF(err>0, srcSize_wrong, "");
RETURN_ERROR_IF(zfh.windowSize > windowSizeMax,
frameParameter_windowTooLarge);
frameParameter_windowTooLarge, "");
return ZSTD_estimateDStreamSize((size_t)zfh.windowSize);
}
/* ***** Decompression ***** */
MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
size_t const length = MIN(dstCapacity, srcSize);
memcpy(dst, src, length);
return length;
}
static int ZSTD_DCtx_isOverflow(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize)
{
return (zds->inBuffSize + zds->outBuffSize) >= (neededInBuffSize + neededOutBuffSize) * ZSTD_WORKSPACETOOLARGE_FACTOR;
}
static void ZSTD_DCtx_updateOversizedDuration(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize)
{
if (ZSTD_DCtx_isOverflow(zds, neededInBuffSize, neededOutBuffSize))
zds->oversizedDuration++;
else
zds->oversizedDuration = 0;
}
static int ZSTD_DCtx_isOversizedTooLong(ZSTD_DStream* zds)
{
return zds->oversizedDuration >= ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}
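/* Illustrative sketch (not part of the upstream diff) of how the two helpers
 * above combine in zdss_loadHeader : buffers are reallocated when they are too
 * small, or once they have stayed oversized for too many frames in a row. The
 * helper name is hypothetical. */
static int ZSTD_DCtx_shouldResizeBuffers(ZSTD_DStream* zds,
                                         size_t const neededInBuffSize,
                                         size_t const neededOutBuffSize)
{
    int const tooSmall = (zds->inBuffSize < neededInBuffSize)
                      || (zds->outBuffSize < neededOutBuffSize);
    ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize);
    return tooSmall || ZSTD_DCtx_isOversizedTooLong(zds);
}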
/* Checks that the output buffer hasn't changed if ZSTD_obm_stable is used. */
static size_t ZSTD_checkOutBuffer(ZSTD_DStream const* zds, ZSTD_outBuffer const* output)
{
ZSTD_outBuffer const expect = zds->expectedOutBuffer;
/* No requirement when ZSTD_obm_stable is not enabled. */
if (zds->outBufferMode != ZSTD_obm_stable)
return 0;
/* Any buffer is allowed in zdss_init; after that, the same buffer must be
* provided on every call until the context is reset.
*/
if (zds->streamStage == zdss_init)
return 0;
/* The buffer must match our expectation exactly. */
if (expect.dst == output->dst && expect.pos == output->pos && expect.size == output->size)
return 0;
RETURN_ERROR(dstBuffer_wrong, "ZSTD_obm_stable enabled but output differs!");
}
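/* Usage sketch (illustrative, not part of the upstream diff) : a caller opting
 * into ZSTD_obm_stable must pass the exact same ZSTD_outBuffer on every call,
 * otherwise ZSTD_checkOutBuffer() above fails with dstBuffer_wrong. Assumes
 * `src` holds one complete frame and `dst` is large enough for its content;
 * the helper name is hypothetical. */
static size_t decompressIntoStableBuffer(ZSTD_DCtx* dctx,
                                         void* dst, size_t dstCapacity,
                                         const void* src, size_t srcSize)
{
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };  /* must stay identical between calls */
    ZSTD_inBuffer in = { src, srcSize, 0 };
    size_t ret;
    FORWARD_IF_ERROR(ZSTD_DCtx_setParameter(dctx, ZSTD_d_stableOutBuffer, 1), "");
    do {
        ret = ZSTD_decompressStream(dctx, &out, &in);
        FORWARD_IF_ERROR(ret, "");
    } while (ret != 0);  /* 0 => frame fully decoded and flushed */
    return out.pos;
}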
/* Calls ZSTD_decompressContinue() with the right parameters for ZSTD_decompressStream()
* and updates the stage and the output buffer state. This call is extracted so it can be
* used both when reading directly from the ZSTD_inBuffer, and in buffered input mode.
* NOTE: You must break after calling this function since the streamStage is modified.
*/
static size_t ZSTD_decompressContinueStream(
ZSTD_DStream* zds, char** op, char* oend,
void const* src, size_t srcSize) {
int const isSkipFrame = ZSTD_isSkipFrame(zds);
if (zds->outBufferMode == ZSTD_obm_buffered) {
size_t const dstSize = isSkipFrame ? 0 : zds->outBuffSize - zds->outStart;
size_t const decodedSize = ZSTD_decompressContinue(zds,
zds->outBuff + zds->outStart, dstSize, src, srcSize);
FORWARD_IF_ERROR(decodedSize, "");
if (!decodedSize && !isSkipFrame) {
zds->streamStage = zdss_read;
} else {
zds->outEnd = zds->outStart + decodedSize;
zds->streamStage = zdss_flush;
}
} else {
/* Write directly into the output buffer */
size_t const dstSize = isSkipFrame ? 0 : oend - *op;
size_t const decodedSize = ZSTD_decompressContinue(zds, *op, dstSize, src, srcSize);
FORWARD_IF_ERROR(decodedSize, "");
*op += decodedSize;
/* Flushing is not needed. */
zds->streamStage = zdss_read;
assert(*op <= oend);
assert(zds->outBufferMode == ZSTD_obm_stable);
}
return 0;
}
size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
const char* const istart = (const char*)(input->src) + input->pos;
const char* const iend = (const char*)(input->src) + input->size;
const char* const src = (const char*)input->src;
const char* const istart = input->pos != 0 ? src + input->pos : src;
const char* const iend = input->size != 0 ? src + input->size : src;
const char* ip = istart;
char* const ostart = (char*)(output->dst) + output->pos;
char* const oend = (char*)(output->dst) + output->size;
char* const dst = (char*)output->dst;
char* const ostart = output->pos != 0 ? dst + output->pos : dst;
char* const oend = output->size != 0 ? dst + output->size : dst;
char* op = ostart;
U32 someMoreWork = 1;
@ -1506,6 +1609,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
"forbidden. out: pos: %u vs size: %u",
(U32)output->pos, (U32)output->size);
DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos));
FORWARD_IF_ERROR(ZSTD_checkOutBuffer(zds, output), "");
while (someMoreWork) {
switch(zds->streamStage)
@ -1516,6 +1620,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
zds->legacyVersion = 0;
zds->hostageByte = 0;
zds->expectedOutBuffer = *output;
/* fall-through */
case zdss_loadHeader :
@ -1543,7 +1648,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
"legacy support is incompatible with static dctx");
FORWARD_IF_ERROR(ZSTD_initLegacyStream(&zds->legacyContext,
zds->previousLegacyVersion, legacyVersion,
dict, dictSize));
dict, dictSize), "");
zds->legacyVersion = zds->previousLegacyVersion = legacyVersion;
{ size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, legacyVersion, output, input);
if (hint==0) zds->streamStage = zdss_init; /* or stay in stage zdss_loadHeader */
@ -1570,7 +1675,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
} }
/* check for single-pass mode opportunity */
if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */
if (zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
&& zds->fParams.frameType != ZSTD_skippableFrame
&& (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) {
size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend-istart);
if (cSize <= (size_t)(iend-istart)) {
@ -1586,15 +1692,23 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
break;
} }
/* Check that the output buffer is large enough for ZSTD_obm_stable. */
if (zds->outBufferMode == ZSTD_obm_stable
&& zds->fParams.frameType != ZSTD_skippableFrame
&& zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
&& (U64)(size_t)(oend-op) < zds->fParams.frameContentSize) {
RETURN_ERROR(dstSize_tooSmall, "ZSTD_obm_stable passed but ZSTD_outBuffer is too small");
}
/* Consume header (see ZSTDds_decodeFrameHeader) */
DEBUGLOG(4, "Consume header");
FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)));
FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)), "");
if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
zds->stage = ZSTDds_skipFrame;
} else {
FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize));
FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize), "");
zds->expected = ZSTD_blockHeaderSize;
zds->stage = ZSTDds_decodeBlockHeader;
}
@ -1605,12 +1719,20 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
(U32)(zds->maxWindowSize >> 10) );
zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize,
frameParameter_windowTooLarge);
frameParameter_windowTooLarge, "");
/* Adapt buffer sizes to frame header instructions */
{ size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
size_t const neededOutBuffSize = ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize);
if ((zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize)) {
size_t const neededOutBuffSize = zds->outBufferMode == ZSTD_obm_buffered
? ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize)
: 0;
ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize);
{ int const tooSmall = (zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize);
int const tooLarge = ZSTD_DCtx_isOversizedTooLong(zds);
if (tooSmall || tooLarge) {
size_t const bufferSize = neededInBuffSize + neededOutBuffSize;
DEBUGLOG(4, "inBuff : from %u to %u",
(U32)zds->inBuffSize, (U32)neededInBuffSize);
@ -1621,24 +1743,24 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
assert(zds->staticSize >= sizeof(ZSTD_DCtx)); /* controlled at init */
RETURN_ERROR_IF(
bufferSize > zds->staticSize - sizeof(ZSTD_DCtx),
memory_allocation);
memory_allocation, "");
} else {
ZSTD_free(zds->inBuff, zds->customMem);
zds->inBuffSize = 0;
zds->outBuffSize = 0;
zds->inBuff = (char*)ZSTD_malloc(bufferSize, zds->customMem);
RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation);
RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation, "");
}
zds->inBuffSize = neededInBuffSize;
zds->outBuff = zds->inBuff + zds->inBuffSize;
zds->outBuffSize = neededOutBuffSize;
} }
} } }
zds->streamStage = zdss_read;
/* fall-through */
case zdss_read:
DEBUGLOG(5, "stage zdss_read");
{ size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
{ size_t const neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip);
DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize);
if (neededInSize==0) { /* end of frame */
zds->streamStage = zdss_init;
@ -1646,15 +1768,9 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
break;
}
if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */
int const isSkipFrame = ZSTD_isSkipFrame(zds);
size_t const decodedSize = ZSTD_decompressContinue(zds,
zds->outBuff + zds->outStart, (isSkipFrame ? 0 : zds->outBuffSize - zds->outStart),
ip, neededInSize);
if (ZSTD_isError(decodedSize)) return decodedSize;
FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize), "");
ip += neededInSize;
if (!decodedSize && !isSkipFrame) break; /* this was just a header */
zds->outEnd = zds->outStart + decodedSize;
zds->streamStage = zdss_flush;
/* Function modifies the stage so we must break */
break;
} }
if (ip==iend) { someMoreWork = 0; break; } /* no more input */
@ -1666,6 +1782,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
size_t const toLoad = neededInSize - zds->inPos;
int const isSkipFrame = ZSTD_isSkipFrame(zds);
size_t loadedSize;
/* At this point we shouldn't be decompressing a block that we can stream. */
assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip));
if (isSkipFrame) {
loadedSize = MIN(toLoad, (size_t)(iend-ip));
} else {
@ -1679,17 +1797,11 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */
/* decode loaded input */
{ size_t const decodedSize = ZSTD_decompressContinue(zds,
zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart,
zds->inBuff, neededInSize);
if (ZSTD_isError(decodedSize)) return decodedSize;
zds->inPos = 0; /* input is consumed */
if (!decodedSize && !isSkipFrame) { zds->streamStage = zdss_read; break; } /* this was just a header */
zds->outEnd = zds->outStart + decodedSize;
} }
zds->streamStage = zdss_flush;
/* fall-through */
FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, zds->inBuff, neededInSize), "");
/* Function modifies the stage so we must break */
break;
}
case zdss_flush:
{ size_t const toFlushSize = zds->outEnd - zds->outStart;
size_t const flushedSize = ZSTD_limitCopy(op, oend-op, zds->outBuff + zds->outStart, toFlushSize);
@ -1712,17 +1824,21 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
default:
assert(0); /* impossible */
RETURN_ERROR(GENERIC); /* some compilers require default to do something */
RETURN_ERROR(GENERIC, "impossible to reach"); /* some compilers require default to do something */
} }
/* result */
input->pos = (size_t)(ip - (const char*)(input->src));
output->pos = (size_t)(op - (char*)(output->dst));
/* Update the expected output buffer for ZSTD_obm_stable. */
zds->expectedOutBuffer = *output;
if ((ip==istart) && (op==ostart)) { /* no forward progress */
zds->noForwardProgress ++;
if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) {
RETURN_ERROR_IF(op==oend, dstSize_tooSmall);
RETURN_ERROR_IF(ip==iend, srcSize_wrong);
RETURN_ERROR_IF(op==oend, dstSize_tooSmall, "");
RETURN_ERROR_IF(ip==iend, srcSize_wrong, "");
assert(0);
}
} else {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@ -15,14 +15,14 @@
* Dependencies
*********************************************************/
#include <string.h> /* memcpy, memmove, memset */
#include "compiler.h" /* prefetch */
#include "cpu.h" /* bmi2 */
#include "mem.h" /* low level memory routines */
#include "../common/compiler.h" /* prefetch */
#include "../common/cpu.h" /* bmi2 */
#include "../common/mem.h" /* low level memory routines */
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#include "../common/fse.h"
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
#include "zstd_internal.h"
#include "../common/huf.h"
#include "../common/zstd_internal.h"
#include "zstd_decompress_internal.h" /* ZSTD_DCtx */
#include "zstd_ddict.h" /* ZSTD_DDictDictContent */
#include "zstd_decompress_block.h"
@ -56,7 +56,7 @@ static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
blockProperties_t* bpPtr)
{
RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong);
RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong, "");
{ U32 const cBlockHeader = MEM_readLE24(src);
U32 const cSize = cBlockHeader >> 3;
@ -64,7 +64,7 @@ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
bpPtr->origSize = cSize; /* only useful for RLE */
if (bpPtr->blockType == bt_rle) return 1;
RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected);
RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected, "");
return cSize;
}
}
@ -80,7 +80,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
{
DEBUGLOG(5, "ZSTD_decodeLiteralsBlock");
RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected);
RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, "");
{ const BYTE* const istart = (const BYTE*) src;
symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
@ -89,7 +89,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
{
case set_repeat:
DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block");
RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted);
RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted, "");
/* fall-through */
case set_compressed:
@ -121,8 +121,8 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
litCSize = (lhc >> 22) + ((size_t)istart[4] << 10);
break;
}
RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected);
RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected);
RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, "");
/* prefetch huffman table if cold */
if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
@ -160,7 +160,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
}
}
RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected);
RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected, "");
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
@ -190,7 +190,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
}
if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected);
RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected, "");
memcpy(dctx->litBuffer, istart+lhSize, litSize);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
@ -222,7 +222,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4");
break;
}
RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected);
RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
@ -440,8 +440,8 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb
switch(type)
{
case set_rle :
RETURN_ERROR_IF(!srcSize, srcSize_wrong);
RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected);
RETURN_ERROR_IF(!srcSize, srcSize_wrong, "");
RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected, "");
{ U32 const symbol = *(const BYTE*)src;
U32 const baseline = baseValue[symbol];
U32 const nbBits = nbAdditionalBits[symbol];
@ -453,7 +453,7 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb
*DTablePtr = defaultTable;
return 0;
case set_repeat:
RETURN_ERROR_IF(!flagRepeatTable, corruption_detected);
RETURN_ERROR_IF(!flagRepeatTable, corruption_detected, "");
/* prefetch FSE table if used */
if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
const void* const pStart = *DTablePtr;
@ -465,8 +465,8 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb
{ unsigned tableLog;
S16 norm[MaxSeq+1];
size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected);
RETURN_ERROR_IF(tableLog > maxLog, corruption_detected);
RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected, "");
RETURN_ERROR_IF(tableLog > maxLog, corruption_detected, "");
ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog);
*DTablePtr = DTableSpace;
return headerSize;
@ -487,28 +487,28 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
/* check */
RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong);
RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong, "");
/* SeqHead */
nbSeq = *ip++;
if (!nbSeq) {
*nbSeqPtr=0;
RETURN_ERROR_IF(srcSize != 1, srcSize_wrong);
RETURN_ERROR_IF(srcSize != 1, srcSize_wrong, "");
return 1;
}
if (nbSeq > 0x7F) {
if (nbSeq == 0xFF) {
RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong);
RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, "");
nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
} else {
RETURN_ERROR_IF(ip >= iend, srcSize_wrong);
RETURN_ERROR_IF(ip >= iend, srcSize_wrong, "");
nbSeq = ((nbSeq-0x80)<<8) + *ip++;
}
}
*nbSeqPtr = nbSeq;
/* FSE table descriptors */
RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong); /* minimum possible size: 1 byte for symbol encoding types */
RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encoding types */
{ symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
@ -521,7 +521,7 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
LL_base, LL_bits,
LL_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq);
RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected);
RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected, "ZSTD_buildSeqTable failed");
ip += llhSize;
}
@ -531,7 +531,7 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
OF_base, OF_bits,
OF_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq);
RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected);
RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected, "ZSTD_buildSeqTable failed");
ip += ofhSize;
}
@ -541,7 +541,7 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
ML_base, ML_bits,
ML_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq);
RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected);
RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected, "ZSTD_buildSeqTable failed");
ip += mlhSize;
}
}
@ -580,7 +580,7 @@ typedef struct {
* Precondition: *ip <= *op
* Postcondition: *op - *ip >= 8
*/
static void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) {
HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) {
assert(*ip <= *op);
if (offset < 8) {
/* close range match, overlap */
@ -665,15 +665,15 @@ size_t ZSTD_execSequenceEnd(BYTE* op,
{
BYTE* const oLitEnd = op + sequence.litLength;
size_t const sequenceLength = sequence.litLength + sequence.matchLength;
BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
const BYTE* const iLitEnd = *litPtr + sequence.litLength;
const BYTE* match = oLitEnd - sequence.offset;
BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
/* bounds checks */
assert(oLitEnd < oMatchEnd);
RETURN_ERROR_IF(oMatchEnd > oend, dstSize_tooSmall, "last match must fit within dstBuffer");
RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "try to read beyond literal buffer");
/* bounds checks : careful of address space overflow in 32-bit mode */
RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer");
RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer");
assert(op < op + sequenceLength);
assert(oLitEnd < op + sequenceLength);
/* copy literals */
ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap);
@ -683,7 +683,7 @@ size_t ZSTD_execSequenceEnd(BYTE* op,
/* copy Match */
if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
/* offset beyond prefix */
RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected);
RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, "");
match = dictEnd - (prefixStart-match);
if (match + sequence.matchLength <= dictEnd) {
memmove(oLitEnd, match, sequence.matchLength);
@ -709,16 +709,27 @@ size_t ZSTD_execSequence(BYTE* op,
BYTE* const oLitEnd = op + sequence.litLength;
size_t const sequenceLength = sequence.litLength + sequence.matchLength;
BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; /* risk : address space underflow on oend=NULL */
const BYTE* const iLitEnd = *litPtr + sequence.litLength;
const BYTE* match = oLitEnd - sequence.offset;
/* Errors and uncommon cases handled here. */
assert(oLitEnd < oMatchEnd);
if (iLitEnd > litLimit || oMatchEnd > oend_w)
assert(op != NULL /* Precondition */);
assert(oend_w < oend /* No underflow */);
/* Handle edge cases in a slow path:
* - Read beyond end of literals
* - Match end is within WILDCOPY_OVERLENGTH of oend
* - 32-bit mode and the match length overflows
*/
if (UNLIKELY(
iLitEnd > litLimit ||
oMatchEnd > oend_w ||
(MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH)))
return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
/* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */
assert(op <= oLitEnd /* No overflow */);
assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */);
assert(oMatchEnd <= oend /* No underflow */);
assert(iLitEnd <= litLimit /* Literal length is in bounds */);
assert(oLitEnd <= oend_w /* Can wildcopy literals */);
assert(oMatchEnd <= oend_w /* Can wildcopy matches */);
@ -729,7 +740,7 @@ size_t ZSTD_execSequence(BYTE* op,
*/
assert(WILDCOPY_OVERLENGTH >= 16);
ZSTD_copy16(op, (*litPtr));
if (sequence.litLength > 16) {
if (UNLIKELY(sequence.litLength > 16)) {
ZSTD_wildcopy(op+16, (*litPtr)+16, sequence.litLength-16, ZSTD_no_overlap);
}
op = oLitEnd;
@ -738,7 +749,7 @@ size_t ZSTD_execSequence(BYTE* op,
/* Copy Match */
if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
/* offset beyond prefix -> go into extDict */
RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected);
RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, "");
match = dictEnd + (match - prefixStart);
if (match + sequence.matchLength <= dictEnd) {
memmove(oLitEnd, match, sequence.matchLength);
@ -760,7 +771,7 @@ size_t ZSTD_execSequence(BYTE* op,
/* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy
* without overlap checking.
*/
if (sequence.offset >= WILDCOPY_VECLEN) {
if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) {
/* We bet on a full wildcopy for matches, since we expect matches to be
* longer than literals (in general). In silesia, ~10% of matches are longer
* than 16 bytes.
@ -802,6 +813,14 @@ ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD)
DStatePtr->state = DInfo.nextState + lowBits;
}
FORCE_INLINE_TEMPLATE void
ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, ZSTD_seqSymbol const DInfo)
{
U32 const nbBits = DInfo.nbBits;
size_t const lowBits = BIT_readBits(bitD, nbBits);
DStatePtr->state = DInfo.nextState + lowBits;
}
/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
* offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
* bits before reloading. This value is the maximum number of bytes we read
@ -813,25 +832,26 @@ ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD)
: 0)
typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;
typedef enum { ZSTD_p_noPrefetch=0, ZSTD_p_prefetch=1 } ZSTD_prefetch_e;
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
FORCE_INLINE_TEMPLATE seq_t
ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets, const ZSTD_prefetch_e prefetch)
{
seq_t seq;
U32 const llBits = seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits;
U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits;
U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits;
U32 const totalBits = llBits+mlBits+ofBits;
U32 const llBase = seqState->stateLL.table[seqState->stateLL.state].baseValue;
U32 const mlBase = seqState->stateML.table[seqState->stateML.state].baseValue;
U32 const ofBase = seqState->stateOffb.table[seqState->stateOffb.state].baseValue;
ZSTD_seqSymbol const llDInfo = seqState->stateLL.table[seqState->stateLL.state];
ZSTD_seqSymbol const mlDInfo = seqState->stateML.table[seqState->stateML.state];
ZSTD_seqSymbol const ofDInfo = seqState->stateOffb.table[seqState->stateOffb.state];
U32 const llBase = llDInfo.baseValue;
U32 const mlBase = mlDInfo.baseValue;
U32 const ofBase = ofDInfo.baseValue;
BYTE const llBits = llDInfo.nbAdditionalBits;
BYTE const mlBits = mlDInfo.nbAdditionalBits;
BYTE const ofBits = ofDInfo.nbAdditionalBits;
BYTE const totalBits = llBits+mlBits+ofBits;
/* sequence */
{ size_t offset;
if (!ofBits)
offset = 0;
else {
if (ofBits > 1) {
ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
assert(ofBits <= MaxOff);
@ -845,59 +865,138 @@ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
}
seqState->prevOffset[2] = seqState->prevOffset[1];
seqState->prevOffset[1] = seqState->prevOffset[0];
seqState->prevOffset[0] = offset;
} else {
U32 const ll0 = (llBase == 0);
if (LIKELY((ofBits == 0))) {
if (LIKELY(!ll0))
offset = seqState->prevOffset[0];
else {
offset = seqState->prevOffset[1];
seqState->prevOffset[1] = seqState->prevOffset[0];
seqState->prevOffset[0] = offset;
}
if (ofBits <= 1) {
offset += (llBase==0);
if (offset) {
size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
} else {
offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1);
{ size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
seqState->prevOffset[1] = seqState->prevOffset[0];
seqState->prevOffset[0] = offset = temp;
} else { /* offset == 0 */
offset = seqState->prevOffset[0];
}
} else {
seqState->prevOffset[2] = seqState->prevOffset[1];
seqState->prevOffset[1] = seqState->prevOffset[0];
seqState->prevOffset[0] = offset;
}
} } }
seq.offset = offset;
}
seq.matchLength = mlBase
+ ((mlBits>0) ? BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/) : 0); /* <= 16 bits */
seq.matchLength = mlBase;
if (mlBits > 0)
seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/);
if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
BIT_reloadDStream(&seqState->DStream);
if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
BIT_reloadDStream(&seqState->DStream);
/* Ensure there are enough bits to read the rest of data in 64-bit mode. */
ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
seq.litLength = llBase
+ ((llBits>0) ? BIT_readBitsFast(&seqState->DStream, llBits/*>0*/) : 0); /* <= 16 bits */
seq.litLength = llBase;
if (llBits > 0)
seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/);
if (MEM_32bits())
BIT_reloadDStream(&seqState->DStream);
DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
(U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
/* ANS state update */
if (prefetch == ZSTD_p_prefetch) {
size_t const pos = seqState->pos + seq.litLength;
const BYTE* const matchBase = (seq.offset > pos) ? seqState->dictEnd : seqState->prefixStart;
seq.match = matchBase + pos - seq.offset; /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
* No consequence though : no memory access will occur, offset is only used for prefetching */
seqState->pos = pos + seq.matchLength;
}
/* ANS state update
* gcc-9.0.0 does 2.5% worse with ZSTD_updateFseStateWithDInfo().
* clang-9.2.0 does 7% worse with ZSTD_updateFseState().
* Naturally it seems like ZSTD_updateFseStateWithDInfo() should be the
* better option, so it is the default for other compilers. But, if you
* measure that it is worse, please put up a pull request.
*/
{
#if defined(__GNUC__) && !defined(__clang__)
const int kUseUpdateFseState = 1;
#else
const int kUseUpdateFseState = 0;
#endif
if (kUseUpdateFseState) {
ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */
ZSTD_updateFseState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */
if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */
} else {
ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llDInfo); /* <= 9 bits */
ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlDInfo); /* <= 9 bits */
if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofDInfo); /* <= 8 bits */
}
}
return seq;
}
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
static int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd)
{
size_t const windowSize = dctx->fParams.windowSize;
/* No dictionary used. */
if (dctx->dictContentEndForFuzzing == NULL) return 0;
/* Dictionary is our prefix. */
if (prefixStart == dctx->dictContentBeginForFuzzing) return 1;
/* Dictionary is not our ext-dict. */
if (dctx->dictEnd != dctx->dictContentEndForFuzzing) return 0;
/* Dictionary is not within our window size. */
if ((size_t)(oLitEnd - prefixStart) >= windowSize) return 0;
/* Dictionary is active. */
return 1;
}
MEM_STATIC void ZSTD_assertValidSequence(
ZSTD_DCtx const* dctx,
BYTE const* op, BYTE const* oend,
seq_t const seq,
BYTE const* prefixStart, BYTE const* virtualStart)
{
size_t const windowSize = dctx->fParams.windowSize;
size_t const sequenceSize = seq.litLength + seq.matchLength;
BYTE const* const oLitEnd = op + seq.litLength;
DEBUGLOG(6, "Checking sequence: litL=%u matchL=%u offset=%u",
(U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
assert(op <= oend);
assert((size_t)(oend - op) >= sequenceSize);
assert(sequenceSize <= ZSTD_BLOCKSIZE_MAX);
if (ZSTD_dictionaryIsActive(dctx, prefixStart, oLitEnd)) {
size_t const dictSize = (size_t)((char const*)dctx->dictContentEndForFuzzing - (char const*)dctx->dictContentBeginForFuzzing);
/* Offset must be within the dictionary. */
assert(seq.offset <= (size_t)(oLitEnd - virtualStart));
assert(seq.offset <= windowSize + dictSize);
} else {
/* Offset must be within our window. */
assert(seq.offset <= windowSize);
}
}
#endif
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
FORCE_INLINE_TEMPLATE size_t
DONT_VECTORIZE
ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset)
const ZSTD_longOffset_e isLongOffset,
const int frame)
{
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
@ -910,47 +1009,105 @@ ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
const BYTE* const vBase = (const BYTE*) (dctx->virtualStart);
const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
DEBUGLOG(5, "ZSTD_decompressSequences_body");
(void)frame;
/* Regen sequences */
if (nbSeq) {
seqState_t seqState;
size_t error = 0;
dctx->fseEntropy = 1;
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
RETURN_ERROR_IF(
ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
corruption_detected);
corruption_detected, "");
ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
assert(dst != NULL);
ZSTD_STATIC_ASSERT(
BIT_DStream_unfinished < BIT_DStream_completed &&
BIT_DStream_endOfBuffer < BIT_DStream_completed &&
BIT_DStream_completed < BIT_DStream_overflow);
for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) {
nbSeq--;
{ seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
#if defined(__GNUC__) && defined(__x86_64__)
/* Align the decompression loop to 32 + 16 bytes.
*
* zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression
* speed swings based on the alignment of the decompression loop. This
* performance swing is caused by parts of the decompression loop falling
* out of the DSB. The entire decompression loop should fit in the DSB,
* when it can't we get much worse performance. You can measure if you've
* hit the good case or the bad case with this perf command for some
* compressed file test.zst:
*
* perf stat -e cycles -e instructions -e idq.all_dsb_cycles_any_uops \
* -e idq.all_mite_cycles_any_uops -- ./zstd -tq test.zst
*
* If you see most cycles served out of the MITE you've hit the bad case.
* If you see most cycles served out of the DSB you've hit the good case.
* If it is pretty even then you may be in an okay case.
*
* I've been able to reproduce this issue on the following CPUs:
* - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9
* Use Instruments->Counters to get DSB/MITE cycles.
* I never got performance swings, but I was able to
* go from the good case of mostly DSB to half of the
* cycles served from MITE.
* - Coffeelake: Intel i9-9900k
*
* I haven't been able to reproduce the instability or DSB misses on any
* of the following CPUs:
* - Haswell
* - Broadwell: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GHz
* - Skylake
*
* If you are seeing performance instability, this script can help test.
* It tests on 4 commits in zstd where I saw performance change.
*
* https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4
*/
__asm__(".p2align 5");
__asm__("nop");
__asm__(".p2align 4");
#endif
for ( ; ; ) {
seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_noPrefetch);
size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
#endif
DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
op += oneSeqSize;
} }
BIT_reloadDStream(&(seqState.DStream));
/* gcc and clang both don't like early returns in this loop.
* gcc doesn't like early breaks either.
* Instead save an error and report it at the end.
* When there is an error, don't increment op, so we don't
* overwrite.
*/
if (UNLIKELY(ZSTD_isError(oneSeqSize))) error = oneSeqSize;
else op += oneSeqSize;
if (UNLIKELY(!--nbSeq)) break;
}
/* check if reached exact end */
DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
RETURN_ERROR_IF(nbSeq, corruption_detected);
RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected);
if (ZSTD_isError(error)) return error;
RETURN_ERROR_IF(nbSeq, corruption_detected, "");
RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, "");
/* save reps for next block */
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
}
/* last literal segment */
{ size_t const lastLLSize = litEnd - litPtr;
RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall);
RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
if (op != NULL) {
memcpy(op, litPtr, lastLLSize);
op += lastLLSize;
}
}
return op-ostart;
}
@ -959,99 +1116,21 @@ static size_t
ZSTD_decompressSequences_default(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset)
const ZSTD_longOffset_e isLongOffset,
const int frame)
{
return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
FORCE_INLINE_TEMPLATE seq_t
ZSTD_decodeSequenceLong(seqState_t* seqState, ZSTD_longOffset_e const longOffsets)
{
seq_t seq;
U32 const llBits = seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits;
U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits;
U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits;
U32 const totalBits = llBits+mlBits+ofBits;
U32 const llBase = seqState->stateLL.table[seqState->stateLL.state].baseValue;
U32 const mlBase = seqState->stateML.table[seqState->stateML.state].baseValue;
U32 const ofBase = seqState->stateOffb.table[seqState->stateOffb.state].baseValue;
/* sequence */
{ size_t offset;
if (!ofBits)
offset = 0;
else {
ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
assert(ofBits <= MaxOff);
if (MEM_32bits() && longOffsets) {
U32 const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN_32-1);
offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
if (MEM_32bits() || extraBits) BIT_reloadDStream(&seqState->DStream);
if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
} else {
offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
}
}
if (ofBits <= 1) {
offset += (llBase==0);
if (offset) {
size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
seqState->prevOffset[1] = seqState->prevOffset[0];
seqState->prevOffset[0] = offset = temp;
} else {
offset = seqState->prevOffset[0];
}
} else {
seqState->prevOffset[2] = seqState->prevOffset[1];
seqState->prevOffset[1] = seqState->prevOffset[0];
seqState->prevOffset[0] = offset;
}
seq.offset = offset;
}
seq.matchLength = mlBase + ((mlBits>0) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */
if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
BIT_reloadDStream(&seqState->DStream);
if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
BIT_reloadDStream(&seqState->DStream);
/* Verify that there is enough bits to read the rest of the data in 64-bit mode. */
ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
seq.litLength = llBase + ((llBits>0) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */
if (MEM_32bits())
BIT_reloadDStream(&seqState->DStream);
{ size_t const pos = seqState->pos + seq.litLength;
const BYTE* const matchBase = (seq.offset > pos) ? seqState->dictEnd : seqState->prefixStart;
seq.match = matchBase + pos - seq.offset; /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
* No consequence though : no memory access will occur, overly large offset will be detected in ZSTD_execSequenceLong() */
seqState->pos = pos + seq.matchLength;
}
/* ANS state update */
ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */
ZSTD_updateFseState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */
if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */
return seq;
}
FORCE_INLINE_TEMPLATE size_t
ZSTD_decompressSequencesLong_body(
ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset)
const ZSTD_longOffset_e isLongOffset,
const int frame)
{
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
@ -1063,6 +1142,7 @@ ZSTD_decompressSequencesLong_body(
const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart);
const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
(void)frame;
/* Regen sequences */
if (nbSeq) {
@ -1078,36 +1158,45 @@ ZSTD_decompressSequencesLong_body(
seqState.prefixStart = prefixStart;
seqState.pos = (size_t)(op-prefixStart);
seqState.dictEnd = dictEnd;
assert(dst != NULL);
assert(iend >= ip);
RETURN_ERROR_IF(
ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
corruption_detected);
corruption_detected, "");
ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
/* prepare in advance */
for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {
sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
sequences[seqNb] = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_prefetch);
PREFETCH_L1(sequences[seqNb].match); PREFETCH_L1(sequences[seqNb].match + sequences[seqNb].matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
}
RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected);
RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected, "");
/* decode and decompress */
for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_prefetch);
size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
#endif
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
PREFETCH_L1(sequence.match); PREFETCH_L1(sequence.match + sequence.matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
sequences[seqNb & STORED_SEQS_MASK] = sequence;
op += oneSeqSize;
}
RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected);
RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected, "");
/* finish queue */
seqNb -= seqAdvance;
for ( ; seqNb<nbSeq ; seqNb++) {
size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[seqNb&STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
#endif
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
op += oneSeqSize;
}
@ -1118,10 +1207,12 @@ ZSTD_decompressSequencesLong_body(
/* last literal segment */
{ size_t const lastLLSize = litEnd - litPtr;
RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall);
RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
if (op != NULL) {
memcpy(op, litPtr, lastLLSize);
op += lastLLSize;
}
}
return op-ostart;
}
@ -1130,9 +1221,10 @@ static size_t
ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset)
const ZSTD_longOffset_e isLongOffset,
const int frame)
{
return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
@ -1146,9 +1238,10 @@ DONT_VECTORIZE
ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset)
const ZSTD_longOffset_e isLongOffset,
const int frame)
{
return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
@ -1157,9 +1250,10 @@ static TARGET_ATTRIBUTE("bmi2") size_t
ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset)
const ZSTD_longOffset_e isLongOffset,
const int frame)
{
return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
@ -1169,21 +1263,23 @@ typedef size_t (*ZSTD_decompressSequences_t)(
ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset);
const ZSTD_longOffset_e isLongOffset,
const int frame);
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
static size_t
ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset)
const ZSTD_longOffset_e isLongOffset,
const int frame)
{
DEBUGLOG(5, "ZSTD_decompressSequences");
#if DYNAMIC_BMI2
if (dctx->bmi2) {
return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#endif
return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
@ -1198,15 +1294,16 @@ static size_t
ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset)
const ZSTD_longOffset_e isLongOffset,
const int frame)
{
DEBUGLOG(5, "ZSTD_decompressSequencesLong");
#if DYNAMIC_BMI2
if (dctx->bmi2) {
return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#endif
return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
@ -1240,7 +1337,6 @@ ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable)
}
#endif
size_t
ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
@ -1256,7 +1352,7 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))));
DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong);
RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong, "");
/* Decode literals section */
{ size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
@ -1282,6 +1378,8 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
ip += seqHSize;
srcSize -= seqHSize;
RETURN_ERROR_IF(dst == NULL && nbSeq > 0, dstSize_tooSmall, "NULL not handled");
#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
!defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
if ( !usePrefetchDecoder
@ -1300,17 +1398,28 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
if (usePrefetchDecoder)
#endif
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
#endif
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
/* else */
return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
#endif
}
}
void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)
{
if (dst != dctx->previousDstEnd) { /* not contiguous */
dctx->dictEnd = dctx->previousDstEnd;
dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
dctx->prefixStart = dst;
dctx->previousDstEnd = dst;
}
}
size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@ -16,8 +16,8 @@
* Dependencies
*********************************************************/
#include <stddef.h> /* size_t */
#include "zstd.h" /* DCtx, and some public functions */
#include "zstd_internal.h" /* blockProperties_t, and some public functions */
#include "../zstd.h" /* DCtx, and some public functions */
#include "../common/zstd_internal.h" /* blockProperties_t, and some public functions */
#include "zstd_decompress_internal.h" /* ZSTD_seqSymbol */


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@ -19,8 +19,8 @@
/*-*******************************************************
* Dependencies
*********************************************************/
#include "mem.h" /* BYTE, U16, U32 */
#include "zstd_internal.h" /* ZSTD_seqSymbol */
#include "../common/mem.h" /* BYTE, U16, U32 */
#include "../common/zstd_internal.h" /* ZSTD_seqSymbol */
@ -95,6 +95,11 @@ typedef enum {
ZSTD_use_once = 1 /* Use the dictionary once and set to ZSTD_dont_use */
} ZSTD_dictUses_e;
typedef enum {
ZSTD_obm_buffered = 0, /* Buffer the output */
ZSTD_obm_stable = 1 /* ZSTD_outBuffer is stable */
} ZSTD_outBufferMode_e;
struct ZSTD_DCtx_s
{
const ZSTD_seqSymbol* LLTptr;
@ -147,10 +152,19 @@ struct ZSTD_DCtx_s
U32 legacyVersion;
U32 hostageByte;
int noForwardProgress;
ZSTD_outBufferMode_e outBufferMode;
ZSTD_outBuffer expectedOutBuffer;
/* workspace */
BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];
BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
size_t oversizedDuration;
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
void const* dictContentBeginForFuzzing;
void const* dictContentEndForFuzzing;
#endif
}; /* typedef'd to ZSTD_DCtx within "zstd.h" */
@ -160,7 +174,7 @@ struct ZSTD_DCtx_s
/*! ZSTD_loadDEntropy() :
* dict : must point at beginning of a valid zstd dictionary.
* @return : size of entropy tables read */
* @return : size of dictionary header (size of magic number + dict ID + entropy tables) */
size_t ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
const void* const dict, size_t const dictSize);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@ -72,7 +72,7 @@ extern "C" {
/*------ Version ------*/
#define ZSTD_VERSION_MAJOR 1
#define ZSTD_VERSION_MINOR 4
#define ZSTD_VERSION_RELEASE 4
#define ZSTD_VERSION_RELEASE 5
#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
ZSTDLIB_API unsigned ZSTD_versionNumber(void); /**< to check runtime library version */
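Worked example (illustrative, not part of this diff): plugging the values above into the formula gives 1*100*100 + 4*100 + 5 = 10405 for v1.4.5, so a program can detect a header/library mismatch at runtime:
/* Illustrative sketch : compare compile-time and runtime versions. */
if (ZSTD_versionNumber() != ZSTD_VERSION_NUMBER) {
    /* the linked libzstd differs from the zstd.h this code was built against */
}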
@ -274,7 +274,10 @@ typedef enum {
* Default level is ZSTD_CLEVEL_DEFAULT==3.
* Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT.
* Note 1 : it's possible to pass a negative compression level.
* Note 2 : setting a level resets all other compression parameters to default */
* Note 2 : setting a level does not automatically set all other compression parameters
* to default. Setting this will however eventually dynamically impact the compression
* parameters which have not been manually set. The manually set
* ones will 'stick'. */
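A small sketch of the 'sticky' behaviour described in Note 2 (the parameter values are arbitrary, chosen for illustration only):
ZSTD_CCtx* const cctx = ZSTD_createCCtx();
ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 23);        /* manually set : will 'stick' */
ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19); /* adapts the parameters not set manually; windowLog stays 23 */
ZSTD_freeCCtx(cctx);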
/* Advanced compression parameters :
* It's possible to pin down compression parameters to some specific values.
* In which case, these values are no longer dynamically selected by the compressor */
@ -519,11 +522,13 @@ typedef enum {
/* note : additional experimental parameters are also available
* within the experimental section of the API.
* At the time of this writing, they include :
* ZSTD_c_format
* ZSTD_d_format
* ZSTD_d_stableOutBuffer
* Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
* note : never ever use experimentalParam? names directly
*/
ZSTD_d_experimentalParam1=1000
ZSTD_d_experimentalParam1=1000,
ZSTD_d_experimentalParam2=1001
} ZSTD_dParameter;
@ -763,7 +768,7 @@ ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds);
/* This function is redundant with the advanced API and equivalent to:
*
* ZSTD_DCtx_reset(zds);
* ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
* ZSTD_DCtx_refDDict(zds, NULL);
*/
ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
@ -1263,23 +1268,28 @@ ZSTDLIB_API size_t ZSTD_getSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
***************************************/
/*! ZSTD_estimate*() :
* These functions make it possible to estimate memory usage of a future
* {D,C}Ctx, before its creation.
* These functions make it possible to estimate memory usage
* of a future {D,C}Ctx, before its creation.
*
* ZSTD_estimateCCtxSize() will provide a budget large enough for any
* compression level up to selected one. Unlike ZSTD_estimateCStreamSize*(),
* this estimate does not include space for a window buffer, so this estimate
* is guaranteed to be enough for single-shot compressions, but not streaming
* compressions. It will however assume the input may be arbitrarily large,
* which is the worst case. If srcSize is known to always be small,
* ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
* ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with
* ZSTD_getCParams() to create cParams from compressionLevel.
* ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with
* ZSTD_CCtxParams_setParameter().
* ZSTD_estimateCCtxSize() will provide a memory budget large enough
* for any compression level up to selected one.
* Note : Unlike ZSTD_estimateCStreamSize*(), this estimate
* does not include space for a window buffer.
* Therefore, the estimation is only guaranteed for single-shot compressions, not streaming.
* The estimate will assume the input may be arbitrarily large,
* which is the worst case.
*
* Note: only single-threaded compression is supported. This function will
* return an error code if ZSTD_c_nbWorkers is >= 1. */
* When srcSize can be bound by a known and rather "small" value,
* this fact can be used to provide a tighter estimation
* because the CCtx compression context will need less memory.
* This tighter estimation can be provided by more advanced functions
* ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(),
* and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter().
* Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits.
*
* Note 2 : only single-threaded compression is supported.
* ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
*/
ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
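A hedged sketch of the tandem usage described above (level 3 and the 100 KB bound are arbitrary; ZSTD_getCParams() lives in the experimental section, so ZSTD_STATIC_LINKING_ONLY must be defined):
ZSTD_compressionParameters const cParams =
        ZSTD_getCParams(3 /* level */, 100 * 1024 /* known srcSize bound */, 0 /* no dictionary */);
size_t const tightBudget = ZSTD_estimateCCtxSize_usingCParams(cParams);
size_t const worstCase   = ZSTD_estimateCCtxSize(3);   /* assumes arbitrarily large input */
/* tightBudget <= worstCase : the known srcSize bound shrinks the required window */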
@ -1642,6 +1652,37 @@ ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowS
* allowing selection between ZSTD_format_e input compression formats
*/
#define ZSTD_d_format ZSTD_d_experimentalParam1
/* ZSTD_d_stableOutBuffer
* Experimental parameter.
* Default is 0 == disabled. Set to 1 to enable.
*
* Tells the decompressor that the ZSTD_outBuffer will ALWAYS be the same
* between calls, except for the modifications that zstd makes to pos (the
* caller must not modify pos). This is checked by the decompressor, and
* decompression will fail if it ever changes. Therefore the ZSTD_outBuffer
* MUST be large enough to fit the entire decompressed frame. This will be
* checked when the frame content size is known. The data in the ZSTD_outBuffer
* in the range [dst, dst + pos) MUST not be modified during decompression
* or you will get data corruption.
*
* When this flag is enabled zstd won't allocate an output buffer, because
* it can write directly to the ZSTD_outBuffer, but it will still allocate
* an input buffer large enough to fit any compressed block. This will also
* avoid the memcpy() from the internal output buffer to the ZSTD_outBuffer.
* If you need to avoid the input buffer allocation use the buffer-less
* streaming API.
*
* NOTE: So long as the ZSTD_outBuffer always points to valid memory, using
* this flag is ALWAYS memory safe, and will never access out-of-bounds
* memory. However, decompression WILL fail if you violate the preconditions.
*
* WARNING: The data in the ZSTD_outBuffer in the range [dst, dst + pos) MUST
* not be modified during decompression or you will get data corruption. This
* is because zstd needs to reference data in the ZSTD_outBuffer to regenerate
* matches. Normally zstd maintains its own buffer for this purpose, but passing
* this flag tells zstd to use the user provided buffer.
*/
#define ZSTD_d_stableOutBuffer ZSTD_d_experimentalParam2
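A minimal usage sketch (assumes ZSTD_STATIC_LINKING_ONLY is defined, and that dst/dstCapacity describe a caller-owned buffer known to be large enough for the whole frame):
ZSTD_DCtx* const dctx = ZSTD_createDCtx();
ZSTD_DCtx_setParameter(dctx, ZSTD_d_stableOutBuffer, 1);
{   ZSTD_outBuffer out = { dst, dstCapacity, 0 };   /* pinned : must not move between calls */
    ZSTD_inBuffer  in  = { src, srcSize, 0 };
    while (in.pos < in.size) {
        size_t const ret = ZSTD_decompressStream(dctx, &out, &in);
        if (ZSTD_isError(ret)) break;   /* includes violations of the preconditions above */
        if (ret == 0) break;            /* frame fully decoded */
    }
}
ZSTD_freeDCtx(dctx);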
/*! ZSTD_DCtx_setFormat() :
* Instruct the decoder context about what kind of data to decode next.