diff --git a/README.md b/README.md index 8ac6b01..f880357 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ convention of how/when to free c-mem, thus break the rule of [tecbot/gorocksdb]( ## Install -### Default - Builtin Static (Linux, Mac OS) +### Default - Builtin Static (Linux only) `grocksdb` contains built static version of `Rocksdb` with: - gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) @@ -31,7 +31,7 @@ go get -u github.com/linxGnu/grocksdb go build -tags builtin_static ``` -### Static lib (Linux, Mac OS) +### Static lib (Linux only) If you don't trust my builtin/want to build with your compiler/env: diff --git a/backup.go b/backup.go index 66c94de..bbe5bd8 100644 --- a/backup.go +++ b/backup.go @@ -38,6 +38,19 @@ func OpenBackupEngine(opts *Options, path string) (be *BackupEngine, err error) return } +// OpenBackupEngineWithOpt opens a backup engine with specified options. +func OpenBackupEngineWithOpt(opts *BackupableDBOptions, env *Env) (be *BackupEngine, err error) { + var cErr *C.char + bEngine := C.rocksdb_backup_engine_open_opts(opts.c, env.c, &cErr) + if err = fromCError(cErr); err == nil { + be = &BackupEngine{ + c: bEngine, + } + } + + return +} + // CreateBackupEngine opens a backup engine from DB. 
func CreateBackupEngine(db *DB) (be *BackupEngine, err error) { if be, err = OpenBackupEngine(db.opts, db.Name()); err == nil { diff --git a/backup_test.go b/backup_test.go index a639dbf..ee8b093 100644 --- a/backup_test.go +++ b/backup_test.go @@ -50,7 +50,19 @@ func TestBackupEngine(t *testing.T) { engine, err := CreateBackupEngine(db) require.Nil(t, err) - defer engine.Close() + defer func() { + engine.Close() + + // re-open with opts + opts := NewBackupableDBOptions(db.name) + env := NewDefaultEnv() + + _, err = OpenBackupEngineWithOpt(opts, env) + require.Nil(t, err) + + env.Destroy() + opts.Destroy() + }() t.Run("createBackupAndVerify", func(t *testing.T) { infos := engine.GetInfo() diff --git a/build.sh b/build.sh index 9abf923..0eb2b0e 100644 --- a/build.sh +++ b/build.sh @@ -11,24 +11,30 @@ mkdir -p $BUILD_PATH CMAKE_REQUIRED_PARAMS="-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX}" -cd $BUILD_PATH && wget https://github.com/madler/zlib/archive/v1.2.11.tar.gz && tar xzf v1.2.11.tar.gz && cd zlib-1.2.11 && \ +zlib_version="1.2.11" +cd $BUILD_PATH && wget https://github.com/madler/zlib/archive/v${zlib_version}.tar.gz && tar xzf v${zlib_version}.tar.gz && cd zlib-${zlib_version} && \ ./configure --prefix=$INSTALL_PREFIX --static && make -j16 install && \ cd $BUILD_PATH && rm -rf * -cd $BUILD_PATH && wget https://github.com/google/snappy/archive/1.1.8.tar.gz && tar xzf 1.1.8.tar.gz && cd snappy-1.1.8 && \ +snappy_version="1.1.8" +cd $BUILD_PATH && wget https://github.com/google/snappy/archive/${snappy_version}.tar.gz && tar xzf ${snappy_version}.tar.gz && cd snappy-${snappy_version} && \ mkdir -p build_place && cd build_place && cmake $CMAKE_REQUIRED_PARAMS -DSNAPPY_BUILD_TESTS=OFF .. 
&& make install/strip -j16 && \ cd $BUILD_PATH && rm -rf * -cd $BUILD_PATH && wget https://github.com/lz4/lz4/archive/v1.9.3.tar.gz && tar xzf v1.9.3.tar.gz && cd lz4-1.9.3/build/cmake && \ +lz4_version="1.9.3" +cd $BUILD_PATH && wget https://github.com/lz4/lz4/archive/v${lz4_version}.tar.gz && tar xzf v${lz4_version}.tar.gz && cd lz4-${lz4_version}/build/cmake && \ cmake $CMAKE_REQUIRED_PARAMS -DLZ4_BUILD_LEGACY_LZ4C=OFF -DBUILD_SHARED_LIBS=OFF -DLZ4_POSITION_INDEPENDENT_LIB=ON && make -j16 install && \ cd $BUILD_PATH && rm -rf * -cd $BUILD_PATH && wget https://github.com/facebook/zstd/releases/download/v1.4.5/zstd-1.4.5.tar.gz && tar xzf zstd-1.4.5.tar.gz && cd zstd-1.4.5/build/cmake && mkdir -p build_place && cd build_place && \ - cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} -DZSTD_BUILD_PROGRAMS=OFF -DZSTD_BUILD_CONTRIB=OFF -DZSTD_BUILD_STATIC=ON -DZSTD_BUILD_SHARED=OFF -DZSTD_BUILD_TESTS=OFF \ - $CMAKE_REQUIRED_PARAMS -DZSTD_ZLIB_SUPPORT=ON -DZSTD_LZMA_SUPPORT=OFF -DCMAKE_BUILD_TYPE=Release .. && make -j16 install && \ - cd $BUILD_PATH && rm -rf * +zstd_version="1.4.8" +cd $BUILD_PATH && wget https://github.com/facebook/zstd/archive/v${zstd_version}.tar.gz && tar xzf v${zstd_version}.tar.gz && \ + cd zstd-${zstd_version}/build/cmake && mkdir -p build_place && cd build_place && \ + cmake -DZSTD_BUILD_PROGRAMS=OFF -DZSTD_BUILD_CONTRIB=OFF -DZSTD_BUILD_STATIC=ON -DZSTD_BUILD_SHARED=OFF -DZSTD_BUILD_TESTS=OFF \ + -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DZSTD_ZLIB_SUPPORT=ON -DZSTD_LZMA_SUPPORT=OFF -DCMAKE_BUILD_TYPE=Release .. 
&& make -j$(nproc) install && \ + cd $BUILD_PATH && rm -rf * && ldconfig -cd $BUILD_PATH && wget https://github.com/facebook/rocksdb/archive/v6.14.6.tar.gz && tar xzf v6.14.6.tar.gz && cd rocksdb-6.14.6/ && \ +rocksdb_version="6.15.2" +cd $BUILD_PATH && wget https://github.com/facebook/rocksdb/archive/v${rocksdb_version}.tar.gz && tar xzf v${rocksdb_version}.tar.gz && cd rocksdb-${rocksdb_version}/ && \ mkdir -p build_place && cd build_place && cmake -DCMAKE_BUILD_TYPE=Release $CMAKE_REQUIRED_PARAMS -DCMAKE_PREFIX_PATH=$INSTALL_PREFIX -DWITH_TESTS=OFF -DWITH_GFLAGS=OFF \ -DWITH_BENCHMARK_TOOLS=OFF -DWITH_TOOLS=OFF -DWITH_MD_LIBRARY=OFF -DWITH_RUNTIME_DEBUG=OFF -DROCKSDB_BUILD_SHARED=OFF -DWITH_SNAPPY=ON -DWITH_LZ4=ON -DWITH_ZLIB=ON \ -DWITH_ZSTD=ON -DWITH_BZ2=OFF -WITH_GFLAGS=OFF .. && make -j16 install/strip && \ diff --git a/builtin_static_darwin_amd64.go b/builtin_static_darwin_amd64.go deleted file mode 100644 index e0f931e..0000000 --- a/builtin_static_darwin_amd64.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build builtin_static !static - -package grocksdb - -// #cgo CFLAGS: -I ${SRCDIR}/dist/darwin_amd64/include -// #cgo CXXFLAGS: -I ${SRCDIR}/dist/darwin_amd64/include -// #cgo LDFLAGS: -L${SRCDIR}/dist/darwin_amd64/lib -lrocksdb -pthread -lstdc++ -ldl -lm -lzstd -llz4 -lz -lsnappy -import "C" diff --git a/dist/darwin_amd64/include/cover.h b/dist/darwin_amd64/include/cover.h deleted file mode 100644 index f2aa0e3..0000000 --- a/dist/darwin_amd64/include/cover.h +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Copyright (c) 2017-2020, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. 
- */ - -#include /* fprintf */ -#include /* malloc, free, qsort */ -#include /* memset */ -#include /* clock */ -#include "../common/mem.h" /* read */ -#include "../common/pool.h" -#include "../common/threading.h" -#include "../common/zstd_internal.h" /* includes zstd.h */ -#ifndef ZDICT_STATIC_LINKING_ONLY -#define ZDICT_STATIC_LINKING_ONLY -#endif -#include "zdict.h" - -/** - * COVER_best_t is used for two purposes: - * 1. Synchronizing threads. - * 2. Saving the best parameters and dictionary. - * - * All of the methods except COVER_best_init() are thread safe if zstd is - * compiled with multithreaded support. - */ -typedef struct COVER_best_s { - ZSTD_pthread_mutex_t mutex; - ZSTD_pthread_cond_t cond; - size_t liveJobs; - void *dict; - size_t dictSize; - ZDICT_cover_params_t parameters; - size_t compressedSize; -} COVER_best_t; - -/** - * A segment is a range in the source as well as the score of the segment. - */ -typedef struct { - U32 begin; - U32 end; - U32 score; -} COVER_segment_t; - -/** - *Number of epochs and size of each epoch. - */ -typedef struct { - U32 num; - U32 size; -} COVER_epoch_info_t; - -/** - * Struct used for the dictionary selection function. - */ -typedef struct COVER_dictSelection { - BYTE* dictContent; - size_t dictSize; - size_t totalCompressedSize; -} COVER_dictSelection_t; - -/** - * Computes the number of epochs and the size of each epoch. - * We will make sure that each epoch gets at least 10 * k bytes. - * - * The COVER algorithms divide the data up into epochs of equal size and - * select one segment from each epoch. - * - * @param maxDictSize The maximum allowed dictionary size. - * @param nbDmers The number of dmers we are training on. - * @param k The parameter k (segment size). - * @param passes The target number of passes over the dmer corpus. - * More passes means a better dictionary. 
- */ -COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize, U32 nbDmers, - U32 k, U32 passes); - -/** - * Warns the user when their corpus is too small. - */ -void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel); - -/** - * Checks total compressed size of a dictionary - */ -size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters, - const size_t *samplesSizes, const BYTE *samples, - size_t *offsets, - size_t nbTrainSamples, size_t nbSamples, - BYTE *const dict, size_t dictBufferCapacity); - -/** - * Returns the sum of the sample sizes. - */ -size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) ; - -/** - * Initialize the `COVER_best_t`. - */ -void COVER_best_init(COVER_best_t *best); - -/** - * Wait until liveJobs == 0. - */ -void COVER_best_wait(COVER_best_t *best); - -/** - * Call COVER_best_wait() and then destroy the COVER_best_t. - */ -void COVER_best_destroy(COVER_best_t *best); - -/** - * Called when a thread is about to be launched. - * Increments liveJobs. - */ -void COVER_best_start(COVER_best_t *best); - -/** - * Called when a thread finishes executing, both on error or success. - * Decrements liveJobs and signals any waiting threads if liveJobs == 0. - * If this dictionary is the best so far save it and its parameters. - */ -void COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters, - COVER_dictSelection_t selection); -/** - * Error function for COVER_selectDict function. Checks if the return - * value is an error. - */ -unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection); - - /** - * Error function for COVER_selectDict function. Returns a struct where - * return.totalCompressedSize is a ZSTD error. - */ -COVER_dictSelection_t COVER_dictSelectionError(size_t error); - -/** - * Always call after selectDict is called to free up used memory from - * newly created dictionary. 
- */ -void COVER_dictSelectionFree(COVER_dictSelection_t selection); - -/** - * Called to finalize the dictionary and select one based on whether or not - * the shrink-dict flag was enabled. If enabled the dictionary used is the - * smallest dictionary within a specified regression of the compressed size - * from the largest dictionary. - */ - COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, - size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples, - size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize); diff --git a/dist/darwin_amd64/include/lz4.h b/dist/darwin_amd64/include/lz4.h deleted file mode 100644 index 7ab1e48..0000000 --- a/dist/darwin_amd64/include/lz4.h +++ /dev/null @@ -1,774 +0,0 @@ -/* - * LZ4 - Fast LZ compression algorithm - * Header File - * Copyright (C) 2011-present, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - LZ4 homepage : http://www.lz4.org - - LZ4 source repository : https://github.com/lz4/lz4 -*/ -#if defined (__cplusplus) -extern "C" { -#endif - -#ifndef LZ4_H_2983827168210 -#define LZ4_H_2983827168210 - -/* --- Dependency --- */ -#include /* size_t */ - - -/** - Introduction - - LZ4 is lossless compression algorithm, providing compression speed >500 MB/s per core, - scalable with multi-cores CPU. It features an extremely fast decoder, with speed in - multiple GB/s per core, typically reaching RAM speed limits on multi-core systems. - - The LZ4 compression library provides in-memory compression and decompression functions. - It gives full buffer control to user. - Compression can be done in: - - a single step (described as Simple Functions) - - a single step, reusing a context (described in Advanced Functions) - - unbounded multiple steps (described as Streaming compression) - - lz4.h generates and decodes LZ4-compressed blocks (doc/lz4_Block_format.md). - Decompressing such a compressed block requires additional metadata. - Exact metadata depends on exact decompression function. - For the typical case of LZ4_decompress_safe(), - metadata includes block's compressed size, and maximum bound of decompressed size. - Each application is free to encode and pass such metadata in whichever way it wants. - - lz4.h only handle blocks, it can not generate Frames. 
- - Blocks are different from Frames (doc/lz4_Frame_format.md). - Frames bundle both blocks and metadata in a specified manner. - Embedding metadata is required for compressed data to be self-contained and portable. - Frame format is delivered through a companion API, declared in lz4frame.h. - The `lz4` CLI can only manage frames. -*/ - -/*^*************************************************************** -* Export parameters -*****************************************************************/ -/* -* LZ4_DLL_EXPORT : -* Enable exporting of functions when building a Windows DLL -* LZ4LIB_VISIBILITY : -* Control library symbols visibility. -*/ -#ifndef LZ4LIB_VISIBILITY -# if defined(__GNUC__) && (__GNUC__ >= 4) -# define LZ4LIB_VISIBILITY __attribute__ ((visibility ("default"))) -# else -# define LZ4LIB_VISIBILITY -# endif -#endif -#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1) -# define LZ4LIB_API __declspec(dllexport) LZ4LIB_VISIBILITY -#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1) -# define LZ4LIB_API __declspec(dllimport) LZ4LIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ -#else -# define LZ4LIB_API LZ4LIB_VISIBILITY -#endif - -/*------ Version ------*/ -#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */ -#define LZ4_VERSION_MINOR 9 /* for new (non-breaking) interface capabilities */ -#define LZ4_VERSION_RELEASE 3 /* for tweaks, bug-fixes, or development */ - -#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE) - -#define LZ4_LIB_VERSION LZ4_VERSION_MAJOR.LZ4_VERSION_MINOR.LZ4_VERSION_RELEASE -#define LZ4_QUOTE(str) #str -#define LZ4_EXPAND_AND_QUOTE(str) LZ4_QUOTE(str) -#define LZ4_VERSION_STRING LZ4_EXPAND_AND_QUOTE(LZ4_LIB_VERSION) - -LZ4LIB_API int LZ4_versionNumber (void); /**< library version number; useful to check dll version */ -LZ4LIB_API const char* LZ4_versionString (void); /**< library 
version string; useful to check dll version */ - - -/*-************************************ -* Tuning parameter -**************************************/ -/*! - * LZ4_MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) - * Increasing memory usage improves compression ratio. - * Reduced memory usage may improve speed, thanks to better cache locality. - * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache - */ -#ifndef LZ4_MEMORY_USAGE -# define LZ4_MEMORY_USAGE 14 -#endif - - -/*-************************************ -* Simple Functions -**************************************/ -/*! LZ4_compress_default() : - * Compresses 'srcSize' bytes from buffer 'src' - * into already allocated 'dst' buffer of size 'dstCapacity'. - * Compression is guaranteed to succeed if 'dstCapacity' >= LZ4_compressBound(srcSize). - * It also runs faster, so it's a recommended setting. - * If the function cannot compress 'src' into a more limited 'dst' budget, - * compression stops *immediately*, and the function result is zero. - * In which case, 'dst' content is undefined (invalid). - * srcSize : max supported value is LZ4_MAX_INPUT_SIZE. - * dstCapacity : size of buffer 'dst' (which must be already allocated) - * @return : the number of bytes written into buffer 'dst' (necessarily <= dstCapacity) - * or 0 if compression fails - * Note : This function is protected against buffer overflow scenarios (never writes outside 'dst' buffer, nor read outside 'source' buffer). - */ -LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity); - -/*! LZ4_decompress_safe() : - * compressedSize : is the exact complete size of the compressed block. - * dstCapacity : is the size of destination buffer (which must be already allocated), presumed an upper bound of decompressed size. 
- * @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity) - * If destination buffer is not large enough, decoding will stop and output an error code (negative value). - * If the source stream is detected malformed, the function will stop decoding and return a negative result. - * Note 1 : This function is protected against malicious data packets : - * it will never writes outside 'dst' buffer, nor read outside 'source' buffer, - * even if the compressed block is maliciously modified to order the decoder to do these actions. - * In such case, the decoder stops immediately, and considers the compressed block malformed. - * Note 2 : compressedSize and dstCapacity must be provided to the function, the compressed block does not contain them. - * The implementation is free to send / store / derive this information in whichever way is most beneficial. - * If there is a need for a different format which bundles together both compressed data and its metadata, consider looking at lz4frame.h instead. - */ -LZ4LIB_API int LZ4_decompress_safe (const char* src, char* dst, int compressedSize, int dstCapacity); - - -/*-************************************ -* Advanced Functions -**************************************/ -#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */ -#define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16) - -/*! LZ4_compressBound() : - Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible) - This function is primarily useful for memory allocation purposes (destination buffer size). - Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example). 
- Note that LZ4_compress_default() compresses faster when dstCapacity is >= LZ4_compressBound(srcSize) - inputSize : max supported value is LZ4_MAX_INPUT_SIZE - return : maximum output size in a "worst case" scenario - or 0, if input size is incorrect (too large or negative) -*/ -LZ4LIB_API int LZ4_compressBound(int inputSize); - -/*! LZ4_compress_fast() : - Same as LZ4_compress_default(), but allows selection of "acceleration" factor. - The larger the acceleration value, the faster the algorithm, but also the lesser the compression. - It's a trade-off. It can be fine tuned, with each successive value providing roughly +~3% to speed. - An acceleration value of "1" is the same as regular LZ4_compress_default() - Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT (currently == 1, see lz4.c). - Values > LZ4_ACCELERATION_MAX will be replaced by LZ4_ACCELERATION_MAX (currently == 65537, see lz4.c). -*/ -LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); - - -/*! LZ4_compress_fast_extState() : - * Same as LZ4_compress_fast(), using an externally allocated memory space for its state. - * Use LZ4_sizeofState() to know how much memory must be allocated, - * and allocate it on 8-bytes boundaries (using `malloc()` typically). - * Then, provide this buffer as `void* state` to compression function. - */ -LZ4LIB_API int LZ4_sizeofState(void); -LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); - - -/*! LZ4_compress_destSize() : - * Reverse the logic : compresses as much data as possible from 'src' buffer - * into already allocated buffer 'dst', of size >= 'targetDestSize'. - * This function either compresses the entire 'src' content into 'dst' if it's large enough, - * or fill 'dst' buffer completely with as much data as possible from 'src'. - * note: acceleration parameter is fixed to "default". 
- * - * *srcSizePtr : will be modified to indicate how many bytes where read from 'src' to fill 'dst'. - * New value is necessarily <= input value. - * @return : Nb bytes written into 'dst' (necessarily <= targetDestSize) - * or 0 if compression fails. - * - * Note : from v1.8.2 to v1.9.1, this function had a bug (fixed un v1.9.2+): - * the produced compressed content could, in specific circumstances, - * require to be decompressed into a destination buffer larger - * by at least 1 byte than the content to decompress. - * If an application uses `LZ4_compress_destSize()`, - * it's highly recommended to update liblz4 to v1.9.2 or better. - * If this can't be done or ensured, - * the receiving decompression function should provide - * a dstCapacity which is > decompressedSize, by at least 1 byte. - * See https://github.com/lz4/lz4/issues/859 for details - */ -LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePtr, int targetDstSize); - - -/*! LZ4_decompress_safe_partial() : - * Decompress an LZ4 compressed block, of size 'srcSize' at position 'src', - * into destination buffer 'dst' of size 'dstCapacity'. - * Up to 'targetOutputSize' bytes will be decoded. - * The function stops decoding on reaching this objective. - * This can be useful to boost performance - * whenever only the beginning of a block is required. - * - * @return : the number of bytes decoded in `dst` (necessarily <= targetOutputSize) - * If source stream is detected malformed, function returns a negative result. - * - * Note 1 : @return can be < targetOutputSize, if compressed block contains less data. - * - * Note 2 : targetOutputSize must be <= dstCapacity - * - * Note 3 : this function effectively stops decoding on reaching targetOutputSize, - * so dstCapacity is kind of redundant. - * This is because in older versions of this function, - * decoding operation would still write complete sequences. 
- * Therefore, there was no guarantee that it would stop writing at exactly targetOutputSize, - * it could write more bytes, though only up to dstCapacity. - * Some "margin" used to be required for this operation to work properly. - * Thankfully, this is no longer necessary. - * The function nonetheless keeps the same signature, in an effort to preserve API compatibility. - * - * Note 4 : If srcSize is the exact size of the block, - * then targetOutputSize can be any value, - * including larger than the block's decompressed size. - * The function will, at most, generate block's decompressed size. - * - * Note 5 : If srcSize is _larger_ than block's compressed size, - * then targetOutputSize **MUST** be <= block's decompressed size. - * Otherwise, *silent corruption will occur*. - */ -LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcSize, int targetOutputSize, int dstCapacity); - - -/*-********************************************* -* Streaming Compression Functions -***********************************************/ -typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */ - -LZ4LIB_API LZ4_stream_t* LZ4_createStream(void); -LZ4LIB_API int LZ4_freeStream (LZ4_stream_t* streamPtr); - -/*! LZ4_resetStream_fast() : v1.9.0+ - * Use this to prepare an LZ4_stream_t for a new chain of dependent blocks - * (e.g., LZ4_compress_fast_continue()). - * - * An LZ4_stream_t must be initialized once before usage. - * This is automatically done when created by LZ4_createStream(). - * However, should the LZ4_stream_t be simply declared on stack (for example), - * it's necessary to initialize it first, using LZ4_initStream(). - * - * After init, start any new stream with LZ4_resetStream_fast(). - * A same LZ4_stream_t can be re-used multiple times consecutively - * and compress multiple streams, - * provided that it starts each new stream with LZ4_resetStream_fast(). 
- * - * LZ4_resetStream_fast() is much faster than LZ4_initStream(), - * but is not compatible with memory regions containing garbage data. - * - * Note: it's only useful to call LZ4_resetStream_fast() - * in the context of streaming compression. - * The *extState* functions perform their own resets. - * Invoking LZ4_resetStream_fast() before is redundant, and even counterproductive. - */ -LZ4LIB_API void LZ4_resetStream_fast (LZ4_stream_t* streamPtr); - -/*! LZ4_loadDict() : - * Use this function to reference a static dictionary into LZ4_stream_t. - * The dictionary must remain available during compression. - * LZ4_loadDict() triggers a reset, so any previous data will be forgotten. - * The same dictionary will have to be loaded on decompression side for successful decoding. - * Dictionary are useful for better compression of small data (KB range). - * While LZ4 accept any input as dictionary, - * results are generally better when using Zstandard's Dictionary Builder. - * Loading a size of 0 is allowed, and is the same as reset. - * @return : loaded dictionary size, in bytes (necessarily <= 64 KB) - */ -LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize); - -/*! LZ4_compress_fast_continue() : - * Compress 'src' content using data from previously compressed blocks, for better compression ratio. - * 'dst' buffer must be already allocated. - * If dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster. - * - * @return : size of compressed block - * or 0 if there is an error (typically, cannot fit into 'dst'). - * - * Note 1 : Each invocation to LZ4_compress_fast_continue() generates a new block. - * Each block has precise boundaries. - * Each block must be decompressed separately, calling LZ4_decompress_*() with relevant metadata. - * It's not possible to append blocks together and expect a single invocation of LZ4_decompress_*() to decompress them together. 
- * - * Note 2 : The previous 64KB of source data is __assumed__ to remain present, unmodified, at same address in memory ! - * - * Note 3 : When input is structured as a double-buffer, each buffer can have any size, including < 64 KB. - * Make sure that buffers are separated, by at least one byte. - * This construction ensures that each block only depends on previous block. - * - * Note 4 : If input buffer is a ring-buffer, it can have any size, including < 64 KB. - * - * Note 5 : After an error, the stream status is undefined (invalid), it can only be reset or freed. - */ -LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); - -/*! LZ4_saveDict() : - * If last 64KB data cannot be guaranteed to remain available at its current memory location, - * save it into a safer place (char* safeBuffer). - * This is schematically equivalent to a memcpy() followed by LZ4_loadDict(), - * but is much faster, because LZ4_saveDict() doesn't need to rebuild tables. - * @return : saved dictionary size in bytes (necessarily <= maxDictSize), or 0 if error. - */ -LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int maxDictSize); - - -/*-********************************************** -* Streaming Decompression Functions -* Bufferless synchronous API -************************************************/ -typedef union LZ4_streamDecode_u LZ4_streamDecode_t; /* tracking context */ - -/*! LZ4_createStreamDecode() and LZ4_freeStreamDecode() : - * creation / destruction of streaming decompression tracking context. - * A tracking context can be re-used multiple times. - */ -LZ4LIB_API LZ4_streamDecode_t* LZ4_createStreamDecode(void); -LZ4LIB_API int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream); - -/*! LZ4_setStreamDecode() : - * An LZ4_streamDecode_t context can be allocated once and re-used multiple times. 
- * Use this function to start decompression of a new stream of blocks. - * A dictionary can optionally be set. Use NULL or size 0 for a reset order. - * Dictionary is presumed stable : it must remain accessible and unmodified during next decompression. - * @return : 1 if OK, 0 if error - */ -LZ4LIB_API int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize); - -/*! LZ4_decoderRingBufferSize() : v1.8.2+ - * Note : in a ring buffer scenario (optional), - * blocks are presumed decompressed next to each other - * up to the moment there is not enough remaining space for next block (remainingSize < maxBlockSize), - * at which stage it resumes from beginning of ring buffer. - * When setting such a ring buffer for streaming decompression, - * provides the minimum size of this ring buffer - * to be compatible with any source respecting maxBlockSize condition. - * @return : minimum ring buffer size, - * or 0 if there is an error (invalid maxBlockSize). - */ -LZ4LIB_API int LZ4_decoderRingBufferSize(int maxBlockSize); -#define LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize) (65536 + 14 + (maxBlockSize)) /* for static allocation; maxBlockSize presumed valid */ - -/*! LZ4_decompress_*_continue() : - * These decoding functions allow decompression of consecutive blocks in "streaming" mode. - * A block is an unsplittable entity, it must be presented entirely to a decompression function. - * Decompression functions only accepts one block at a time. - * The last 64KB of previously decoded data *must* remain available and unmodified at the memory position where they were decoded. - * If less than 64KB of data has been decoded, all the data must be present. - * - * Special : if decompression side sets a ring buffer, it must respect one of the following conditions : - * - Decompression buffer size is _at least_ LZ4_decoderRingBufferSize(maxBlockSize). - * maxBlockSize is the maximum size of any single block. It can have any value > 16 bytes. 
- * In which case, encoding and decoding buffers do not need to be synchronized. - * Actually, data can be produced by any source compliant with LZ4 format specification, and respecting maxBlockSize. - * - Synchronized mode : - * Decompression buffer size is _exactly_ the same as compression buffer size, - * and follows exactly same update rule (block boundaries at same positions), - * and decoding function is provided with exact decompressed size of each block (exception for last block of the stream), - * _then_ decoding & encoding ring buffer can have any size, including small ones ( < 64 KB). - * - Decompression buffer is larger than encoding buffer, by a minimum of maxBlockSize more bytes. - * In which case, encoding and decoding buffers do not need to be synchronized, - * and encoding ring buffer can have any size, including small ones ( < 64 KB). - * - * Whenever these conditions are not possible, - * save the last 64KB of decoded data into a safe buffer where it can't be modified during decompression, - * then indicate where this data is saved using LZ4_setStreamDecode(), before decompressing next block. -*/ -LZ4LIB_API int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int srcSize, int dstCapacity); - - -/*! LZ4_decompress_*_usingDict() : - * These decoding functions work the same as - * a combination of LZ4_setStreamDecode() followed by LZ4_decompress_*_continue() - * They are stand-alone, and don't need an LZ4_streamDecode_t structure. - * Dictionary is presumed stable : it must remain accessible and unmodified during decompression. - * Performance tip : Decompression speed can be substantially increased - * when dst == dictStart + dictSize. - */ -LZ4LIB_API int LZ4_decompress_safe_usingDict (const char* src, char* dst, int srcSize, int dstCapcity, const char* dictStart, int dictSize); - -#endif /* LZ4_H_2983827168210 */ - - -/*^************************************* - * !!!!!! STATIC LINKING ONLY !!!!!! 
- ***************************************/ - -/*-**************************************************************************** - * Experimental section - * - * Symbols declared in this section must be considered unstable. Their - * signatures or semantics may change, or they may be removed altogether in the - * future. They are therefore only safe to depend on when the caller is - * statically linked against the library. - * - * To protect against unsafe usage, not only are the declarations guarded, - * the definitions are hidden by default - * when building LZ4 as a shared/dynamic library. - * - * In order to access these declarations, - * define LZ4_STATIC_LINKING_ONLY in your application - * before including LZ4's headers. - * - * In order to make their implementations accessible dynamically, you must - * define LZ4_PUBLISH_STATIC_FUNCTIONS when building the LZ4 library. - ******************************************************************************/ - -#ifdef LZ4_STATIC_LINKING_ONLY - -#ifndef LZ4_STATIC_3504398509 -#define LZ4_STATIC_3504398509 - -#ifdef LZ4_PUBLISH_STATIC_FUNCTIONS -#define LZ4LIB_STATIC_API LZ4LIB_API -#else -#define LZ4LIB_STATIC_API -#endif - - -/*! LZ4_compress_fast_extState_fastReset() : - * A variant of LZ4_compress_fast_extState(). - * - * Using this variant avoids an expensive initialization step. - * It is only safe to call if the state buffer is known to be correctly initialized already - * (see above comment on LZ4_resetStream_fast() for a definition of "correctly initialized"). - * From a high level, the difference is that - * this function initializes the provided state with a call to something like LZ4_resetStream_fast() - * while LZ4_compress_fast_extState() starts with a call to LZ4_resetStream(). - */ -LZ4LIB_STATIC_API int LZ4_compress_fast_extState_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); - -/*! 
LZ4_attach_dictionary() : - * This is an experimental API that allows - * efficient use of a static dictionary many times. - * - * Rather than re-loading the dictionary buffer into a working context before - * each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a - * working LZ4_stream_t, this function introduces a no-copy setup mechanism, - * in which the working stream references the dictionary stream in-place. - * - * Several assumptions are made about the state of the dictionary stream. - * Currently, only streams which have been prepared by LZ4_loadDict() should - * be expected to work. - * - * Alternatively, the provided dictionaryStream may be NULL, - * in which case any existing dictionary stream is unset. - * - * If a dictionary is provided, it replaces any pre-existing stream history. - * The dictionary contents are the only history that can be referenced and - * logically immediately precede the data compressed in the first subsequent - * compression call. - * - * The dictionary will only remain attached to the working stream through the - * first compression call, at the end of which it is cleared. The dictionary - * stream (and source buffer) must remain in-place / accessible / unchanged - * through the completion of the first compression call on the stream. - */ -LZ4LIB_STATIC_API void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream); - - -/*! In-place compression and decompression - * - * It's possible to have input and output sharing the same buffer, - * for highly contrained memory environments. - * In both cases, it requires input to lay at the end of the buffer, - * and decompression to start at beginning of the buffer. - * Buffer size must feature some margin, hence be larger than final size. 
- * - * |<------------------------buffer--------------------------------->| - * |<-----------compressed data--------->| - * |<-----------decompressed size------------------>| - * |<----margin---->| - * - * This technique is more useful for decompression, - * since decompressed size is typically larger, - * and margin is short. - * - * In-place decompression will work inside any buffer - * which size is >= LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize). - * This presumes that decompressedSize > compressedSize. - * Otherwise, it means compression actually expanded data, - * and it would be more efficient to store such data with a flag indicating it's not compressed. - * This can happen when data is not compressible (already compressed, or encrypted). - * - * For in-place compression, margin is larger, as it must be able to cope with both - * history preservation, requiring input data to remain unmodified up to LZ4_DISTANCE_MAX, - * and data expansion, which can happen when input is not compressible. - * As a consequence, buffer size requirements are much higher, - * and memory savings offered by in-place compression are more limited. - * - * There are ways to limit this cost for compression : - * - Reduce history size, by modifying LZ4_DISTANCE_MAX. - * Note that it is a compile-time constant, so all compressions will apply this limit. - * Lower values will reduce compression ratio, except when input_size < LZ4_DISTANCE_MAX, - * so it's a reasonable trick when inputs are known to be small. - * - Require the compressor to deliver a "maximum compressed size". - * This is the `dstCapacity` parameter in `LZ4_compress*()`. - * When this size is < LZ4_COMPRESSBOUND(inputSize), then compression can fail, - * in which case, the return code will be 0 (zero). - * The caller must be ready for these cases to happen, - * and typically design a backup scheme to send data uncompressed. 
- * The combination of both techniques can significantly reduce - * the amount of margin required for in-place compression. - * - * In-place compression can work in any buffer - * which size is >= (maxCompressedSize) - * with maxCompressedSize == LZ4_COMPRESSBOUND(srcSize) for guaranteed compression success. - * LZ4_COMPRESS_INPLACE_BUFFER_SIZE() depends on both maxCompressedSize and LZ4_DISTANCE_MAX, - * so it's possible to reduce memory requirements by playing with them. - */ - -#define LZ4_DECOMPRESS_INPLACE_MARGIN(compressedSize) (((compressedSize) >> 8) + 32) -#define LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize) ((decompressedSize) + LZ4_DECOMPRESS_INPLACE_MARGIN(decompressedSize)) /**< note: presumes that compressedSize < decompressedSize. note2: margin is overestimated a bit, since it could use compressedSize instead */ - -#ifndef LZ4_DISTANCE_MAX /* history window size; can be user-defined at compile time */ -# define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */ -#endif - -#define LZ4_COMPRESS_INPLACE_MARGIN (LZ4_DISTANCE_MAX + 32) /* LZ4_DISTANCE_MAX can be safely replaced by srcSize when it's smaller */ -#define LZ4_COMPRESS_INPLACE_BUFFER_SIZE(maxCompressedSize) ((maxCompressedSize) + LZ4_COMPRESS_INPLACE_MARGIN) /**< maxCompressedSize is generally LZ4_COMPRESSBOUND(inputSize), but can be set to any lower value, with the risk that compression can fail (return code 0(zero)) */ - -#endif /* LZ4_STATIC_3504398509 */ -#endif /* LZ4_STATIC_LINKING_ONLY */ - - - -#ifndef LZ4_H_98237428734687 -#define LZ4_H_98237428734687 - -/*-************************************************************ - * Private Definitions - ************************************************************** - * Do not use these definitions directly. - * They are only exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`. - * Accessing members will expose user code to API and/or ABI break in future versions of the library. 
- **************************************************************/ -#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2) -#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE) -#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) /* required as macro for static allocation */ - -#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# include - typedef int8_t LZ4_i8; - typedef uint8_t LZ4_byte; - typedef uint16_t LZ4_u16; - typedef uint32_t LZ4_u32; -#else - typedef signed char LZ4_i8; - typedef unsigned char LZ4_byte; - typedef unsigned short LZ4_u16; - typedef unsigned int LZ4_u32; -#endif - -typedef struct LZ4_stream_t_internal LZ4_stream_t_internal; -struct LZ4_stream_t_internal { - LZ4_u32 hashTable[LZ4_HASH_SIZE_U32]; - LZ4_u32 currentOffset; - LZ4_u32 tableType; - const LZ4_byte* dictionary; - const LZ4_stream_t_internal* dictCtx; - LZ4_u32 dictSize; -}; - -typedef struct { - const LZ4_byte* externalDict; - size_t extDictSize; - const LZ4_byte* prefixEnd; - size_t prefixSize; -} LZ4_streamDecode_t_internal; - - -/*! LZ4_stream_t : - * Do not use below internal definitions directly ! - * Declare or allocate an LZ4_stream_t instead. - * LZ4_stream_t can also be created using LZ4_createStream(), which is recommended. - * The structure definition can be convenient for static allocation - * (on stack, or as part of larger structure). - * Init this structure with LZ4_initStream() before first use. - * note : only use this definition in association with static linking ! - * this definition is not API/ABI safe, and may change in future versions. - */ -#define LZ4_STREAMSIZE 16416 /* static size, for inter-version compatibility */ -#define LZ4_STREAMSIZE_VOIDP (LZ4_STREAMSIZE / sizeof(void*)) -union LZ4_stream_u { - void* table[LZ4_STREAMSIZE_VOIDP]; - LZ4_stream_t_internal internal_donotuse; -}; /* previously typedef'd to LZ4_stream_t */ - - -/*! LZ4_initStream() : v1.9.0+ - * An LZ4_stream_t structure must be initialized at least once. 
- * This is automatically done when invoking LZ4_createStream(), - * but it's not when the structure is simply declared on stack (for example). - * - * Use LZ4_initStream() to properly initialize a newly declared LZ4_stream_t. - * It can also initialize any arbitrary buffer of sufficient size, - * and will @return a pointer of proper type upon initialization. - * - * Note : initialization fails if size and alignment conditions are not respected. - * In which case, the function will @return NULL. - * Note2: An LZ4_stream_t structure guarantees correct alignment and size. - * Note3: Before v1.9.0, use LZ4_resetStream() instead - */ -LZ4LIB_API LZ4_stream_t* LZ4_initStream (void* buffer, size_t size); - - -/*! LZ4_streamDecode_t : - * information structure to track an LZ4 stream during decompression. - * init this structure using LZ4_setStreamDecode() before first use. - * note : only use in association with static linking ! - * this definition is not API/ABI safe, - * and may change in a future version ! - */ -#define LZ4_STREAMDECODESIZE_U64 (4 + ((sizeof(void*)==16) ? 2 : 0) /*AS-400*/ ) -#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * sizeof(unsigned long long)) -union LZ4_streamDecode_u { - unsigned long long table[LZ4_STREAMDECODESIZE_U64]; - LZ4_streamDecode_t_internal internal_donotuse; -} ; /* previously typedef'd to LZ4_streamDecode_t */ - - - -/*-************************************ -* Obsolete Functions -**************************************/ - -/*! Deprecation warnings - * - * Deprecated functions make the compiler generate a warning when invoked. - * This is meant to invite users to update their source code. - * Should deprecation warnings be a problem, it is generally possible to disable them, - * typically with -Wno-deprecated-declarations for gcc - * or _CRT_SECURE_NO_WARNINGS in Visual. - * - * Another method is to define LZ4_DISABLE_DEPRECATE_WARNINGS - * before including the header file. 
- */ -#ifdef LZ4_DISABLE_DEPRECATE_WARNINGS -# define LZ4_DEPRECATED(message) /* disable deprecation warnings */ -#else -# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ -# define LZ4_DEPRECATED(message) [[deprecated(message)]] -# elif defined(_MSC_VER) -# define LZ4_DEPRECATED(message) __declspec(deprecated(message)) -# elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 45)) -# define LZ4_DEPRECATED(message) __attribute__((deprecated(message))) -# elif defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 31) -# define LZ4_DEPRECATED(message) __attribute__((deprecated)) -# else -# pragma message("WARNING: LZ4_DEPRECATED needs custom implementation for this compiler") -# define LZ4_DEPRECATED(message) /* disabled */ -# endif -#endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */ - -/*! Obsolete compression functions (since v1.7.3) */ -LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress (const char* src, char* dest, int srcSize); -LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress_limitedOutput (const char* src, char* dest, int srcSize, int maxOutputSize); -LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize); -LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize); -LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize); -LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize); - -/*! 
Obsolete decompression functions (since v1.8.0) */ -LZ4_DEPRECATED("use LZ4_decompress_fast() instead") LZ4LIB_API int LZ4_uncompress (const char* source, char* dest, int outputSize); -LZ4_DEPRECATED("use LZ4_decompress_safe() instead") LZ4LIB_API int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize); - -/* Obsolete streaming functions (since v1.7.0) - * degraded functionality; do not use! - * - * In order to perform streaming compression, these functions depended on data - * that is no longer tracked in the state. They have been preserved as well as - * possible: using them will still produce a correct output. However, they don't - * actually retain any history between compression calls. The compression ratio - * achieved will therefore be no better than compressing each chunk - * independently. - */ -LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API void* LZ4_create (char* inputBuffer); -LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API int LZ4_sizeofStreamState(void); -LZ4_DEPRECATED("Use LZ4_resetStream() instead") LZ4LIB_API int LZ4_resetStreamState(void* state, char* inputBuffer); -LZ4_DEPRECATED("Use LZ4_saveDict() instead") LZ4LIB_API char* LZ4_slideInputBuffer (void* state); - -/*! Obsolete streaming decoding functions (since v1.7.0) */ -LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") LZ4LIB_API int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize); -LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize); - -/*! Obsolete LZ4_decompress_fast variants (since v1.9.0) : - * These functions used to be faster than LZ4_decompress_safe(), - * but this is no longer the case. They are now slower. 
- * This is because LZ4_decompress_fast() doesn't know the input size, - * and therefore must progress more cautiously into the input buffer to not read beyond the end of block. - * On top of that `LZ4_decompress_fast()` is not protected vs malformed or malicious inputs, making it a security liability. - * As a consequence, LZ4_decompress_fast() is strongly discouraged, and deprecated. - * - * The last remaining LZ4_decompress_fast() specificity is that - * it can decompress a block without knowing its compressed size. - * Such functionality can be achieved in a more secure manner - * by employing LZ4_decompress_safe_partial(). - * - * Parameters: - * originalSize : is the uncompressed size to regenerate. - * `dst` must be already allocated, its size must be >= 'originalSize' bytes. - * @return : number of bytes read from source buffer (== compressed size). - * The function expects to finish at block's end exactly. - * If the source stream is detected malformed, the function stops decoding and returns a negative result. - * note : LZ4_decompress_fast*() requires originalSize. Thanks to this information, it never writes past the output buffer. - * However, since it doesn't know its 'src' size, it may read an unknown amount of input, past input buffer bounds. - * Also, since match offsets are not validated, match reads from 'src' may underflow too. - * These issues never happen if input (compressed) data is correct. - * But they may happen if input data is invalid (error or intentional tampering). - * As a consequence, use these functions in trusted environments with trusted data **only**. - */ -LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe() instead") -LZ4LIB_API int LZ4_decompress_fast (const char* src, char* dst, int originalSize); -LZ4_DEPRECATED("This function is deprecated and unsafe. 
Consider using LZ4_decompress_safe_continue() instead") -LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int originalSize); -LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_usingDict() instead") -LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* src, char* dst, int originalSize, const char* dictStart, int dictSize); - -/*! LZ4_resetStream() : - * An LZ4_stream_t structure must be initialized at least once. - * This is done with LZ4_initStream(), or LZ4_resetStream(). - * Consider switching to LZ4_initStream(), - * invoking LZ4_resetStream() will trigger deprecation warnings in the future. - */ -LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr); - - -#endif /* LZ4_H_98237428734687 */ - - -#if defined (__cplusplus) -} -#endif diff --git a/dist/darwin_amd64/include/lz4frame.h b/dist/darwin_amd64/include/lz4frame.h deleted file mode 100644 index 4573317..0000000 --- a/dist/darwin_amd64/include/lz4frame.h +++ /dev/null @@ -1,623 +0,0 @@ -/* - LZ4 auto-framing library - Header File - Copyright (C) 2011-2017, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - LZ4 source repository : https://github.com/lz4/lz4 - - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c -*/ - -/* LZ4F is a stand-alone API able to create and decode LZ4 frames - * conformant with specification v1.6.1 in doc/lz4_Frame_format.md . - * Generated frames are compatible with `lz4` CLI. - * - * LZ4F also offers streaming capabilities. - * - * lz4.h is not required when using lz4frame.h, - * except to extract common constant such as LZ4_VERSION_NUMBER. - * */ - -#ifndef LZ4F_H_09782039843 -#define LZ4F_H_09782039843 - -#if defined (__cplusplus) -extern "C" { -#endif - -/* --- Dependency --- */ -#include /* size_t */ - - -/** - Introduction - - lz4frame.h implements LZ4 frame specification (doc/lz4_Frame_format.md). - lz4frame.h provides frame compression functions that take care - of encoding standard metadata alongside LZ4-compressed blocks. -*/ - -/*-*************************************************************** - * Compiler specifics - *****************************************************************/ -/* LZ4_DLL_EXPORT : - * Enable exporting of functions when building a Windows DLL - * LZ4FLIB_VISIBILITY : - * Control library symbols visibility. 
- */ -#ifndef LZ4FLIB_VISIBILITY -# if defined(__GNUC__) && (__GNUC__ >= 4) -# define LZ4FLIB_VISIBILITY __attribute__ ((visibility ("default"))) -# else -# define LZ4FLIB_VISIBILITY -# endif -#endif -#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1) -# define LZ4FLIB_API __declspec(dllexport) LZ4FLIB_VISIBILITY -#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1) -# define LZ4FLIB_API __declspec(dllimport) LZ4FLIB_VISIBILITY -#else -# define LZ4FLIB_API LZ4FLIB_VISIBILITY -#endif - -#ifdef LZ4F_DISABLE_DEPRECATE_WARNINGS -# define LZ4F_DEPRECATE(x) x -#else -# if defined(_MSC_VER) -# define LZ4F_DEPRECATE(x) x /* __declspec(deprecated) x - only works with C++ */ -# elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 6)) -# define LZ4F_DEPRECATE(x) x __attribute__((deprecated)) -# else -# define LZ4F_DEPRECATE(x) x /* no deprecation warning for this compiler */ -# endif -#endif - - -/*-************************************ - * Error management - **************************************/ -typedef size_t LZ4F_errorCode_t; - -LZ4FLIB_API unsigned LZ4F_isError(LZ4F_errorCode_t code); /**< tells when a function result is an error code */ -LZ4FLIB_API const char* LZ4F_getErrorName(LZ4F_errorCode_t code); /**< return error code string; for debugging */ - - -/*-************************************ - * Frame compression types - ************************************* */ -/* #define LZ4F_ENABLE_OBSOLETE_ENUMS // uncomment to enable obsolete enums */ -#ifdef LZ4F_ENABLE_OBSOLETE_ENUMS -# define LZ4F_OBSOLETE_ENUM(x) , LZ4F_DEPRECATE(x) = LZ4F_##x -#else -# define LZ4F_OBSOLETE_ENUM(x) -#endif - -/* The larger the block size, the (slightly) better the compression ratio, - * though there are diminishing returns. - * Larger blocks also increase memory usage on both compression and decompression sides. 
- */ -typedef enum { - LZ4F_default=0, - LZ4F_max64KB=4, - LZ4F_max256KB=5, - LZ4F_max1MB=6, - LZ4F_max4MB=7 - LZ4F_OBSOLETE_ENUM(max64KB) - LZ4F_OBSOLETE_ENUM(max256KB) - LZ4F_OBSOLETE_ENUM(max1MB) - LZ4F_OBSOLETE_ENUM(max4MB) -} LZ4F_blockSizeID_t; - -/* Linked blocks sharply reduce inefficiencies when using small blocks, - * they compress better. - * However, some LZ4 decoders are only compatible with independent blocks */ -typedef enum { - LZ4F_blockLinked=0, - LZ4F_blockIndependent - LZ4F_OBSOLETE_ENUM(blockLinked) - LZ4F_OBSOLETE_ENUM(blockIndependent) -} LZ4F_blockMode_t; - -typedef enum { - LZ4F_noContentChecksum=0, - LZ4F_contentChecksumEnabled - LZ4F_OBSOLETE_ENUM(noContentChecksum) - LZ4F_OBSOLETE_ENUM(contentChecksumEnabled) -} LZ4F_contentChecksum_t; - -typedef enum { - LZ4F_noBlockChecksum=0, - LZ4F_blockChecksumEnabled -} LZ4F_blockChecksum_t; - -typedef enum { - LZ4F_frame=0, - LZ4F_skippableFrame - LZ4F_OBSOLETE_ENUM(skippableFrame) -} LZ4F_frameType_t; - -#ifdef LZ4F_ENABLE_OBSOLETE_ENUMS -typedef LZ4F_blockSizeID_t blockSizeID_t; -typedef LZ4F_blockMode_t blockMode_t; -typedef LZ4F_frameType_t frameType_t; -typedef LZ4F_contentChecksum_t contentChecksum_t; -#endif - -/*! LZ4F_frameInfo_t : - * makes it possible to set or read frame parameters. - * Structure must be first init to 0, using memset() or LZ4F_INIT_FRAMEINFO, - * setting all parameters to default. 
- * It's then possible to update selectively some parameters */ -typedef struct { - LZ4F_blockSizeID_t blockSizeID; /* max64KB, max256KB, max1MB, max4MB; 0 == default */ - LZ4F_blockMode_t blockMode; /* LZ4F_blockLinked, LZ4F_blockIndependent; 0 == default */ - LZ4F_contentChecksum_t contentChecksumFlag; /* 1: frame terminated with 32-bit checksum of decompressed data; 0: disabled (default) */ - LZ4F_frameType_t frameType; /* read-only field : LZ4F_frame or LZ4F_skippableFrame */ - unsigned long long contentSize; /* Size of uncompressed content ; 0 == unknown */ - unsigned dictID; /* Dictionary ID, sent by compressor to help decoder select correct dictionary; 0 == no dictID provided */ - LZ4F_blockChecksum_t blockChecksumFlag; /* 1: each block followed by a checksum of block's compressed data; 0: disabled (default) */ -} LZ4F_frameInfo_t; - -#define LZ4F_INIT_FRAMEINFO { LZ4F_default, LZ4F_blockLinked, LZ4F_noContentChecksum, LZ4F_frame, 0ULL, 0U, LZ4F_noBlockChecksum } /* v1.8.3+ */ - -/*! LZ4F_preferences_t : - * makes it possible to supply advanced compression instructions to streaming interface. - * Structure must be first init to 0, using memset() or LZ4F_INIT_PREFERENCES, - * setting all parameters to default. - * All reserved fields must be set to zero. */ -typedef struct { - LZ4F_frameInfo_t frameInfo; - int compressionLevel; /* 0: default (fast mode); values > LZ4HC_CLEVEL_MAX count as LZ4HC_CLEVEL_MAX; values < 0 trigger "fast acceleration" */ - unsigned autoFlush; /* 1: always flush; reduces usage of internal buffers */ - unsigned favorDecSpeed; /* 1: parser favors decompression speed vs compression ratio. 
Only works for high compression modes (>= LZ4HC_CLEVEL_OPT_MIN) */ /* v1.8.2+ */ - unsigned reserved[3]; /* must be zero for forward compatibility */ -} LZ4F_preferences_t; - -#define LZ4F_INIT_PREFERENCES { LZ4F_INIT_FRAMEINFO, 0, 0u, 0u, { 0u, 0u, 0u } } /* v1.8.3+ */ - - -/*-********************************* -* Simple compression function -***********************************/ - -LZ4FLIB_API int LZ4F_compressionLevel_max(void); /* v1.8.0+ */ - -/*! LZ4F_compressFrameBound() : - * Returns the maximum possible compressed size with LZ4F_compressFrame() given srcSize and preferences. - * `preferencesPtr` is optional. It can be replaced by NULL, in which case, the function will assume default preferences. - * Note : this result is only usable with LZ4F_compressFrame(). - * It may also be used with LZ4F_compressUpdate() _if no flush() operation_ is performed. - */ -LZ4FLIB_API size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr); - -/*! LZ4F_compressFrame() : - * Compress an entire srcBuffer into a valid LZ4 frame. - * dstCapacity MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr). - * The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default. - * @return : number of bytes written into dstBuffer. 
- * or an error code if it fails (can be tested using LZ4F_isError()) - */ -LZ4FLIB_API size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity, - const void* srcBuffer, size_t srcSize, - const LZ4F_preferences_t* preferencesPtr); - - -/*-*********************************** -* Advanced compression functions -*************************************/ -typedef struct LZ4F_cctx_s LZ4F_cctx; /* incomplete type */ -typedef LZ4F_cctx* LZ4F_compressionContext_t; /* for compatibility with previous API version */ - -typedef struct { - unsigned stableSrc; /* 1 == src content will remain present on future calls to LZ4F_compress(); skip copying src content within tmp buffer */ - unsigned reserved[3]; -} LZ4F_compressOptions_t; - -/*--- Resource Management ---*/ - -#define LZ4F_VERSION 100 /* This number can be used to check for an incompatible API breaking change */ -LZ4FLIB_API unsigned LZ4F_getVersion(void); - -/*! LZ4F_createCompressionContext() : - * The first thing to do is to create a compressionContext object, which will be used in all compression operations. - * This is achieved using LZ4F_createCompressionContext(), which takes as argument a version. - * The version provided MUST be LZ4F_VERSION. It is intended to track potential version mismatch, notably when using DLL. - * The function will provide a pointer to a fully allocated LZ4F_cctx object. - * If @return != zero, there was an error during context creation. - * Object can release its memory using LZ4F_freeCompressionContext(); - */ -LZ4FLIB_API LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_cctx** cctxPtr, unsigned version); -LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctx); - - -/*---- Compression ----*/ - -#define LZ4F_HEADER_SIZE_MIN 7 /* LZ4 Frame header size can vary, depending on selected paramaters */ -#define LZ4F_HEADER_SIZE_MAX 19 - -/* Size in bytes of a block header in little-endian format. 
Highest bit indicates if block data is uncompressed */ -#define LZ4F_BLOCK_HEADER_SIZE 4 - -/* Size in bytes of a block checksum footer in little-endian format. */ -#define LZ4F_BLOCK_CHECKSUM_SIZE 4 - -/* Size in bytes of the content checksum. */ -#define LZ4F_CONTENT_CHECKSUM_SIZE 4 - -/*! LZ4F_compressBegin() : - * will write the frame header into dstBuffer. - * dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes. - * `prefsPtr` is optional : you can provide NULL as argument, all preferences will then be set to default. - * @return : number of bytes written into dstBuffer for the header - * or an error code (which can be tested using LZ4F_isError()) - */ -LZ4FLIB_API size_t LZ4F_compressBegin(LZ4F_cctx* cctx, - void* dstBuffer, size_t dstCapacity, - const LZ4F_preferences_t* prefsPtr); - -/*! LZ4F_compressBound() : - * Provides minimum dstCapacity required to guarantee success of - * LZ4F_compressUpdate(), given a srcSize and preferences, for a worst case scenario. - * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() instead. - * Note that the result is only valid for a single invocation of LZ4F_compressUpdate(). - * When invoking LZ4F_compressUpdate() multiple times, - * if the output buffer is gradually filled up instead of emptied and re-used from its start, - * one must check if there is enough remaining capacity before each invocation, using LZ4F_compressBound(). - * @return is always the same for a srcSize and prefsPtr. - * prefsPtr is optional : when NULL is provided, preferences will be set to cover worst case scenario. - * tech details : - * @return if automatic flushing is not enabled, includes the possibility that internal buffer might already be filled by up to (blockSize-1) bytes. - * It also includes frame footer (ending + checksum), since it might be generated by LZ4F_compressEnd(). - * @return doesn't include frame header, as it was already generated by LZ4F_compressBegin(). 
- */ -LZ4FLIB_API size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* prefsPtr); - -/*! LZ4F_compressUpdate() : - * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary. - * Important rule: dstCapacity MUST be large enough to ensure operation success even in worst case situations. - * This value is provided by LZ4F_compressBound(). - * If this condition is not respected, LZ4F_compress() will fail (result is an errorCode). - * LZ4F_compressUpdate() doesn't guarantee error recovery. - * When an error occurs, compression context must be freed or resized. - * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default. - * @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered). - * or an error code if it fails (which can be tested using LZ4F_isError()) - */ -LZ4FLIB_API size_t LZ4F_compressUpdate(LZ4F_cctx* cctx, - void* dstBuffer, size_t dstCapacity, - const void* srcBuffer, size_t srcSize, - const LZ4F_compressOptions_t* cOptPtr); - -/*! LZ4F_flush() : - * When data must be generated and sent immediately, without waiting for a block to be completely filled, - * it's possible to call LZ4_flush(). It will immediately compress any data buffered within cctx. - * `dstCapacity` must be large enough to ensure the operation will be successful. - * `cOptPtr` is optional : it's possible to provide NULL, all options will be set to default. - * @return : nb of bytes written into dstBuffer (can be zero, when there is no data stored within cctx) - * or an error code if it fails (which can be tested using LZ4F_isError()) - * Note : LZ4F_flush() is guaranteed to be successful when dstCapacity >= LZ4F_compressBound(0, prefsPtr). - */ -LZ4FLIB_API size_t LZ4F_flush(LZ4F_cctx* cctx, - void* dstBuffer, size_t dstCapacity, - const LZ4F_compressOptions_t* cOptPtr); - -/*! LZ4F_compressEnd() : - * To properly finish an LZ4 frame, invoke LZ4F_compressEnd(). 
- * It will flush whatever data remained within `cctx` (like LZ4_flush()) - * and properly finalize the frame, with an endMark and a checksum. - * `cOptPtr` is optional : NULL can be provided, in which case all options will be set to default. - * @return : nb of bytes written into dstBuffer, necessarily >= 4 (endMark), - * or an error code if it fails (which can be tested using LZ4F_isError()) - * Note : LZ4F_compressEnd() is guaranteed to be successful when dstCapacity >= LZ4F_compressBound(0, prefsPtr). - * A successful call to LZ4F_compressEnd() makes `cctx` available again for another compression task. - */ -LZ4FLIB_API size_t LZ4F_compressEnd(LZ4F_cctx* cctx, - void* dstBuffer, size_t dstCapacity, - const LZ4F_compressOptions_t* cOptPtr); - - -/*-********************************* -* Decompression functions -***********************************/ -typedef struct LZ4F_dctx_s LZ4F_dctx; /* incomplete type */ -typedef LZ4F_dctx* LZ4F_decompressionContext_t; /* compatibility with previous API versions */ - -typedef struct { - unsigned stableDst; /* pledges that last 64KB decompressed data will remain available unmodified. This optimization skips storage operations in tmp buffers. */ - unsigned reserved[3]; /* must be set to zero for forward compatibility */ -} LZ4F_decompressOptions_t; - - -/* Resource management */ - -/*! LZ4F_createDecompressionContext() : - * Create an LZ4F_dctx object, to track all decompression operations. - * The version provided MUST be LZ4F_VERSION. - * The function provides a pointer to an allocated and initialized LZ4F_dctx object. - * The result is an errorCode, which can be tested using LZ4F_isError(). - * dctx memory can be released using LZ4F_freeDecompressionContext(); - * Result of LZ4F_freeDecompressionContext() indicates current state of decompressionContext when being released. - * That is, it should be == 0 if decompression has been completed fully and correctly. 
- */ -LZ4FLIB_API LZ4F_errorCode_t LZ4F_createDecompressionContext(LZ4F_dctx** dctxPtr, unsigned version); -LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx); - - -/*-*********************************** -* Streaming decompression functions -*************************************/ - -#define LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH 5 - -/*! LZ4F_headerSize() : v1.9.0+ - * Provide the header size of a frame starting at `src`. - * `srcSize` must be >= LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH, - * which is enough to decode the header length. - * @return : size of frame header - * or an error code, which can be tested using LZ4F_isError() - * note : Frame header size is variable, but is guaranteed to be - * >= LZ4F_HEADER_SIZE_MIN bytes, and <= LZ4F_HEADER_SIZE_MAX bytes. - */ -LZ4FLIB_API size_t LZ4F_headerSize(const void* src, size_t srcSize); - -/*! LZ4F_getFrameInfo() : - * This function extracts frame parameters (max blockSize, dictID, etc.). - * Its usage is optional: user can call LZ4F_decompress() directly. - * - * Extracted information will fill an existing LZ4F_frameInfo_t structure. - * This can be useful for allocation and dictionary identification purposes. - * - * LZ4F_getFrameInfo() can work in the following situations : - * - * 1) At the beginning of a new frame, before any invocation of LZ4F_decompress(). - * It will decode header from `srcBuffer`, - * consuming the header and starting the decoding process. - * - * Input size must be large enough to contain the full frame header. - * Frame header size can be known beforehand by LZ4F_headerSize(). - * Frame header size is variable, but is guaranteed to be >= LZ4F_HEADER_SIZE_MIN bytes, - * and not more than <= LZ4F_HEADER_SIZE_MAX bytes. - * Hence, blindly providing LZ4F_HEADER_SIZE_MAX bytes or more will always work. - * It's allowed to provide more input data than the header size, - * LZ4F_getFrameInfo() will only consume the header. 
- * - * If input size is not large enough, - * aka if it's smaller than header size, - * function will fail and return an error code. - * - * 2) After decoding has been started, - * it's possible to invoke LZ4F_getFrameInfo() anytime - * to extract already decoded frame parameters stored within dctx. - * - * Note that, if decoding has barely started, - * and not yet read enough information to decode the header, - * LZ4F_getFrameInfo() will fail. - * - * The number of bytes consumed from srcBuffer will be updated in *srcSizePtr (necessarily <= original value). - * LZ4F_getFrameInfo() only consumes bytes when decoding has not yet started, - * and when decoding the header has been successful. - * Decompression must then resume from (srcBuffer + *srcSizePtr). - * - * @return : a hint about how many srcSize bytes LZ4F_decompress() expects for next call, - * or an error code which can be tested using LZ4F_isError(). - * note 1 : in case of error, dctx is not modified. Decoding operation can resume from beginning safely. - * note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure. - */ -LZ4FLIB_API size_t LZ4F_getFrameInfo(LZ4F_dctx* dctx, - LZ4F_frameInfo_t* frameInfoPtr, - const void* srcBuffer, size_t* srcSizePtr); - -/*! LZ4F_decompress() : - * Call this function repetitively to regenerate data compressed in `srcBuffer`. - * - * The function requires a valid dctx state. - * It will read up to *srcSizePtr bytes from srcBuffer, - * and decompress data into dstBuffer, of capacity *dstSizePtr. - * - * The nb of bytes consumed from srcBuffer will be written into *srcSizePtr (necessarily <= original value). - * The nb of bytes decompressed into dstBuffer will be written into *dstSizePtr (necessarily <= original value). - * - * The function does not necessarily read all input bytes, so always check value in *srcSizePtr. - * Unconsumed source data must be presented again in subsequent invocations. 
- * - * `dstBuffer` can freely change between each consecutive function invocation. - * `dstBuffer` content will be overwritten. - * - * @return : an hint of how many `srcSize` bytes LZ4F_decompress() expects for next call. - * Schematically, it's the size of the current (or remaining) compressed block + header of next block. - * Respecting the hint provides some small speed benefit, because it skips intermediate buffers. - * This is just a hint though, it's always possible to provide any srcSize. - * - * When a frame is fully decoded, @return will be 0 (no more data expected). - * When provided with more bytes than necessary to decode a frame, - * LZ4F_decompress() will stop reading exactly at end of current frame, and @return 0. - * - * If decompression failed, @return is an error code, which can be tested using LZ4F_isError(). - * After a decompression error, the `dctx` context is not resumable. - * Use LZ4F_resetDecompressionContext() to return to clean state. - * - * After a frame is fully decoded, dctx can be used again to decompress another frame. - */ -LZ4FLIB_API size_t LZ4F_decompress(LZ4F_dctx* dctx, - void* dstBuffer, size_t* dstSizePtr, - const void* srcBuffer, size_t* srcSizePtr, - const LZ4F_decompressOptions_t* dOptPtr); - - -/*! LZ4F_resetDecompressionContext() : added in v1.8.0 - * In case of an error, the context is left in "undefined" state. - * In which case, it's necessary to reset it, before re-using it. - * This method can also be used to abruptly stop any unfinished decompression, - * and start a new one using same context resources. */ -LZ4FLIB_API void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx); /* always successful */ - - - -#if defined (__cplusplus) -} -#endif - -#endif /* LZ4F_H_09782039843 */ - -#if defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843) -#define LZ4F_H_STATIC_09782039843 - -#if defined (__cplusplus) -extern "C" { -#endif - -/* These declarations are not stable and may change in the future. 
- * They are therefore only safe to depend on - * when the caller is statically linked against the library. - * To access their declarations, define LZ4F_STATIC_LINKING_ONLY. - * - * By default, these symbols aren't published into shared/dynamic libraries. - * You can override this behavior and force them to be published - * by defining LZ4F_PUBLISH_STATIC_FUNCTIONS. - * Use at your own risk. - */ -#ifdef LZ4F_PUBLISH_STATIC_FUNCTIONS -# define LZ4FLIB_STATIC_API LZ4FLIB_API -#else -# define LZ4FLIB_STATIC_API -#endif - - -/* --- Error List --- */ -#define LZ4F_LIST_ERRORS(ITEM) \ - ITEM(OK_NoError) \ - ITEM(ERROR_GENERIC) \ - ITEM(ERROR_maxBlockSize_invalid) \ - ITEM(ERROR_blockMode_invalid) \ - ITEM(ERROR_contentChecksumFlag_invalid) \ - ITEM(ERROR_compressionLevel_invalid) \ - ITEM(ERROR_headerVersion_wrong) \ - ITEM(ERROR_blockChecksum_invalid) \ - ITEM(ERROR_reservedFlag_set) \ - ITEM(ERROR_allocation_failed) \ - ITEM(ERROR_srcSize_tooLarge) \ - ITEM(ERROR_dstMaxSize_tooSmall) \ - ITEM(ERROR_frameHeader_incomplete) \ - ITEM(ERROR_frameType_unknown) \ - ITEM(ERROR_frameSize_wrong) \ - ITEM(ERROR_srcPtr_wrong) \ - ITEM(ERROR_decompressionFailed) \ - ITEM(ERROR_headerChecksum_invalid) \ - ITEM(ERROR_contentChecksum_invalid) \ - ITEM(ERROR_frameDecoding_alreadyStarted) \ - ITEM(ERROR_maxCode) - -#define LZ4F_GENERATE_ENUM(ENUM) LZ4F_##ENUM, - -/* enum list is exposed, to handle specific errors */ -typedef enum { LZ4F_LIST_ERRORS(LZ4F_GENERATE_ENUM) - _LZ4F_dummy_error_enum_for_c89_never_used } LZ4F_errorCodes; - -LZ4FLIB_STATIC_API LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult); - -LZ4FLIB_STATIC_API size_t LZ4F_getBlockSize(unsigned); - -/********************************** - * Bulk processing dictionary API - *********************************/ - -/* A Dictionary is useful for the compression of small messages (KB range). - * It dramatically improves compression efficiency. 
- * - * LZ4 can ingest any input as dictionary, though only the last 64 KB are useful. - * Best results are generally achieved by using Zstandard's Dictionary Builder - * to generate a high-quality dictionary from a set of samples. - * - * Loading a dictionary has a cost, since it involves construction of tables. - * The Bulk processing dictionary API makes it possible to share this cost - * over an arbitrary number of compression jobs, even concurrently, - * markedly improving compression latency for these cases. - * - * The same dictionary will have to be used on the decompression side - * for decoding to be successful. - * To help identify the correct dictionary at decoding stage, - * the frame header allows optional embedding of a dictID field. - */ -typedef struct LZ4F_CDict_s LZ4F_CDict; - -/*! LZ4_createCDict() : - * When compressing multiple messages / blocks using the same dictionary, it's recommended to load it just once. - * LZ4_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay. - * LZ4_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. - * `dictBuffer` can be released after LZ4_CDict creation, since its content is copied within CDict */ -LZ4FLIB_STATIC_API LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize); -LZ4FLIB_STATIC_API void LZ4F_freeCDict(LZ4F_CDict* CDict); - - -/*! LZ4_compressFrame_usingCDict() : - * Compress an entire srcBuffer into a valid LZ4 frame using a digested Dictionary. - * cctx must point to a context created by LZ4F_createCompressionContext(). - * If cdict==NULL, compress without a dictionary. - * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr). - * If this condition is not respected, function will fail (@return an errorCode). 
- * The LZ4F_preferences_t structure is optional : you may provide NULL as argument, - * but it's not recommended, as it's the only way to provide dictID in the frame header. - * @return : number of bytes written into dstBuffer. - * or an error code if it fails (can be tested using LZ4F_isError()) */ -LZ4FLIB_STATIC_API size_t LZ4F_compressFrame_usingCDict( - LZ4F_cctx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const LZ4F_CDict* cdict, - const LZ4F_preferences_t* preferencesPtr); - - -/*! LZ4F_compressBegin_usingCDict() : - * Inits streaming dictionary compression, and writes the frame header into dstBuffer. - * dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes. - * `prefsPtr` is optional : you may provide NULL as argument, - * however, it's the only way to provide dictID in the frame header. - * @return : number of bytes written into dstBuffer for the header, - * or an error code (which can be tested using LZ4F_isError()) */ -LZ4FLIB_STATIC_API size_t LZ4F_compressBegin_usingCDict( - LZ4F_cctx* cctx, - void* dstBuffer, size_t dstCapacity, - const LZ4F_CDict* cdict, - const LZ4F_preferences_t* prefsPtr); - - -/*! LZ4F_decompress_usingDict() : - * Same as LZ4F_decompress(), using a predefined dictionary. - * Dictionary is used "in place", without any preprocessing. - * It must remain accessible throughout the entire frame decoding. 
*/ -LZ4FLIB_STATIC_API size_t LZ4F_decompress_usingDict( - LZ4F_dctx* dctxPtr, - void* dstBuffer, size_t* dstSizePtr, - const void* srcBuffer, size_t* srcSizePtr, - const void* dict, size_t dictSize, - const LZ4F_decompressOptions_t* decompressOptionsPtr); - -#if defined (__cplusplus) -} -#endif - -#endif /* defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843) */ diff --git a/dist/darwin_amd64/include/lz4hc.h b/dist/darwin_amd64/include/lz4hc.h deleted file mode 100644 index 3d441fb..0000000 --- a/dist/darwin_amd64/include/lz4hc.h +++ /dev/null @@ -1,413 +0,0 @@ -/* - LZ4 HC - High Compression Mode of LZ4 - Header File - Copyright (C) 2011-2017, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - LZ4 source repository : https://github.com/lz4/lz4 - - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c -*/ -#ifndef LZ4_HC_H_19834876238432 -#define LZ4_HC_H_19834876238432 - -#if defined (__cplusplus) -extern "C" { -#endif - -/* --- Dependency --- */ -/* note : lz4hc requires lz4.h/lz4.c for compilation */ -#include "lz4.h" /* stddef, LZ4LIB_API, LZ4_DEPRECATED */ - - -/* --- Useful constants --- */ -#define LZ4HC_CLEVEL_MIN 3 -#define LZ4HC_CLEVEL_DEFAULT 9 -#define LZ4HC_CLEVEL_OPT_MIN 10 -#define LZ4HC_CLEVEL_MAX 12 - - -/*-************************************ - * Block Compression - **************************************/ -/*! LZ4_compress_HC() : - * Compress data from `src` into `dst`, using the powerful but slower "HC" algorithm. - * `dst` must be already allocated. - * Compression is guaranteed to succeed if `dstCapacity >= LZ4_compressBound(srcSize)` (see "lz4.h") - * Max supported `srcSize` value is LZ4_MAX_INPUT_SIZE (see "lz4.h") - * `compressionLevel` : any value between 1 and LZ4HC_CLEVEL_MAX will work. - * Values > LZ4HC_CLEVEL_MAX behave the same as LZ4HC_CLEVEL_MAX. - * @return : the number of bytes written into 'dst' - * or 0 if compression fails. - */ -LZ4LIB_API int LZ4_compress_HC (const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel); - - -/* Note : - * Decompression functions are provided within "lz4.h" (BSD license) - */ - - -/*! 
LZ4_compress_HC_extStateHC() : - * Same as LZ4_compress_HC(), but using an externally allocated memory segment for `state`. - * `state` size is provided by LZ4_sizeofStateHC(). - * Memory segment must be aligned on 8-bytes boundaries (which a normal malloc() should do properly). - */ -LZ4LIB_API int LZ4_sizeofStateHC(void); -LZ4LIB_API int LZ4_compress_HC_extStateHC(void* stateHC, const char* src, char* dst, int srcSize, int maxDstSize, int compressionLevel); - - -/*! LZ4_compress_HC_destSize() : v1.9.0+ - * Will compress as much data as possible from `src` - * to fit into `targetDstSize` budget. - * Result is provided in 2 parts : - * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize) - * or 0 if compression fails. - * `srcSizePtr` : on success, *srcSizePtr is updated to indicate how much bytes were read from `src` - */ -LZ4LIB_API int LZ4_compress_HC_destSize(void* stateHC, - const char* src, char* dst, - int* srcSizePtr, int targetDstSize, - int compressionLevel); - - -/*-************************************ - * Streaming Compression - * Bufferless synchronous API - **************************************/ - typedef union LZ4_streamHC_u LZ4_streamHC_t; /* incomplete type (defined later) */ - -/*! LZ4_createStreamHC() and LZ4_freeStreamHC() : - * These functions create and release memory for LZ4 HC streaming state. - * Newly created states are automatically initialized. - * A same state can be used multiple times consecutively, - * starting with LZ4_resetStreamHC_fast() to start a new stream of blocks. - */ -LZ4LIB_API LZ4_streamHC_t* LZ4_createStreamHC(void); -LZ4LIB_API int LZ4_freeStreamHC (LZ4_streamHC_t* streamHCPtr); - -/* - These functions compress data in successive blocks of any size, - using previous blocks as dictionary, to improve compression ratio. - One key assumption is that previous blocks (up to 64 KB) remain read-accessible while compressing next blocks. 
- There is an exception for ring buffers, which can be smaller than 64 KB. - Ring-buffer scenario is automatically detected and handled within LZ4_compress_HC_continue(). - - Before starting compression, state must be allocated and properly initialized. - LZ4_createStreamHC() does both, though compression level is set to LZ4HC_CLEVEL_DEFAULT. - - Selecting the compression level can be done with LZ4_resetStreamHC_fast() (starts a new stream) - or LZ4_setCompressionLevel() (anytime, between blocks in the same stream) (experimental). - LZ4_resetStreamHC_fast() only works on states which have been properly initialized at least once, - which is automatically the case when state is created using LZ4_createStreamHC(). - - After reset, a first "fictional block" can be designated as initial dictionary, - using LZ4_loadDictHC() (Optional). - - Invoke LZ4_compress_HC_continue() to compress each successive block. - The number of blocks is unlimited. - Previous input blocks, including initial dictionary when present, - must remain accessible and unmodified during compression. - - It's allowed to update compression level anytime between blocks, - using LZ4_setCompressionLevel() (experimental). - - 'dst' buffer should be sized to handle worst case scenarios - (see LZ4_compressBound(), it ensures compression success). - In case of failure, the API does not guarantee recovery, - so the state _must_ be reset. - To ensure compression success - whenever `dst` buffer size cannot be made >= LZ4_compressBound(), - consider using LZ4_compress_HC_continue_destSize(). - - Whenever previous input blocks can't be preserved unmodified in-place during compression of next blocks, - it's possible to copy the last blocks into a more stable memory space, using LZ4_saveDictHC(). 
- Return value of LZ4_saveDictHC() is the size of dictionary effectively saved into 'safeBuffer' (<= 64 KB) - - After completing a streaming compression, - it's possible to start a new stream of blocks, using the same LZ4_streamHC_t state, - just by resetting it, using LZ4_resetStreamHC_fast(). -*/ - -LZ4LIB_API void LZ4_resetStreamHC_fast(LZ4_streamHC_t* streamHCPtr, int compressionLevel); /* v1.9.0+ */ -LZ4LIB_API int LZ4_loadDictHC (LZ4_streamHC_t* streamHCPtr, const char* dictionary, int dictSize); - -LZ4LIB_API int LZ4_compress_HC_continue (LZ4_streamHC_t* streamHCPtr, - const char* src, char* dst, - int srcSize, int maxDstSize); - -/*! LZ4_compress_HC_continue_destSize() : v1.9.0+ - * Similar to LZ4_compress_HC_continue(), - * but will read as much data as possible from `src` - * to fit into `targetDstSize` budget. - * Result is provided into 2 parts : - * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize) - * or 0 if compression fails. - * `srcSizePtr` : on success, *srcSizePtr will be updated to indicate how much bytes were read from `src`. - * Note that this function may not consume the entire input. - */ -LZ4LIB_API int LZ4_compress_HC_continue_destSize(LZ4_streamHC_t* LZ4_streamHCPtr, - const char* src, char* dst, - int* srcSizePtr, int targetDstSize); - -LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, int maxDictSize); - - - -/*^********************************************** - * !!!!!! STATIC LINKING ONLY !!!!!! - ***********************************************/ - -/*-****************************************************************** - * PRIVATE DEFINITIONS : - * Do not use these definitions directly. - * They are merely exposed to allow static allocation of `LZ4_streamHC_t`. - * Declare an `LZ4_streamHC_t` directly, rather than any type below. - * Even then, only do so in the context of static linking, as definitions may change between versions. 
- ********************************************************************/ - -#define LZ4HC_DICTIONARY_LOGSIZE 16 -#define LZ4HC_MAXD (1<= LZ4HC_CLEVEL_OPT_MIN. - */ -LZ4LIB_STATIC_API void LZ4_favorDecompressionSpeed( - LZ4_streamHC_t* LZ4_streamHCPtr, int favor); - -/*! LZ4_resetStreamHC_fast() : v1.9.0+ - * When an LZ4_streamHC_t is known to be in a internally coherent state, - * it can often be prepared for a new compression with almost no work, only - * sometimes falling back to the full, expensive reset that is always required - * when the stream is in an indeterminate state (i.e., the reset performed by - * LZ4_resetStreamHC()). - * - * LZ4_streamHCs are guaranteed to be in a valid state when: - * - returned from LZ4_createStreamHC() - * - reset by LZ4_resetStreamHC() - * - memset(stream, 0, sizeof(LZ4_streamHC_t)) - * - the stream was in a valid state and was reset by LZ4_resetStreamHC_fast() - * - the stream was in a valid state and was then used in any compression call - * that returned success - * - the stream was in an indeterminate state and was used in a compression - * call that fully reset the state (LZ4_compress_HC_extStateHC()) and that - * returned success - * - * Note: - * A stream that was last used in a compression call that returned an error - * may be passed to this function. However, it will be fully reset, which will - * clear any existing history and settings from the context. - */ -LZ4LIB_STATIC_API void LZ4_resetStreamHC_fast( - LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel); - -/*! LZ4_compress_HC_extStateHC_fastReset() : - * A variant of LZ4_compress_HC_extStateHC(). - * - * Using this variant avoids an expensive initialization step. It is only safe - * to call if the state buffer is known to be correctly initialized already - * (see above comment on LZ4_resetStreamHC_fast() for a definition of - * "correctly initialized"). 
From a high level, the difference is that this - * function initializes the provided state with a call to - * LZ4_resetStreamHC_fast() while LZ4_compress_HC_extStateHC() starts with a - * call to LZ4_resetStreamHC(). - */ -LZ4LIB_STATIC_API int LZ4_compress_HC_extStateHC_fastReset ( - void* state, - const char* src, char* dst, - int srcSize, int dstCapacity, - int compressionLevel); - -/*! LZ4_attach_HC_dictionary() : - * This is an experimental API that allows for the efficient use of a - * static dictionary many times. - * - * Rather than re-loading the dictionary buffer into a working context before - * each compression, or copying a pre-loaded dictionary's LZ4_streamHC_t into a - * working LZ4_streamHC_t, this function introduces a no-copy setup mechanism, - * in which the working stream references the dictionary stream in-place. - * - * Several assumptions are made about the state of the dictionary stream. - * Currently, only streams which have been prepared by LZ4_loadDictHC() should - * be expected to work. - * - * Alternatively, the provided dictionary stream pointer may be NULL, in which - * case any existing dictionary stream is unset. - * - * A dictionary should only be attached to a stream without any history (i.e., - * a stream that has just been reset). - * - * The dictionary will remain attached to the working stream only for the - * current stream session. Calls to LZ4_resetStreamHC(_fast) will remove the - * dictionary context association from the working stream. The dictionary - * stream (and source buffer) must remain in-place / accessible / unchanged - * through the lifetime of the stream session. 
- */ -LZ4LIB_STATIC_API void LZ4_attach_HC_dictionary( - LZ4_streamHC_t *working_stream, - const LZ4_streamHC_t *dictionary_stream); - -#if defined (__cplusplus) -} -#endif - -#endif /* LZ4_HC_SLO_098092834 */ -#endif /* LZ4_HC_STATIC_LINKING_ONLY */ diff --git a/dist/darwin_amd64/include/rocksdb/advanced_options.h b/dist/darwin_amd64/include/rocksdb/advanced_options.h deleted file mode 100644 index c76c604..0000000 --- a/dist/darwin_amd64/include/rocksdb/advanced_options.h +++ /dev/null @@ -1,806 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. - -#pragma once - -#include - -#include "rocksdb/compression_type.h" -#include "rocksdb/memtablerep.h" -#include "rocksdb/universal_compaction.h" - -namespace ROCKSDB_NAMESPACE { - -class Slice; -class SliceTransform; -class TablePropertiesCollectorFactory; -class TableFactory; -struct Options; - -enum CompactionStyle : char { - // level based compaction style - kCompactionStyleLevel = 0x0, - // Universal compaction style - // Not supported in ROCKSDB_LITE. - kCompactionStyleUniversal = 0x1, - // FIFO compaction style - // Not supported in ROCKSDB_LITE - kCompactionStyleFIFO = 0x2, - // Disable background compaction. Compaction jobs are submitted - // via CompactFiles(). - // Not supported in ROCKSDB_LITE - kCompactionStyleNone = 0x3, -}; - -// In Level-based compaction, it Determines which file from a level to be -// picked to merge to the next level. We suggest people try -// kMinOverlappingRatio first when you tune your database. 
-enum CompactionPri : char { - // Slightly prioritize larger files by size compensated by #deletes - kByCompensatedSize = 0x0, - // First compact files whose data's latest update time is oldest. - // Try this if you only update some hot keys in small ranges. - kOldestLargestSeqFirst = 0x1, - // First compact files whose range hasn't been compacted to the next level - // for the longest. If your updates are random across the key space, - // write amplification is slightly better with this option. - kOldestSmallestSeqFirst = 0x2, - // First compact files whose ratio between overlapping size in next level - // and its size is the smallest. It in many cases can optimize write - // amplification. - kMinOverlappingRatio = 0x3, -}; - -struct CompactionOptionsFIFO { - // once the total sum of table files reaches this, we will delete the oldest - // table file - // Default: 1GB - uint64_t max_table_files_size; - - // If true, try to do compaction to compact smaller files into larger ones. - // Minimum files to compact follows options.level0_file_num_compaction_trigger - // and compaction won't trigger if average compact bytes per del file is - // larger than options.write_buffer_size. This is to protect large files - // from being compacted again. - // Default: false; - bool allow_compaction = false; - - CompactionOptionsFIFO() : max_table_files_size(1 * 1024 * 1024 * 1024) {} - CompactionOptionsFIFO(uint64_t _max_table_files_size, bool _allow_compaction) - : max_table_files_size(_max_table_files_size), - allow_compaction(_allow_compaction) {} -}; - -// Compression options for different compression algorithms like Zlib -struct CompressionOptions { - // RocksDB's generic default compression level. Internally it'll be translated - // to the default compression level specific to the library being used (see - // comment above `ColumnFamilyOptions::compression`). 
- // - // The default value is the max 16-bit int as it'll be written out in OPTIONS - // file, which should be portable. - const static int kDefaultCompressionLevel = 32767; - - int window_bits; - int level; - int strategy; - - // Maximum size of dictionaries used to prime the compression library. - // Enabling dictionary can improve compression ratios when there are - // repetitions across data blocks. - // - // The dictionary is created by sampling the SST file data. If - // `zstd_max_train_bytes` is nonzero, the samples are passed through zstd's - // dictionary generator. Otherwise, the random samples are used directly as - // the dictionary. - // - // When compression dictionary is disabled, we compress and write each block - // before buffering data for the next one. When compression dictionary is - // enabled, we buffer all SST file data in-memory so we can sample it, as data - // can only be compressed and written after the dictionary has been finalized. - // So users of this feature may see increased memory usage. - // - // Default: 0. - uint32_t max_dict_bytes; - - // Maximum size of training data passed to zstd's dictionary trainer. Using - // zstd's dictionary trainer can achieve even better compression ratio - // improvements than using `max_dict_bytes` alone. - // - // The training data will be used to generate a dictionary of max_dict_bytes. - // - // Default: 0. - uint32_t zstd_max_train_bytes; - - // Number of threads for parallel compression. - // Parallel compression is enabled only if threads > 1. - // THE FEATURE IS STILL EXPERIMENTAL - // - // This option is valid only when BlockBasedTable is used. - // - // When parallel compression is enabled, SST size file sizes might be - // more inflated compared to the target size, because more data of unknown - // compressed size is in flight when compression is parallelized. 
To be - // reasonably accurate, this inflation is also estimated by using historical - // compression ratio and current bytes inflight. - // - // Default: 1. - uint32_t parallel_threads; - - // When the compression options are set by the user, it will be set to "true". - // For bottommost_compression_opts, to enable it, user must set enabled=true. - // Otherwise, bottommost compression will use compression_opts as default - // compression options. - // - // For compression_opts, if compression_opts.enabled=false, it is still - // used as compression options for compression process. - // - // Default: false. - bool enabled; - - CompressionOptions() - : window_bits(-14), - level(kDefaultCompressionLevel), - strategy(0), - max_dict_bytes(0), - zstd_max_train_bytes(0), - parallel_threads(1), - enabled(false) {} - CompressionOptions(int wbits, int _lev, int _strategy, int _max_dict_bytes, - int _zstd_max_train_bytes, int _parallel_threads, - bool _enabled) - : window_bits(wbits), - level(_lev), - strategy(_strategy), - max_dict_bytes(_max_dict_bytes), - zstd_max_train_bytes(_zstd_max_train_bytes), - parallel_threads(_parallel_threads), - enabled(_enabled) {} -}; - -enum UpdateStatus { // Return status For inplace update callback - UPDATE_FAILED = 0, // Nothing to update - UPDATED_INPLACE = 1, // Value updated inplace - UPDATED = 2, // No inplace update. Merged value set -}; - -struct AdvancedColumnFamilyOptions { - // The maximum number of write buffers that are built up in memory. - // The default and the minimum number is 2, so that when 1 write buffer - // is being flushed to storage, new writes can continue to the other - // write buffer. - // If max_write_buffer_number > 3, writing will be slowed down to - // options.delayed_write_rate if we are writing to the last write buffer - // allowed. 
- // - // Default: 2 - // - // Dynamically changeable through SetOptions() API - int max_write_buffer_number = 2; - - // The minimum number of write buffers that will be merged together - // before writing to storage. If set to 1, then - // all write buffers are flushed to L0 as individual files and this increases - // read amplification because a get request has to check in all of these - // files. Also, an in-memory merge may result in writing lesser - // data to storage if there are duplicate records in each of these - // individual write buffers. Default: 1 - int min_write_buffer_number_to_merge = 1; - - // DEPRECATED - // The total maximum number of write buffers to maintain in memory including - // copies of buffers that have already been flushed. Unlike - // max_write_buffer_number, this parameter does not affect flushing. - // This parameter is being replaced by max_write_buffer_size_to_maintain. - // If both parameters are set to non-zero values, this parameter will be - // ignored. - int max_write_buffer_number_to_maintain = 0; - - // The total maximum size(bytes) of write buffers to maintain in memory - // including copies of buffers that have already been flushed. This parameter - // only affects trimming of flushed buffers and does not affect flushing. - // This controls the maximum amount of write history that will be available - // in memory for conflict checking when Transactions are used. The actual - // size of write history (flushed Memtables) might be higher than this limit - // if further trimming will reduce write history total size below this - // limit. For example, if max_write_buffer_size_to_maintain is set to 64MB, - // and there are three flushed Memtables, with sizes of 32MB, 20MB, 20MB. - // Because trimming the next Memtable of size 20MB will reduce total memory - // usage to 52MB which is below the limit, RocksDB will stop trimming. 
- // - // When using an OptimisticTransactionDB: - // If this value is too low, some transactions may fail at commit time due - // to not being able to determine whether there were any write conflicts. - // - // When using a TransactionDB: - // If Transaction::SetSnapshot is used, TransactionDB will read either - // in-memory write buffers or SST files to do write-conflict checking. - // Increasing this value can reduce the number of reads to SST files - // done for conflict detection. - // - // Setting this value to 0 will cause write buffers to be freed immediately - // after they are flushed. If this value is set to -1, - // 'max_write_buffer_number * write_buffer_size' will be used. - // - // Default: - // If using a TransactionDB/OptimisticTransactionDB, the default value will - // be set to the value of 'max_write_buffer_number * write_buffer_size' - // if it is not explicitly set by the user. Otherwise, the default is 0. - int64_t max_write_buffer_size_to_maintain = 0; - - // Allows thread-safe inplace updates. If this is true, there is no way to - // achieve point-in-time consistency using snapshot or iterator (assuming - // concurrent updates). Hence iterator and multi-get will return results - // which are not consistent as of any point-in-time. - // If inplace_callback function is not set, - // Put(key, new_value) will update inplace the existing_value iff - // * key exists in current memtable - // * new sizeof(new_value) <= sizeof(existing_value) - // * existing_value for that key is a put i.e. kTypeValue - // If inplace_callback function is set, check doc for inplace_callback. - // Default: false. - bool inplace_update_support = false; - - // Number of locks used for inplace update - // Default: 10000, if inplace_update_support = true, else 0. - // - // Dynamically changeable through SetOptions() API - size_t inplace_update_num_locks = 10000; - - // existing_value - pointer to previous value (from both memtable and sst). 
- // nullptr if key doesn't exist - // existing_value_size - pointer to size of existing_value). - // nullptr if key doesn't exist - // delta_value - Delta value to be merged with the existing_value. - // Stored in transaction logs. - // merged_value - Set when delta is applied on the previous value. - - // Applicable only when inplace_update_support is true, - // this callback function is called at the time of updating the memtable - // as part of a Put operation, lets say Put(key, delta_value). It allows the - // 'delta_value' specified as part of the Put operation to be merged with - // an 'existing_value' of the key in the database. - - // If the merged value is smaller in size that the 'existing_value', - // then this function can update the 'existing_value' buffer inplace and - // the corresponding 'existing_value'_size pointer, if it wishes to. - // The callback should return UpdateStatus::UPDATED_INPLACE. - // In this case. (In this case, the snapshot-semantics of the rocksdb - // Iterator is not atomic anymore). - - // If the merged value is larger in size than the 'existing_value' or the - // application does not wish to modify the 'existing_value' buffer inplace, - // then the merged value should be returned via *merge_value. It is set by - // merging the 'existing_value' and the Put 'delta_value'. The callback should - // return UpdateStatus::UPDATED in this case. This merged value will be added - // to the memtable. - - // If merging fails or the application does not wish to take any action, - // then the callback should return UpdateStatus::UPDATE_FAILED. - - // Please remember that the original call from the application is Put(key, - // delta_value). So the transaction log (if enabled) will still contain (key, - // delta_value). The 'merged_value' is not stored in the transaction log. - // Hence the inplace_callback function should be consistent across db reopens. 
- - // Default: nullptr - UpdateStatus (*inplace_callback)(char* existing_value, - uint32_t* existing_value_size, - Slice delta_value, - std::string* merged_value) = nullptr; - - // if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0, - // create prefix bloom for memtable with the size of - // write_buffer_size * memtable_prefix_bloom_size_ratio. - // If it is larger than 0.25, it is sanitized to 0.25. - // - // Default: 0 (disable) - // - // Dynamically changeable through SetOptions() API - double memtable_prefix_bloom_size_ratio = 0.0; - - // Enable whole key bloom filter in memtable. Note this will only take effect - // if memtable_prefix_bloom_size_ratio is not 0. Enabling whole key filtering - // can potentially reduce CPU usage for point-look-ups. - // - // Default: false (disable) - // - // Dynamically changeable through SetOptions() API - bool memtable_whole_key_filtering = false; - - // Page size for huge page for the arena used by the memtable. If <=0, it - // won't allocate from huge page but from malloc. - // Users are responsible to reserve huge pages for it to be allocated. For - // example: - // sysctl -w vm.nr_hugepages=20 - // See linux doc Documentation/vm/hugetlbpage.txt - // If there isn't enough free huge page available, it will fall back to - // malloc. - // - // Dynamically changeable through SetOptions() API - size_t memtable_huge_page_size = 0; - - // If non-nullptr, memtable will use the specified function to extract - // prefixes for keys, and for each prefix maintain a hint of insert location - // to reduce CPU usage for inserting keys with the prefix. Keys out of - // domain of the prefix extractor will be insert without using hints. - // - // Currently only the default skiplist based memtable implements the feature. - // All other memtable implementation will ignore the option. It incurs ~250 - // additional bytes of memory overhead to store a hint for each prefix. 
- // Also concurrent writes (when allow_concurrent_memtable_write is true) will - // ignore the option. - // - // The option is best suited for workloads where keys will likely to insert - // to a location close the last inserted key with the same prefix. - // One example could be inserting keys of the form (prefix + timestamp), - // and keys of the same prefix always comes in with time order. Another - // example would be updating the same key over and over again, in which case - // the prefix can be the key itself. - // - // Default: nullptr (disable) - std::shared_ptr - memtable_insert_with_hint_prefix_extractor = nullptr; - - // Control locality of bloom filter probes to improve CPU cache hit rate. - // This option now only applies to plaintable prefix bloom. This - // optimization is turned off when set to 0, and positive number to turn - // it on. - // Default: 0 - uint32_t bloom_locality = 0; - - // size of one block in arena memory allocation. - // If <= 0, a proper value is automatically calculated (usually 1/8 of - // writer_buffer_size, rounded up to a multiple of 4KB). - // - // There are two additional restriction of the specified size: - // (1) size should be in the range of [4096, 2 << 30] and - // (2) be the multiple of the CPU word (which helps with the memory - // alignment). - // - // We'll automatically check and adjust the size number to make sure it - // conforms to the restrictions. - // - // Default: 0 - // - // Dynamically changeable through SetOptions() API - size_t arena_block_size = 0; - - // Different levels can have different compression policies. There - // are cases where most lower levels would like to use quick compression - // algorithms while the higher levels (which have more data) use - // compression algorithms that have better compression but could - // be slower. This array, if non-empty, should have an entry for - // each level of the database; these override the value specified in - // the previous field 'compression'. 
- // - // NOTICE if level_compaction_dynamic_level_bytes=true, - // compression_per_level[0] still determines L0, but other elements - // of the array are based on base level (the level L0 files are merged - // to), and may not match the level users see from info log for metadata. - // If L0 files are merged to level-n, then, for i>0, compression_per_level[i] - // determines compaction type for level n+i-1. - // For example, if we have three 5 levels, and we determine to merge L0 - // data to L4 (which means L1..L3 will be empty), then the new files go to - // L4 uses compression type compression_per_level[1]. - // If now L0 is merged to L2. Data goes to L2 will be compressed - // according to compression_per_level[1], L3 using compression_per_level[2] - // and L4 using compression_per_level[3]. Compaction for each level can - // change when data grows. - std::vector compression_per_level; - - // Number of levels for this database - int num_levels = 7; - - // Soft limit on number of level-0 files. We start slowing down writes at this - // point. A value <0 means that no writing slow down will be triggered by - // number of files in level-0. - // - // Default: 20 - // - // Dynamically changeable through SetOptions() API - int level0_slowdown_writes_trigger = 20; - - // Maximum number of level-0 files. We stop writes at this point. - // - // Default: 36 - // - // Dynamically changeable through SetOptions() API - int level0_stop_writes_trigger = 36; - - // Target file size for compaction. - // target_file_size_base is per-file size for level-1. - // Target file size for level L can be calculated by - // target_file_size_base * (target_file_size_multiplier ^ (L-1)) - // For example, if target_file_size_base is 2MB and - // target_file_size_multiplier is 10, then each file on level-1 will - // be 2MB, and each file on level 2 will be 20MB, - // and each file on level-3 will be 200MB. - // - // Default: 64MB. 
- // - // Dynamically changeable through SetOptions() API - uint64_t target_file_size_base = 64 * 1048576; - - // By default target_file_size_multiplier is 1, which means - // by default files in different levels will have similar size. - // - // Dynamically changeable through SetOptions() API - int target_file_size_multiplier = 1; - - // If true, RocksDB will pick target size of each level dynamically. - // We will pick a base level b >= 1. L0 will be directly merged into level b, - // instead of always into level 1. Level 1 to b-1 need to be empty. - // We try to pick b and its target size so that - // 1. target size is in the range of - // (max_bytes_for_level_base / max_bytes_for_level_multiplier, - // max_bytes_for_level_base] - // 2. target size of the last level (level num_levels-1) equals to extra size - // of the level. - // At the same time max_bytes_for_level_multiplier and - // max_bytes_for_level_multiplier_additional are still satisfied. - // (When L0 is too large, we make some adjustment. See below.) - // - // With this option on, from an empty DB, we make last level the base level, - // which means merging L0 data into the last level, until it exceeds - // max_bytes_for_level_base. And then we make the second last level to be - // base level, to start to merge L0 data to second last level, with its - // target size to be 1/max_bytes_for_level_multiplier of the last level's - // extra size. After the data accumulates more so that we need to move the - // base level to the third last one, and so on. - // - // For example, assume max_bytes_for_level_multiplier=10, num_levels=6, - // and max_bytes_for_level_base=10MB. - // Target sizes of level 1 to 5 starts with: - // [- - - - 10MB] - // with base level is level. Target sizes of level 1 to 4 are not applicable - // because they will not be used. 
- // Until the size of Level 5 grows to more than 10MB, say 11MB, we make - // base target to level 4 and now the targets looks like: - // [- - - 1.1MB 11MB] - // While data are accumulated, size targets are tuned based on actual data - // of level 5. When level 5 has 50MB of data, the target is like: - // [- - - 5MB 50MB] - // Until level 5's actual size is more than 100MB, say 101MB. Now if we keep - // level 4 to be the base level, its target size needs to be 10.1MB, which - // doesn't satisfy the target size range. So now we make level 3 the target - // size and the target sizes of the levels look like: - // [- - 1.01MB 10.1MB 101MB] - // In the same way, while level 5 further grows, all levels' targets grow, - // like - // [- - 5MB 50MB 500MB] - // Until level 5 exceeds 1000MB and becomes 1001MB, we make level 2 the - // base level and make levels' target sizes like this: - // [- 1.001MB 10.01MB 100.1MB 1001MB] - // and go on... - // - // By doing it, we give max_bytes_for_level_multiplier a priority against - // max_bytes_for_level_base, for a more predictable LSM tree shape. It is - // useful to limit worse case space amplification. - // - // - // If the compaction from L0 is lagged behind, a special mode will be turned - // on to prioritize write amplification against max_bytes_for_level_multiplier - // or max_bytes_for_level_base. The L0 compaction is lagged behind by looking - // at number of L0 files and total L0 size. If number of L0 files is at least - // the double of level0_file_num_compaction_trigger, or the total size is - // at least max_bytes_for_level_base, this mode is on. The target of L1 grows - // to the actual data size in L0, and then determine the target for each level - // so that each level will have the same level multiplier. - // - // For example, when L0 size is 100MB, the size of last level is 1600MB, - // max_bytes_for_level_base = 80MB, and max_bytes_for_level_multiplier = 10. 
- // Since L0 size is larger than max_bytes_for_level_base, this is a L0 - // compaction backlogged mode. So that the L1 size is determined to be 100MB. - // Based on max_bytes_for_level_multiplier = 10, at least 3 non-0 levels will - // be needed. The level multiplier will be calculated to be 4 and the three - // levels' target to be [100MB, 400MB, 1600MB]. - // - // In this mode, The number of levels will be no more than the normal mode, - // and the level multiplier will be lower. The write amplification will - // likely to be reduced. - // - // - // max_bytes_for_level_multiplier_additional is ignored with this flag on. - // - // Turning this feature on or off for an existing DB can cause unexpected - // LSM tree structure so it's not recommended. - // - // Default: false - bool level_compaction_dynamic_level_bytes = false; - - // Default: 10. - // - // Dynamically changeable through SetOptions() API - double max_bytes_for_level_multiplier = 10; - - // Different max-size multipliers for different levels. - // These are multiplied by max_bytes_for_level_multiplier to arrive - // at the max-size of each level. - // - // Default: 1 - // - // Dynamically changeable through SetOptions() API - std::vector max_bytes_for_level_multiplier_additional = - std::vector(num_levels, 1); - - // We try to limit number of bytes in one compaction to be lower than this - // threshold. But it's not guaranteed. - // Value 0 will be sanitized. - // - // Default: target_file_size_base * 25 - // - // Dynamically changeable through SetOptions() API - uint64_t max_compaction_bytes = 0; - - // All writes will be slowed down to at least delayed_write_rate if estimated - // bytes needed to be compaction exceed this threshold. - // - // Default: 64GB - // - // Dynamically changeable through SetOptions() API - uint64_t soft_pending_compaction_bytes_limit = 64 * 1073741824ull; - - // All writes are stopped if estimated bytes needed to be compaction exceed - // this threshold. 
- // - // Default: 256GB - // - // Dynamically changeable through SetOptions() API - uint64_t hard_pending_compaction_bytes_limit = 256 * 1073741824ull; - - // The compaction style. Default: kCompactionStyleLevel - CompactionStyle compaction_style = kCompactionStyleLevel; - - // If level compaction_style = kCompactionStyleLevel, for each level, - // which files are prioritized to be picked to compact. - // Default: kMinOverlappingRatio - CompactionPri compaction_pri = kMinOverlappingRatio; - - // The options needed to support Universal Style compactions - // - // Dynamically changeable through SetOptions() API - // Dynamic change example: - // SetOptions("compaction_options_universal", "{size_ratio=2;}") - CompactionOptionsUniversal compaction_options_universal; - - // The options for FIFO compaction style - // - // Dynamically changeable through SetOptions() API - // Dynamic change example: - // SetOptions("compaction_options_fifo", "{max_table_files_size=100;}") - CompactionOptionsFIFO compaction_options_fifo; - - // An iteration->Next() sequentially skips over keys with the same - // user-key unless this option is set. This number specifies the number - // of keys (with the same userkey) that will be sequentially - // skipped before a reseek is issued. - // - // Default: 8 - // - // Dynamically changeable through SetOptions() API - uint64_t max_sequential_skip_in_iterations = 8; - - // This is a factory that provides MemTableRep objects. - // Default: a factory that provides a skip-list-based implementation of - // MemTableRep. - std::shared_ptr memtable_factory = - std::shared_ptr(new SkipListFactory); - - // Block-based table related options are moved to BlockBasedTableOptions. 
- // Related options that were originally here but now moved include: - // no_block_cache - // block_cache - // block_cache_compressed - // block_size - // block_size_deviation - // block_restart_interval - // filter_policy - // whole_key_filtering - // If you'd like to customize some of these options, you will need to - // use NewBlockBasedTableFactory() to construct a new table factory. - - // This option allows user to collect their own interested statistics of - // the tables. - // Default: empty vector -- no user-defined statistics collection will be - // performed. - typedef std::vector> - TablePropertiesCollectorFactories; - TablePropertiesCollectorFactories table_properties_collector_factories; - - // Maximum number of successive merge operations on a key in the memtable. - // - // When a merge operation is added to the memtable and the maximum number of - // successive merges is reached, the value of the key will be calculated and - // inserted into the memtable instead of the merge operation. This will - // ensure that there are never more than max_successive_merges merge - // operations in the memtable. - // - // Default: 0 (disabled) - // - // Dynamically changeable through SetOptions() API - size_t max_successive_merges = 0; - - // This flag specifies that the implementation should optimize the filters - // mainly for cases where keys are found rather than also optimize for keys - // missed. This would be used in cases where the application knows that - // there are very few misses or the performance in the case of misses is not - // important. - // - // For now, this flag allows us to not store filters for the last level i.e - // the largest level which contains data of the LSM store. For keys which - // are hits, the filters in this level are not useful because we will search - // for the data anyway. 
NOTE: the filters in other levels are still useful - // even for key hit because they tell us whether to look in that level or go - // to the higher level. - // - // Default: false - bool optimize_filters_for_hits = false; - - // During flush or compaction, check whether keys inserted to output files - // are in order. - // - // Default: true - // - // Dynamically changeable through SetOptions() API - bool check_flush_compaction_key_order = true; - - // After writing every SST file, reopen it and read all the keys. - // Checks the hash of all of the keys and values written versus the - // keys in the file and signals a corruption if they do not match - // - // Default: false - // - // Dynamically changeable through SetOptions() API - bool paranoid_file_checks = false; - - // In debug mode, RocksDB runs consistency checks on the LSM every time the - // LSM changes (Flush, Compaction, AddFile). When this option is true, these - // checks are also enabled in release mode. These checks were historically - // disabled in release mode, but are now enabled by default for proactive - // corruption detection, at almost no cost in extra CPU. - // Default: true - bool force_consistency_checks = true; - - // Measure IO stats in compactions and flushes, if true. - // - // Default: false - // - // Dynamically changeable through SetOptions() API - bool report_bg_io_stats = false; - - // Files older than TTL will go through the compaction process. - // Pre-req: This needs max_open_files to be set to -1. - // In Level: Non-bottom-level files older than TTL will go through the - // compation process. - // In FIFO: Files older than TTL will be deleted. - // unit: seconds. Ex: 1 day = 1 * 24 * 60 * 60 - // In FIFO, this option will have the same meaning as - // periodic_compaction_seconds. Whichever stricter will be used. - // 0 means disabling. - // UINT64_MAX - 1 (0xfffffffffffffffe) is special flag to allow RocksDB to - // pick default. 
- // - // Default: 30 days for leveled compaction + block based table. disable - // otherwise. - // - // Dynamically changeable through SetOptions() API - uint64_t ttl = 0xfffffffffffffffe; - - // Files older than this value will be picked up for compaction, and - // re-written to the same level as they were before. - // - // A file's age is computed by looking at file_creation_time or creation_time - // table properties in order, if they have valid non-zero values; if not, the - // age is based on the file's last modified time (given by the underlying - // Env). - // - // Supported in Level and FIFO compaction. - // In FIFO compaction, this option has the same meaning as TTL and whichever - // stricter will be used. - // Pre-req: max_open_file == -1. - // unit: seconds. Ex: 7 days = 7 * 24 * 60 * 60 - // - // Values: - // 0: Turn off Periodic compactions. - // UINT64_MAX - 1 (i.e 0xfffffffffffffffe): Let RocksDB control this feature - // as needed. For now, RocksDB will change this value to 30 days - // (i.e 30 * 24 * 60 * 60) so that every file goes through the compaction - // process at least once every 30 days if not compacted sooner. - // In FIFO compaction, since the option has the same meaning as ttl, - // when this value is left default, and ttl is left to 0, 30 days will be - // used. Otherwise, min(ttl, periodic_compaction_seconds) will be used. - // - // Default: UINT64_MAX - 1 (allow RocksDB to auto-tune) - // - // Dynamically changeable through SetOptions() API - uint64_t periodic_compaction_seconds = 0xfffffffffffffffe; - - // If this option is set then 1 in N blocks are compressed - // using a fast (lz4) and slow (zstd) compression algorithm. - // The compressibility is reported as stats and the stored - // data is left uncompressed (unless compression is also requested). 
- uint64_t sample_for_compression = 0; - - // UNDER CONSTRUCTION -- DO NOT USE - // When set, large values (blobs) are written to separate blob files, and - // only pointers to them are stored in SST files. This can reduce write - // amplification for large-value use cases at the cost of introducing a level - // of indirection for reads. See also the options min_blob_size, - // blob_file_size, and blob_compression_type below. - // - // Default: false - // - // Dynamically changeable through the SetOptions() API - bool enable_blob_files = false; - - // UNDER CONSTRUCTION -- DO NOT USE - // The size of the smallest value to be stored separately in a blob file. - // Values which have an uncompressed size smaller than this threshold are - // stored alongside the keys in SST files in the usual fashion. A value of - // zero for this option means that all values are stored in blob files. Note - // that enable_blob_files has to be set in order for this option to have any - // effect. - // - // Default: 0 - // - // Dynamically changeable through the SetOptions() API - uint64_t min_blob_size = 0; - - // UNDER CONSTRUCTION -- DO NOT USE - // The size limit for blob files. When writing blob files, a new file is - // opened once this limit is reached. Note that enable_blob_files has to be - // set in order for this option to have any effect. - // - // Default: 256 MB - // - // Dynamically changeable through the SetOptions() API - uint64_t blob_file_size = 1ULL << 28; - - // UNDER CONSTRUCTION -- DO NOT USE - // The compression algorithm to use for large values stored in blob files. - // Note that enable_blob_files has to be set in order for this option to have - // any effect. 
- // - // Default: no compression - // - // Dynamically changeable through the SetOptions() API - CompressionType blob_compression_type = kNoCompression; - - // Create ColumnFamilyOptions with default values for all fields - AdvancedColumnFamilyOptions(); - // Create ColumnFamilyOptions from Options - explicit AdvancedColumnFamilyOptions(const Options& options); - - // ---------------- OPTIONS NOT SUPPORTED ANYMORE ---------------- - - // NOT SUPPORTED ANYMORE - // This does not do anything anymore. - int max_mem_compaction_level; - - // NOT SUPPORTED ANYMORE -- this options is no longer used - // Puts are delayed to options.delayed_write_rate when any level has a - // compaction score that exceeds soft_rate_limit. This is ignored when == 0.0. - // - // Default: 0 (disabled) - // - // Dynamically changeable through SetOptions() API - double soft_rate_limit = 0.0; - - // NOT SUPPORTED ANYMORE -- this options is no longer used - double hard_rate_limit = 0.0; - - // NOT SUPPORTED ANYMORE -- this options is no longer used - unsigned int rate_limit_delay_max_milliseconds = 100; - - // NOT SUPPORTED ANYMORE - // Does not have any effect. - bool purge_redundant_kvs_while_flush = true; -}; - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/c.h b/dist/darwin_amd64/include/rocksdb/c.h deleted file mode 100644 index b8c72ae..0000000 --- a/dist/darwin_amd64/include/rocksdb/c.h +++ /dev/null @@ -1,2090 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -/* Copyright (c) 2011 The LevelDB Authors. All rights reserved. - Use of this source code is governed by a BSD-style license that can be - found in the LICENSE file. See the AUTHORS file for names of contributors. - - C bindings for rocksdb. 
May be useful as a stable ABI that can be - used by programs that keep rocksdb in a shared library, or for - a JNI api. - - Does not support: - . getters for the option types - . custom comparators that implement key shortening - . capturing post-write-snapshot - . custom iter, db, env, cache implementations using just the C bindings - - Some conventions: - - (1) We expose just opaque struct pointers and functions to clients. - This allows us to change internal representations without having to - recompile clients. - - (2) For simplicity, there is no equivalent to the Slice type. Instead, - the caller has to pass the pointer and length as separate - arguments. - - (3) Errors are represented by a null-terminated c string. NULL - means no error. All operations that can raise an error are passed - a "char** errptr" as the last argument. One of the following must - be true on entry: - *errptr == NULL - *errptr points to a malloc()ed null-terminated error message - On success, a leveldb routine leaves *errptr unchanged. - On failure, leveldb frees the old value of *errptr and - set *errptr to a malloc()ed error message. - - (4) Bools have the type unsigned char (0 == false; rest == true) - - (5) All of the pointer arguments must be non-NULL. 
-*/ - -#pragma once - -#ifdef _WIN32 -#ifdef ROCKSDB_DLL -#ifdef ROCKSDB_LIBRARY_EXPORTS -#define ROCKSDB_LIBRARY_API __declspec(dllexport) -#else -#define ROCKSDB_LIBRARY_API __declspec(dllimport) -#endif -#else -#define ROCKSDB_LIBRARY_API -#endif -#else -#define ROCKSDB_LIBRARY_API -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include -#include - -/* Exported types */ - -typedef struct rocksdb_t rocksdb_t; -typedef struct rocksdb_backup_engine_t rocksdb_backup_engine_t; -typedef struct rocksdb_backup_engine_info_t rocksdb_backup_engine_info_t; -typedef struct rocksdb_restore_options_t rocksdb_restore_options_t; -typedef struct rocksdb_cache_t rocksdb_cache_t; -typedef struct rocksdb_compactionfilter_t rocksdb_compactionfilter_t; -typedef struct rocksdb_compactionfiltercontext_t - rocksdb_compactionfiltercontext_t; -typedef struct rocksdb_compactionfilterfactory_t - rocksdb_compactionfilterfactory_t; -typedef struct rocksdb_comparator_t rocksdb_comparator_t; -typedef struct rocksdb_dbpath_t rocksdb_dbpath_t; -typedef struct rocksdb_env_t rocksdb_env_t; -typedef struct rocksdb_fifo_compaction_options_t rocksdb_fifo_compaction_options_t; -typedef struct rocksdb_filelock_t rocksdb_filelock_t; -typedef struct rocksdb_filterpolicy_t rocksdb_filterpolicy_t; -typedef struct rocksdb_flushoptions_t rocksdb_flushoptions_t; -typedef struct rocksdb_iterator_t rocksdb_iterator_t; -typedef struct rocksdb_logger_t rocksdb_logger_t; -typedef struct rocksdb_mergeoperator_t rocksdb_mergeoperator_t; -typedef struct rocksdb_options_t rocksdb_options_t; -typedef struct rocksdb_compactoptions_t rocksdb_compactoptions_t; -typedef struct rocksdb_block_based_table_options_t - rocksdb_block_based_table_options_t; -typedef struct rocksdb_cuckoo_table_options_t - rocksdb_cuckoo_table_options_t; -typedef struct rocksdb_randomfile_t rocksdb_randomfile_t; -typedef struct rocksdb_readoptions_t rocksdb_readoptions_t; -typedef struct rocksdb_seqfile_t rocksdb_seqfile_t; 
-typedef struct rocksdb_slicetransform_t rocksdb_slicetransform_t; -typedef struct rocksdb_snapshot_t rocksdb_snapshot_t; -typedef struct rocksdb_writablefile_t rocksdb_writablefile_t; -typedef struct rocksdb_writebatch_t rocksdb_writebatch_t; -typedef struct rocksdb_writebatch_wi_t rocksdb_writebatch_wi_t; -typedef struct rocksdb_writeoptions_t rocksdb_writeoptions_t; -typedef struct rocksdb_universal_compaction_options_t rocksdb_universal_compaction_options_t; -typedef struct rocksdb_livefiles_t rocksdb_livefiles_t; -typedef struct rocksdb_column_family_handle_t rocksdb_column_family_handle_t; -typedef struct rocksdb_envoptions_t rocksdb_envoptions_t; -typedef struct rocksdb_ingestexternalfileoptions_t rocksdb_ingestexternalfileoptions_t; -typedef struct rocksdb_sstfilewriter_t rocksdb_sstfilewriter_t; -typedef struct rocksdb_ratelimiter_t rocksdb_ratelimiter_t; -typedef struct rocksdb_perfcontext_t rocksdb_perfcontext_t; -typedef struct rocksdb_pinnableslice_t rocksdb_pinnableslice_t; -typedef struct rocksdb_transactiondb_options_t rocksdb_transactiondb_options_t; -typedef struct rocksdb_transactiondb_t rocksdb_transactiondb_t; -typedef struct rocksdb_transaction_options_t rocksdb_transaction_options_t; -typedef struct rocksdb_optimistictransactiondb_t - rocksdb_optimistictransactiondb_t; -typedef struct rocksdb_optimistictransaction_options_t - rocksdb_optimistictransaction_options_t; -typedef struct rocksdb_transaction_t rocksdb_transaction_t; -typedef struct rocksdb_checkpoint_t rocksdb_checkpoint_t; -typedef struct rocksdb_wal_iterator_t rocksdb_wal_iterator_t; -typedef struct rocksdb_wal_readoptions_t rocksdb_wal_readoptions_t; -typedef struct rocksdb_memory_consumers_t rocksdb_memory_consumers_t; -typedef struct rocksdb_memory_usage_t rocksdb_memory_usage_t; - -/* DB operations */ - -extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open( - const rocksdb_options_t* options, const char* name, char** errptr); - -extern ROCKSDB_LIBRARY_API rocksdb_t* 
rocksdb_open_with_ttl( - const rocksdb_options_t* options, const char* name, int ttl, char** errptr); - -extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_for_read_only( - const rocksdb_options_t* options, const char* name, - unsigned char error_if_wal_file_exists, char** errptr); - -extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_as_secondary( - const rocksdb_options_t* options, const char* name, - const char* secondary_path, char** errptr); - -extern ROCKSDB_LIBRARY_API rocksdb_backup_engine_t* rocksdb_backup_engine_open( - const rocksdb_options_t* options, const char* path, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_create_new_backup( - rocksdb_backup_engine_t* be, rocksdb_t* db, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_create_new_backup_flush( - rocksdb_backup_engine_t* be, rocksdb_t* db, unsigned char flush_before_backup, - char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_purge_old_backups( - rocksdb_backup_engine_t* be, uint32_t num_backups_to_keep, char** errptr); - -extern ROCKSDB_LIBRARY_API rocksdb_restore_options_t* -rocksdb_restore_options_create(); -extern ROCKSDB_LIBRARY_API void rocksdb_restore_options_destroy( - rocksdb_restore_options_t* opt); -extern ROCKSDB_LIBRARY_API void rocksdb_restore_options_set_keep_log_files( - rocksdb_restore_options_t* opt, int v); - -extern ROCKSDB_LIBRARY_API void -rocksdb_backup_engine_verify_backup(rocksdb_backup_engine_t* be, - uint32_t backup_id, char** errptr); - -extern ROCKSDB_LIBRARY_API void -rocksdb_backup_engine_restore_db_from_latest_backup( - rocksdb_backup_engine_t* be, const char* db_dir, const char* wal_dir, - const rocksdb_restore_options_t* restore_options, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_restore_db_from_backup( - rocksdb_backup_engine_t* be, const char* db_dir, const char* wal_dir, - const rocksdb_restore_options_t* restore_options, const uint32_t backup_id, - char** 
errptr); - -extern ROCKSDB_LIBRARY_API const rocksdb_backup_engine_info_t* -rocksdb_backup_engine_get_backup_info(rocksdb_backup_engine_t* be); - -extern ROCKSDB_LIBRARY_API int rocksdb_backup_engine_info_count( - const rocksdb_backup_engine_info_t* info); - -extern ROCKSDB_LIBRARY_API int64_t -rocksdb_backup_engine_info_timestamp(const rocksdb_backup_engine_info_t* info, - int index); - -extern ROCKSDB_LIBRARY_API uint32_t -rocksdb_backup_engine_info_backup_id(const rocksdb_backup_engine_info_t* info, - int index); - -extern ROCKSDB_LIBRARY_API uint64_t -rocksdb_backup_engine_info_size(const rocksdb_backup_engine_info_t* info, - int index); - -extern ROCKSDB_LIBRARY_API uint32_t rocksdb_backup_engine_info_number_files( - const rocksdb_backup_engine_info_t* info, int index); - -extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_info_destroy( - const rocksdb_backup_engine_info_t* info); - -extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_close( - rocksdb_backup_engine_t* be); - -extern ROCKSDB_LIBRARY_API rocksdb_checkpoint_t* -rocksdb_checkpoint_object_create(rocksdb_t* db, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_checkpoint_create( - rocksdb_checkpoint_t* checkpoint, const char* checkpoint_dir, - uint64_t log_size_for_flush, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_checkpoint_object_destroy( - rocksdb_checkpoint_t* checkpoint); - -extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_column_families( - const rocksdb_options_t* options, const char* name, int num_column_families, - const char* const* column_family_names, - const rocksdb_options_t* const* column_family_options, - rocksdb_column_family_handle_t** column_family_handles, char** errptr); - -extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_column_families_with_ttl( - const rocksdb_options_t* options, const char* name, int num_column_families, - const char* const* column_family_names, - const rocksdb_options_t* const* column_family_options, - 
rocksdb_column_family_handle_t** column_family_handles, const int* ttls, - char** errptr); - -extern ROCKSDB_LIBRARY_API rocksdb_t* -rocksdb_open_for_read_only_column_families( - const rocksdb_options_t* options, const char* name, int num_column_families, - const char* const* column_family_names, - const rocksdb_options_t* const* column_family_options, - rocksdb_column_family_handle_t** column_family_handles, - unsigned char error_if_wal_file_exists, char** errptr); - -extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_as_secondary_column_families( - const rocksdb_options_t* options, const char* name, - const char* secondary_path, int num_column_families, - const char* const* column_family_names, - const rocksdb_options_t* const* column_family_options, - rocksdb_column_family_handle_t** colummn_family_handles, char** errptr); - -extern ROCKSDB_LIBRARY_API char** rocksdb_list_column_families( - const rocksdb_options_t* options, const char* name, size_t* lencf, - char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_list_column_families_destroy( - char** list, size_t len); - -extern ROCKSDB_LIBRARY_API rocksdb_column_family_handle_t* -rocksdb_create_column_family(rocksdb_t* db, - const rocksdb_options_t* column_family_options, - const char* column_family_name, char** errptr); - -extern ROCKSDB_LIBRARY_API rocksdb_column_family_handle_t* -rocksdb_create_column_family_with_ttl( - rocksdb_t* db, const rocksdb_options_t* column_family_options, - const char* column_family_name, int ttl, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_drop_column_family( - rocksdb_t* db, rocksdb_column_family_handle_t* handle, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_column_family_handle_destroy( - rocksdb_column_family_handle_t*); - -extern ROCKSDB_LIBRARY_API void rocksdb_close(rocksdb_t* db); - -extern ROCKSDB_LIBRARY_API void rocksdb_put( - rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, - size_t keylen, const char* val, size_t 
vallen, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_put_cf( - rocksdb_t* db, const rocksdb_writeoptions_t* options, - rocksdb_column_family_handle_t* column_family, const char* key, - size_t keylen, const char* val, size_t vallen, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_delete( - rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, - size_t keylen, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_delete_cf( - rocksdb_t* db, const rocksdb_writeoptions_t* options, - rocksdb_column_family_handle_t* column_family, const char* key, - size_t keylen, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_delete_range_cf( - rocksdb_t* db, const rocksdb_writeoptions_t* options, - rocksdb_column_family_handle_t* column_family, const char* start_key, - size_t start_key_len, const char* end_key, size_t end_key_len, - char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_merge( - rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, - size_t keylen, const char* val, size_t vallen, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_merge_cf( - rocksdb_t* db, const rocksdb_writeoptions_t* options, - rocksdb_column_family_handle_t* column_family, const char* key, - size_t keylen, const char* val, size_t vallen, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_write( - rocksdb_t* db, const rocksdb_writeoptions_t* options, - rocksdb_writebatch_t* batch, char** errptr); - -/* Returns NULL if not found. A malloc()ed array otherwise. - Stores the length of the array in *vallen. 
*/ -extern ROCKSDB_LIBRARY_API char* rocksdb_get( - rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key, - size_t keylen, size_t* vallen, char** errptr); - -extern ROCKSDB_LIBRARY_API char* rocksdb_get_cf( - rocksdb_t* db, const rocksdb_readoptions_t* options, - rocksdb_column_family_handle_t* column_family, const char* key, - size_t keylen, size_t* vallen, char** errptr); - -// if values_list[i] == NULL and errs[i] == NULL, -// then we got status.IsNotFound(), which we will not return. -// all errors except status status.ok() and status.IsNotFound() are returned. -// -// errs, values_list and values_list_sizes must be num_keys in length, -// allocated by the caller. -// errs is a list of strings as opposed to the conventional one error, -// where errs[i] is the status for retrieval of keys_list[i]. -// each non-NULL errs entry is a malloc()ed, null terminated string. -// each non-NULL values_list entry is a malloc()ed array, with -// the length for each stored in values_list_sizes[i]. -extern ROCKSDB_LIBRARY_API void rocksdb_multi_get( - rocksdb_t* db, const rocksdb_readoptions_t* options, size_t num_keys, - const char* const* keys_list, const size_t* keys_list_sizes, - char** values_list, size_t* values_list_sizes, char** errs); - -extern ROCKSDB_LIBRARY_API void rocksdb_multi_get_cf( - rocksdb_t* db, const rocksdb_readoptions_t* options, - const rocksdb_column_family_handle_t* const* column_families, - size_t num_keys, const char* const* keys_list, - const size_t* keys_list_sizes, char** values_list, - size_t* values_list_sizes, char** errs); - -// The value is only allocated (using malloc) and returned if it is found and -// value_found isn't NULL. In that case the user is responsible for freeing it. 
-extern ROCKSDB_LIBRARY_API unsigned char rocksdb_key_may_exist( - rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key, - size_t key_len, char** value, size_t* val_len, const char* timestamp, - size_t timestamp_len, unsigned char* value_found); - -// The value is only allocated (using malloc) and returned if it is found and -// value_found isn't NULL. In that case the user is responsible for freeing it. -extern ROCKSDB_LIBRARY_API unsigned char rocksdb_key_may_exist_cf( - rocksdb_t* db, const rocksdb_readoptions_t* options, - rocksdb_column_family_handle_t* column_family, const char* key, - size_t key_len, char** value, size_t* val_len, const char* timestamp, - size_t timestamp_len, unsigned char* value_found); - -extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* rocksdb_create_iterator( - rocksdb_t* db, const rocksdb_readoptions_t* options); - -extern ROCKSDB_LIBRARY_API rocksdb_wal_iterator_t* rocksdb_get_updates_since( - rocksdb_t* db, uint64_t seq_number, - const rocksdb_wal_readoptions_t* options, - char** errptr -); - -extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* rocksdb_create_iterator_cf( - rocksdb_t* db, const rocksdb_readoptions_t* options, - rocksdb_column_family_handle_t* column_family); - -extern ROCKSDB_LIBRARY_API void rocksdb_create_iterators( - rocksdb_t *db, rocksdb_readoptions_t* opts, - rocksdb_column_family_handle_t** column_families, - rocksdb_iterator_t** iterators, size_t size, char** errptr); - -extern ROCKSDB_LIBRARY_API const rocksdb_snapshot_t* rocksdb_create_snapshot( - rocksdb_t* db); - -extern ROCKSDB_LIBRARY_API void rocksdb_release_snapshot( - rocksdb_t* db, const rocksdb_snapshot_t* snapshot); - -/* Returns NULL if property name is unknown. - Else returns a pointer to a malloc()-ed null-terminated value. 
*/ -extern ROCKSDB_LIBRARY_API char* rocksdb_property_value(rocksdb_t* db, - const char* propname); -/* returns 0 on success, -1 otherwise */ -int rocksdb_property_int( - rocksdb_t* db, - const char* propname, uint64_t *out_val); - -/* returns 0 on success, -1 otherwise */ -int rocksdb_property_int_cf( - rocksdb_t* db, rocksdb_column_family_handle_t* column_family, - const char* propname, uint64_t *out_val); - -extern ROCKSDB_LIBRARY_API char* rocksdb_property_value_cf( - rocksdb_t* db, rocksdb_column_family_handle_t* column_family, - const char* propname); - -extern ROCKSDB_LIBRARY_API void rocksdb_approximate_sizes( - rocksdb_t* db, int num_ranges, const char* const* range_start_key, - const size_t* range_start_key_len, const char* const* range_limit_key, - const size_t* range_limit_key_len, uint64_t* sizes); - -extern ROCKSDB_LIBRARY_API void rocksdb_approximate_sizes_cf( - rocksdb_t* db, rocksdb_column_family_handle_t* column_family, - int num_ranges, const char* const* range_start_key, - const size_t* range_start_key_len, const char* const* range_limit_key, - const size_t* range_limit_key_len, uint64_t* sizes); - -extern ROCKSDB_LIBRARY_API void rocksdb_compact_range(rocksdb_t* db, - const char* start_key, - size_t start_key_len, - const char* limit_key, - size_t limit_key_len); - -extern ROCKSDB_LIBRARY_API void rocksdb_compact_range_cf( - rocksdb_t* db, rocksdb_column_family_handle_t* column_family, - const char* start_key, size_t start_key_len, const char* limit_key, - size_t limit_key_len); - -extern ROCKSDB_LIBRARY_API void rocksdb_compact_range_opt( - rocksdb_t* db, rocksdb_compactoptions_t* opt, const char* start_key, - size_t start_key_len, const char* limit_key, size_t limit_key_len); - -extern ROCKSDB_LIBRARY_API void rocksdb_compact_range_cf_opt( - rocksdb_t* db, rocksdb_column_family_handle_t* column_family, - rocksdb_compactoptions_t* opt, const char* start_key, size_t start_key_len, - const char* limit_key, size_t limit_key_len); - -extern 
ROCKSDB_LIBRARY_API void rocksdb_delete_file(rocksdb_t* db, - const char* name); - -extern ROCKSDB_LIBRARY_API const rocksdb_livefiles_t* rocksdb_livefiles( - rocksdb_t* db); - -extern ROCKSDB_LIBRARY_API void rocksdb_flush( - rocksdb_t* db, const rocksdb_flushoptions_t* options, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_flush_cf( - rocksdb_t* db, const rocksdb_flushoptions_t* options, - rocksdb_column_family_handle_t* column_family, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_disable_file_deletions(rocksdb_t* db, - char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_enable_file_deletions( - rocksdb_t* db, unsigned char force, char** errptr); - -/* Management operations */ - -extern ROCKSDB_LIBRARY_API void rocksdb_destroy_db( - const rocksdb_options_t* options, const char* name, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_repair_db( - const rocksdb_options_t* options, const char* name, char** errptr); - -/* Iterator */ - -extern ROCKSDB_LIBRARY_API void rocksdb_iter_destroy(rocksdb_iterator_t*); -extern ROCKSDB_LIBRARY_API unsigned char rocksdb_iter_valid( - const rocksdb_iterator_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek_to_first(rocksdb_iterator_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek_to_last(rocksdb_iterator_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek(rocksdb_iterator_t*, - const char* k, size_t klen); -extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek_for_prev(rocksdb_iterator_t*, - const char* k, - size_t klen); -extern ROCKSDB_LIBRARY_API void rocksdb_iter_next(rocksdb_iterator_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_iter_prev(rocksdb_iterator_t*); -extern ROCKSDB_LIBRARY_API const char* rocksdb_iter_key( - const rocksdb_iterator_t*, size_t* klen); -extern ROCKSDB_LIBRARY_API const char* rocksdb_iter_value( - const rocksdb_iterator_t*, size_t* vlen); -extern ROCKSDB_LIBRARY_API void rocksdb_iter_get_error( - const rocksdb_iterator_t*, char** errptr); - 
-extern ROCKSDB_LIBRARY_API void rocksdb_wal_iter_next(rocksdb_wal_iterator_t* iter); -extern ROCKSDB_LIBRARY_API unsigned char rocksdb_wal_iter_valid( - const rocksdb_wal_iterator_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_wal_iter_status (const rocksdb_wal_iterator_t* iter, char** errptr) ; -extern ROCKSDB_LIBRARY_API rocksdb_writebatch_t* rocksdb_wal_iter_get_batch (const rocksdb_wal_iterator_t* iter, uint64_t* seq) ; -extern ROCKSDB_LIBRARY_API uint64_t rocksdb_get_latest_sequence_number (rocksdb_t *db); -extern ROCKSDB_LIBRARY_API void rocksdb_wal_iter_destroy (const rocksdb_wal_iterator_t* iter) ; - -/* Write batch */ - -extern ROCKSDB_LIBRARY_API rocksdb_writebatch_t* rocksdb_writebatch_create(); -extern ROCKSDB_LIBRARY_API rocksdb_writebatch_t* rocksdb_writebatch_create_from( - const char* rep, size_t size); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_destroy( - rocksdb_writebatch_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_clear(rocksdb_writebatch_t*); -extern ROCKSDB_LIBRARY_API int rocksdb_writebatch_count(rocksdb_writebatch_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_put(rocksdb_writebatch_t*, - const char* key, - size_t klen, - const char* val, - size_t vlen); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_put_cf( - rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family, - const char* key, size_t klen, const char* val, size_t vlen); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_putv( - rocksdb_writebatch_t* b, int num_keys, const char* const* keys_list, - const size_t* keys_list_sizes, int num_values, - const char* const* values_list, const size_t* values_list_sizes); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_putv_cf( - rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, - int num_keys, const char* const* keys_list, const size_t* keys_list_sizes, - int num_values, const char* const* values_list, - const size_t* values_list_sizes); -extern ROCKSDB_LIBRARY_API 
void rocksdb_writebatch_merge(rocksdb_writebatch_t*, - const char* key, - size_t klen, - const char* val, - size_t vlen); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_merge_cf( - rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family, - const char* key, size_t klen, const char* val, size_t vlen); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_mergev( - rocksdb_writebatch_t* b, int num_keys, const char* const* keys_list, - const size_t* keys_list_sizes, int num_values, - const char* const* values_list, const size_t* values_list_sizes); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_mergev_cf( - rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, - int num_keys, const char* const* keys_list, const size_t* keys_list_sizes, - int num_values, const char* const* values_list, - const size_t* values_list_sizes); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete(rocksdb_writebatch_t*, - const char* key, - size_t klen); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_singledelete( - rocksdb_writebatch_t* b, const char* key, size_t klen); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_cf( - rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family, - const char* key, size_t klen); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_singledelete_cf( - rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, - const char* key, size_t klen); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_deletev( - rocksdb_writebatch_t* b, int num_keys, const char* const* keys_list, - const size_t* keys_list_sizes); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_deletev_cf( - rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, - int num_keys, const char* const* keys_list, const size_t* keys_list_sizes); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_range( - rocksdb_writebatch_t* b, const char* start_key, size_t start_key_len, - const char* end_key, 
size_t end_key_len); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_range_cf( - rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, - const char* start_key, size_t start_key_len, const char* end_key, - size_t end_key_len); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_rangev( - rocksdb_writebatch_t* b, int num_keys, const char* const* start_keys_list, - const size_t* start_keys_list_sizes, const char* const* end_keys_list, - const size_t* end_keys_list_sizes); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_rangev_cf( - rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, - int num_keys, const char* const* start_keys_list, - const size_t* start_keys_list_sizes, const char* const* end_keys_list, - const size_t* end_keys_list_sizes); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_put_log_data( - rocksdb_writebatch_t*, const char* blob, size_t len); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_iterate( - rocksdb_writebatch_t*, void* state, - void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen), - void (*deleted)(void*, const char* k, size_t klen)); -extern ROCKSDB_LIBRARY_API const char* rocksdb_writebatch_data( - rocksdb_writebatch_t*, size_t* size); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_set_save_point( - rocksdb_writebatch_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_rollback_to_save_point( - rocksdb_writebatch_t*, char** errptr); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_pop_save_point( - rocksdb_writebatch_t*, char** errptr); - -/* Write batch with index */ - -extern ROCKSDB_LIBRARY_API rocksdb_writebatch_wi_t* rocksdb_writebatch_wi_create( - size_t reserved_bytes, - unsigned char overwrite_keys); -extern ROCKSDB_LIBRARY_API rocksdb_writebatch_wi_t* rocksdb_writebatch_wi_create_from( - const char* rep, size_t size); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_destroy( - rocksdb_writebatch_wi_t*); -extern 
ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_clear(rocksdb_writebatch_wi_t*); -extern ROCKSDB_LIBRARY_API int rocksdb_writebatch_wi_count(rocksdb_writebatch_wi_t* b); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_put(rocksdb_writebatch_wi_t*, - const char* key, - size_t klen, - const char* val, - size_t vlen); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_put_cf( - rocksdb_writebatch_wi_t*, rocksdb_column_family_handle_t* column_family, - const char* key, size_t klen, const char* val, size_t vlen); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_putv( - rocksdb_writebatch_wi_t* b, int num_keys, const char* const* keys_list, - const size_t* keys_list_sizes, int num_values, - const char* const* values_list, const size_t* values_list_sizes); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_putv_cf( - rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family, - int num_keys, const char* const* keys_list, const size_t* keys_list_sizes, - int num_values, const char* const* values_list, - const size_t* values_list_sizes); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_merge(rocksdb_writebatch_wi_t*, - const char* key, - size_t klen, - const char* val, - size_t vlen); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_merge_cf( - rocksdb_writebatch_wi_t*, rocksdb_column_family_handle_t* column_family, - const char* key, size_t klen, const char* val, size_t vlen); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_mergev( - rocksdb_writebatch_wi_t* b, int num_keys, const char* const* keys_list, - const size_t* keys_list_sizes, int num_values, - const char* const* values_list, const size_t* values_list_sizes); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_mergev_cf( - rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family, - int num_keys, const char* const* keys_list, const size_t* keys_list_sizes, - int num_values, const char* const* values_list, - const size_t* values_list_sizes); 
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete(rocksdb_writebatch_wi_t*, - const char* key, - size_t klen); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_singledelete( - rocksdb_writebatch_wi_t*, const char* key, size_t klen); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_cf( - rocksdb_writebatch_wi_t*, rocksdb_column_family_handle_t* column_family, - const char* key, size_t klen); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_singledelete_cf( - rocksdb_writebatch_wi_t*, rocksdb_column_family_handle_t* column_family, - const char* key, size_t klen); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_deletev( - rocksdb_writebatch_wi_t* b, int num_keys, const char* const* keys_list, - const size_t* keys_list_sizes); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_deletev_cf( - rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family, - int num_keys, const char* const* keys_list, const size_t* keys_list_sizes); -// DO NOT USE - rocksdb_writebatch_wi_delete_range is not yet supported -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_range( - rocksdb_writebatch_wi_t* b, const char* start_key, size_t start_key_len, - const char* end_key, size_t end_key_len); -// DO NOT USE - rocksdb_writebatch_wi_delete_range_cf is not yet supported -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_range_cf( - rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family, - const char* start_key, size_t start_key_len, const char* end_key, - size_t end_key_len); -// DO NOT USE - rocksdb_writebatch_wi_delete_rangev is not yet supported -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_rangev( - rocksdb_writebatch_wi_t* b, int num_keys, const char* const* start_keys_list, - const size_t* start_keys_list_sizes, const char* const* end_keys_list, - const size_t* end_keys_list_sizes); -// DO NOT USE - rocksdb_writebatch_wi_delete_rangev_cf is not yet supported -extern 
ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_rangev_cf( - rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family, - int num_keys, const char* const* start_keys_list, - const size_t* start_keys_list_sizes, const char* const* end_keys_list, - const size_t* end_keys_list_sizes); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_put_log_data( - rocksdb_writebatch_wi_t*, const char* blob, size_t len); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_iterate( - rocksdb_writebatch_wi_t* b, - void* state, - void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen), - void (*deleted)(void*, const char* k, size_t klen)); -extern ROCKSDB_LIBRARY_API const char* rocksdb_writebatch_wi_data( - rocksdb_writebatch_wi_t* b, - size_t* size); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_set_save_point( - rocksdb_writebatch_wi_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_rollback_to_save_point( - rocksdb_writebatch_wi_t*, char** errptr); -extern ROCKSDB_LIBRARY_API char* rocksdb_writebatch_wi_get_from_batch( - rocksdb_writebatch_wi_t* wbwi, - const rocksdb_options_t* options, - const char* key, size_t keylen, - size_t* vallen, - char** errptr); -extern ROCKSDB_LIBRARY_API char* rocksdb_writebatch_wi_get_from_batch_cf( - rocksdb_writebatch_wi_t* wbwi, - const rocksdb_options_t* options, - rocksdb_column_family_handle_t* column_family, - const char* key, size_t keylen, - size_t* vallen, - char** errptr); -extern ROCKSDB_LIBRARY_API char* rocksdb_writebatch_wi_get_from_batch_and_db( - rocksdb_writebatch_wi_t* wbwi, - rocksdb_t* db, - const rocksdb_readoptions_t* options, - const char* key, size_t keylen, - size_t* vallen, - char** errptr); -extern ROCKSDB_LIBRARY_API char* rocksdb_writebatch_wi_get_from_batch_and_db_cf( - rocksdb_writebatch_wi_t* wbwi, - rocksdb_t* db, - const rocksdb_readoptions_t* options, - rocksdb_column_family_handle_t* column_family, - const char* key, size_t keylen, - size_t* vallen, 
- char** errptr); -extern ROCKSDB_LIBRARY_API void rocksdb_write_writebatch_wi( - rocksdb_t* db, - const rocksdb_writeoptions_t* options, - rocksdb_writebatch_wi_t* wbwi, - char** errptr); -extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* rocksdb_writebatch_wi_create_iterator_with_base( - rocksdb_writebatch_wi_t* wbwi, - rocksdb_iterator_t* base_iterator); -extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* rocksdb_writebatch_wi_create_iterator_with_base_cf( - rocksdb_writebatch_wi_t* wbwi, - rocksdb_iterator_t* base_iterator, - rocksdb_column_family_handle_t* cf); - -/* Block based table options */ - -extern ROCKSDB_LIBRARY_API rocksdb_block_based_table_options_t* -rocksdb_block_based_options_create(); -extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_destroy( - rocksdb_block_based_table_options_t* options); -extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_block_size( - rocksdb_block_based_table_options_t* options, size_t block_size); -extern ROCKSDB_LIBRARY_API void -rocksdb_block_based_options_set_block_size_deviation( - rocksdb_block_based_table_options_t* options, int block_size_deviation); -extern ROCKSDB_LIBRARY_API void -rocksdb_block_based_options_set_block_restart_interval( - rocksdb_block_based_table_options_t* options, int block_restart_interval); -extern ROCKSDB_LIBRARY_API void -rocksdb_block_based_options_set_index_block_restart_interval( - rocksdb_block_based_table_options_t* options, int index_block_restart_interval); -extern ROCKSDB_LIBRARY_API void -rocksdb_block_based_options_set_metadata_block_size( - rocksdb_block_based_table_options_t* options, uint64_t metadata_block_size); -extern ROCKSDB_LIBRARY_API void -rocksdb_block_based_options_set_partition_filters( - rocksdb_block_based_table_options_t* options, unsigned char partition_filters); -extern ROCKSDB_LIBRARY_API void -rocksdb_block_based_options_set_use_delta_encoding( - rocksdb_block_based_table_options_t* options, unsigned char use_delta_encoding); -extern 
ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_filter_policy( - rocksdb_block_based_table_options_t* options, - rocksdb_filterpolicy_t* filter_policy); -extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_no_block_cache( - rocksdb_block_based_table_options_t* options, unsigned char no_block_cache); -extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_block_cache( - rocksdb_block_based_table_options_t* options, rocksdb_cache_t* block_cache); -extern ROCKSDB_LIBRARY_API void -rocksdb_block_based_options_set_block_cache_compressed( - rocksdb_block_based_table_options_t* options, - rocksdb_cache_t* block_cache_compressed); -extern ROCKSDB_LIBRARY_API void -rocksdb_block_based_options_set_whole_key_filtering( - rocksdb_block_based_table_options_t*, unsigned char); -extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_format_version( - rocksdb_block_based_table_options_t*, int); -enum { - rocksdb_block_based_table_index_type_binary_search = 0, - rocksdb_block_based_table_index_type_hash_search = 1, - rocksdb_block_based_table_index_type_two_level_index_search = 2, -}; -extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_index_type( - rocksdb_block_based_table_options_t*, int); // uses one of the above enums -enum { - rocksdb_block_based_table_data_block_index_type_binary_search = 0, - rocksdb_block_based_table_data_block_index_type_binary_search_and_hash = 1, -}; -extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_data_block_index_type( - rocksdb_block_based_table_options_t*, int); // uses one of the above enums -extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_data_block_hash_ratio( - rocksdb_block_based_table_options_t* options, double v); -extern ROCKSDB_LIBRARY_API void -rocksdb_block_based_options_set_hash_index_allow_collision( - rocksdb_block_based_table_options_t*, unsigned char); -extern ROCKSDB_LIBRARY_API void -rocksdb_block_based_options_set_cache_index_and_filter_blocks( - 
rocksdb_block_based_table_options_t*, unsigned char); -extern ROCKSDB_LIBRARY_API void -rocksdb_block_based_options_set_cache_index_and_filter_blocks_with_high_priority( - rocksdb_block_based_table_options_t*, unsigned char); -extern ROCKSDB_LIBRARY_API void -rocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache( - rocksdb_block_based_table_options_t*, unsigned char); -extern ROCKSDB_LIBRARY_API void -rocksdb_block_based_options_set_pin_top_level_index_and_filter( - rocksdb_block_based_table_options_t*, unsigned char); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_block_based_table_factory( - rocksdb_options_t* opt, rocksdb_block_based_table_options_t* table_options); - -/* Cuckoo table options */ - -extern ROCKSDB_LIBRARY_API rocksdb_cuckoo_table_options_t* -rocksdb_cuckoo_options_create(); -extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_destroy( - rocksdb_cuckoo_table_options_t* options); -extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_hash_ratio( - rocksdb_cuckoo_table_options_t* options, double v); -extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_max_search_depth( - rocksdb_cuckoo_table_options_t* options, uint32_t v); -extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_cuckoo_block_size( - rocksdb_cuckoo_table_options_t* options, uint32_t v); -extern ROCKSDB_LIBRARY_API void -rocksdb_cuckoo_options_set_identity_as_first_hash( - rocksdb_cuckoo_table_options_t* options, unsigned char v); -extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_use_module_hash( - rocksdb_cuckoo_table_options_t* options, unsigned char v); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_cuckoo_table_factory( - rocksdb_options_t* opt, rocksdb_cuckoo_table_options_t* table_options); - -/* Options */ -extern ROCKSDB_LIBRARY_API void rocksdb_set_options( - rocksdb_t* db, int count, const char* const keys[], const char* const values[], char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_set_options_cf( - 
rocksdb_t* db, rocksdb_column_family_handle_t* handle, int count, const char* const keys[], const char* const values[], char** errptr); - -extern ROCKSDB_LIBRARY_API rocksdb_options_t* rocksdb_options_create(); -extern ROCKSDB_LIBRARY_API void rocksdb_options_destroy(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API rocksdb_options_t* rocksdb_options_create_copy( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_increase_parallelism( - rocksdb_options_t* opt, int total_threads); -extern ROCKSDB_LIBRARY_API void rocksdb_options_optimize_for_point_lookup( - rocksdb_options_t* opt, uint64_t block_cache_size_mb); -extern ROCKSDB_LIBRARY_API void rocksdb_options_optimize_level_style_compaction( - rocksdb_options_t* opt, uint64_t memtable_memory_budget); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_optimize_universal_style_compaction( - rocksdb_options_t* opt, uint64_t memtable_memory_budget); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_allow_ingest_behind(rocksdb_options_t*, - unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_options_get_allow_ingest_behind(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compaction_filter( - rocksdb_options_t*, rocksdb_compactionfilter_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compaction_filter_factory( - rocksdb_options_t*, rocksdb_compactionfilterfactory_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_compaction_readahead_size( - rocksdb_options_t*, size_t); -extern ROCKSDB_LIBRARY_API size_t -rocksdb_options_get_compaction_readahead_size(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_comparator( - rocksdb_options_t*, rocksdb_comparator_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_merge_operator( - rocksdb_options_t*, rocksdb_mergeoperator_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_uint64add_merge_operator( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void 
rocksdb_options_set_compression_per_level( - rocksdb_options_t* opt, int* level_values, size_t num_levels); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_create_if_missing( - rocksdb_options_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_create_if_missing( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_create_missing_column_families(rocksdb_options_t*, - unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_options_get_create_missing_column_families(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_error_if_exists( - rocksdb_options_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_error_if_exists( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_paranoid_checks( - rocksdb_options_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_paranoid_checks( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_db_paths(rocksdb_options_t*, - const rocksdb_dbpath_t** path_values, - size_t num_paths); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_env(rocksdb_options_t*, - rocksdb_env_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_info_log(rocksdb_options_t*, - rocksdb_logger_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_info_log_level( - rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API int rocksdb_options_get_info_log_level( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_write_buffer_size( - rocksdb_options_t*, size_t); -extern ROCKSDB_LIBRARY_API size_t -rocksdb_options_get_write_buffer_size(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_db_write_buffer_size( - rocksdb_options_t*, size_t); -extern ROCKSDB_LIBRARY_API size_t -rocksdb_options_get_db_write_buffer_size(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_open_files( - 
rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_open_files( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_file_opening_threads( - rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_file_opening_threads( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_total_wal_size( - rocksdb_options_t* opt, uint64_t n); -extern ROCKSDB_LIBRARY_API uint64_t -rocksdb_options_get_max_total_wal_size(rocksdb_options_t* opt); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compression_options( - rocksdb_options_t*, int, int, int, int); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_compression_options_zstd_max_train_bytes(rocksdb_options_t*, - int); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_bottommost_compression_options(rocksdb_options_t*, int, int, - int, int, unsigned char); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_bottommost_compression_options_zstd_max_train_bytes( - rocksdb_options_t*, int, unsigned char); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_prefix_extractor( - rocksdb_options_t*, rocksdb_slicetransform_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_num_levels( - rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API int rocksdb_options_get_num_levels( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_level0_file_num_compaction_trigger(rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API int -rocksdb_options_get_level0_file_num_compaction_trigger(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_level0_slowdown_writes_trigger(rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API int -rocksdb_options_get_level0_slowdown_writes_trigger(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_level0_stop_writes_trigger( - rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API int 
rocksdb_options_get_level0_stop_writes_trigger( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_mem_compaction_level( - rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_target_file_size_base( - rocksdb_options_t*, uint64_t); -extern ROCKSDB_LIBRARY_API uint64_t -rocksdb_options_get_target_file_size_base(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_target_file_size_multiplier( - rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API int rocksdb_options_get_target_file_size_multiplier( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_bytes_for_level_base( - rocksdb_options_t*, uint64_t); -extern ROCKSDB_LIBRARY_API uint64_t -rocksdb_options_get_max_bytes_for_level_base(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_level_compaction_dynamic_level_bytes(rocksdb_options_t*, - unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_options_get_level_compaction_dynamic_level_bytes(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_max_bytes_for_level_multiplier(rocksdb_options_t*, double); -extern ROCKSDB_LIBRARY_API double -rocksdb_options_get_max_bytes_for_level_multiplier(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_max_bytes_for_level_multiplier_additional( - rocksdb_options_t*, int* level_values, size_t num_levels); -extern ROCKSDB_LIBRARY_API void rocksdb_options_enable_statistics( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_skip_stats_update_on_db_open(rocksdb_options_t* opt, - unsigned char val); -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_options_get_skip_stats_update_on_db_open(rocksdb_options_t* opt); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_skip_checking_sst_file_sizes_on_db_open( - rocksdb_options_t* opt, unsigned char val); -extern ROCKSDB_LIBRARY_API unsigned char 
-rocksdb_options_get_skip_checking_sst_file_sizes_on_db_open( - rocksdb_options_t* opt); - -/* returns a pointer to a malloc()-ed, null terminated string */ -extern ROCKSDB_LIBRARY_API char* rocksdb_options_statistics_get_string( - rocksdb_options_t* opt); - -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_write_buffer_number( - rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_write_buffer_number( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_min_write_buffer_number_to_merge(rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API int -rocksdb_options_get_min_write_buffer_number_to_merge(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_max_write_buffer_number_to_maintain(rocksdb_options_t*, - int); -extern ROCKSDB_LIBRARY_API int -rocksdb_options_get_max_write_buffer_number_to_maintain(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_max_write_buffer_size_to_maintain(rocksdb_options_t*, - int64_t); -extern ROCKSDB_LIBRARY_API int64_t -rocksdb_options_get_max_write_buffer_size_to_maintain(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_enable_pipelined_write( - rocksdb_options_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_options_get_enable_pipelined_write(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_unordered_write( - rocksdb_options_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_unordered_write( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_subcompactions( - rocksdb_options_t*, uint32_t); -extern ROCKSDB_LIBRARY_API uint32_t -rocksdb_options_get_max_subcompactions(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_jobs( - rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_background_jobs( - rocksdb_options_t*); -extern 
ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_compactions( - rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_background_compactions( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_base_background_compactions( - rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API int rocksdb_options_get_base_background_compactions( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_flushes( - rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_background_flushes( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_log_file_size( - rocksdb_options_t*, size_t); -extern ROCKSDB_LIBRARY_API size_t -rocksdb_options_get_max_log_file_size(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_log_file_time_to_roll( - rocksdb_options_t*, size_t); -extern ROCKSDB_LIBRARY_API size_t -rocksdb_options_get_log_file_time_to_roll(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_keep_log_file_num( - rocksdb_options_t*, size_t); -extern ROCKSDB_LIBRARY_API size_t -rocksdb_options_get_keep_log_file_num(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_recycle_log_file_num( - rocksdb_options_t*, size_t); -extern ROCKSDB_LIBRARY_API size_t -rocksdb_options_get_recycle_log_file_num(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_soft_rate_limit( - rocksdb_options_t*, double); -extern ROCKSDB_LIBRARY_API double rocksdb_options_get_soft_rate_limit( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hard_rate_limit( - rocksdb_options_t*, double); -extern ROCKSDB_LIBRARY_API double rocksdb_options_get_hard_rate_limit( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_soft_pending_compaction_bytes_limit( - rocksdb_options_t* opt, size_t v); -extern ROCKSDB_LIBRARY_API size_t 
-rocksdb_options_get_soft_pending_compaction_bytes_limit(rocksdb_options_t* opt); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hard_pending_compaction_bytes_limit( - rocksdb_options_t* opt, size_t v); -extern ROCKSDB_LIBRARY_API size_t -rocksdb_options_get_hard_pending_compaction_bytes_limit(rocksdb_options_t* opt); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_rate_limit_delay_max_milliseconds(rocksdb_options_t*, - unsigned int); -extern ROCKSDB_LIBRARY_API unsigned int -rocksdb_options_get_rate_limit_delay_max_milliseconds(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_manifest_file_size( - rocksdb_options_t*, size_t); -extern ROCKSDB_LIBRARY_API size_t -rocksdb_options_get_max_manifest_file_size(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_table_cache_numshardbits( - rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API int rocksdb_options_get_table_cache_numshardbits( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_table_cache_remove_scan_count_limit(rocksdb_options_t*, - int); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_arena_block_size( - rocksdb_options_t*, size_t); -extern ROCKSDB_LIBRARY_API size_t -rocksdb_options_get_arena_block_size(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_fsync( - rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API int rocksdb_options_get_use_fsync( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_db_log_dir( - rocksdb_options_t*, const char*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_dir(rocksdb_options_t*, - const char*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_WAL_ttl_seconds( - rocksdb_options_t*, uint64_t); -extern ROCKSDB_LIBRARY_API uint64_t -rocksdb_options_get_WAL_ttl_seconds(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_WAL_size_limit_MB( - rocksdb_options_t*, uint64_t); -extern 
ROCKSDB_LIBRARY_API uint64_t -rocksdb_options_get_WAL_size_limit_MB(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_manifest_preallocation_size( - rocksdb_options_t*, size_t); -extern ROCKSDB_LIBRARY_API size_t -rocksdb_options_get_manifest_preallocation_size(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_purge_redundant_kvs_while_flush(rocksdb_options_t*, - unsigned char); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_mmap_reads( - rocksdb_options_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_allow_mmap_reads( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_mmap_writes( - rocksdb_options_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_allow_mmap_writes( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_direct_reads( - rocksdb_options_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_use_direct_reads( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_use_direct_io_for_flush_and_compaction(rocksdb_options_t*, - unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_options_get_use_direct_io_for_flush_and_compaction(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_is_fd_close_on_exec( - rocksdb_options_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_options_get_is_fd_close_on_exec(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_skip_log_error_on_recovery( - rocksdb_options_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_options_get_skip_log_error_on_recovery(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_stats_dump_period_sec( - rocksdb_options_t*, unsigned int); -extern ROCKSDB_LIBRARY_API unsigned int -rocksdb_options_get_stats_dump_period_sec(rocksdb_options_t*); -extern 
ROCKSDB_LIBRARY_API void rocksdb_options_set_stats_persist_period_sec( - rocksdb_options_t*, unsigned int); -extern ROCKSDB_LIBRARY_API unsigned int -rocksdb_options_get_stats_persist_period_sec(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_advise_random_on_open( - rocksdb_options_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_options_get_advise_random_on_open(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_access_hint_on_compaction_start(rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API int -rocksdb_options_get_access_hint_on_compaction_start(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_adaptive_mutex( - rocksdb_options_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_use_adaptive_mutex( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bytes_per_sync( - rocksdb_options_t*, uint64_t); -extern ROCKSDB_LIBRARY_API uint64_t -rocksdb_options_get_bytes_per_sync(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_bytes_per_sync( - rocksdb_options_t*, uint64_t); -extern ROCKSDB_LIBRARY_API uint64_t -rocksdb_options_get_wal_bytes_per_sync(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_writable_file_max_buffer_size(rocksdb_options_t*, uint64_t); -extern ROCKSDB_LIBRARY_API uint64_t -rocksdb_options_get_writable_file_max_buffer_size(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_allow_concurrent_memtable_write(rocksdb_options_t*, - unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_options_get_allow_concurrent_memtable_write(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_enable_write_thread_adaptive_yield(rocksdb_options_t*, - unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_options_get_enable_write_thread_adaptive_yield(rocksdb_options_t*); -extern 
ROCKSDB_LIBRARY_API void -rocksdb_options_set_max_sequential_skip_in_iterations(rocksdb_options_t*, - uint64_t); -extern ROCKSDB_LIBRARY_API uint64_t -rocksdb_options_get_max_sequential_skip_in_iterations(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_disable_auto_compactions( - rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_options_get_disable_auto_compactions(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_optimize_filters_for_hits( - rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_options_get_optimize_filters_for_hits(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_delete_obsolete_files_period_micros(rocksdb_options_t*, - uint64_t); -extern ROCKSDB_LIBRARY_API uint64_t -rocksdb_options_get_delete_obsolete_files_period_micros(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_prepare_for_bulk_load( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_memtable_vector_rep( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_memtable_prefix_bloom_size_ratio( - rocksdb_options_t*, double); -extern ROCKSDB_LIBRARY_API double -rocksdb_options_get_memtable_prefix_bloom_size_ratio(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_compaction_bytes( - rocksdb_options_t*, uint64_t); -extern ROCKSDB_LIBRARY_API uint64_t -rocksdb_options_get_max_compaction_bytes(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hash_skip_list_rep( - rocksdb_options_t*, size_t, int32_t, int32_t); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hash_link_list_rep( - rocksdb_options_t*, size_t); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_plain_table_factory( - rocksdb_options_t*, uint32_t, int, double, size_t); - -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_min_level_to_compress( - rocksdb_options_t* opt, int level); - 
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_memtable_huge_page_size( - rocksdb_options_t*, size_t); -extern ROCKSDB_LIBRARY_API size_t -rocksdb_options_get_memtable_huge_page_size(rocksdb_options_t*); - -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_successive_merges( - rocksdb_options_t*, size_t); -extern ROCKSDB_LIBRARY_API size_t -rocksdb_options_get_max_successive_merges(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bloom_locality( - rocksdb_options_t*, uint32_t); -extern ROCKSDB_LIBRARY_API uint32_t -rocksdb_options_get_bloom_locality(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_inplace_update_support( - rocksdb_options_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_options_get_inplace_update_support(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_inplace_update_num_locks( - rocksdb_options_t*, size_t); -extern ROCKSDB_LIBRARY_API size_t -rocksdb_options_get_inplace_update_num_locks(rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_report_bg_io_stats( - rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_report_bg_io_stats( - rocksdb_options_t*); - -enum { - rocksdb_tolerate_corrupted_tail_records_recovery = 0, - rocksdb_absolute_consistency_recovery = 1, - rocksdb_point_in_time_recovery = 2, - rocksdb_skip_any_corrupted_records_recovery = 3 -}; -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_recovery_mode( - rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API int rocksdb_options_get_wal_recovery_mode( - rocksdb_options_t*); - -enum { - rocksdb_no_compression = 0, - rocksdb_snappy_compression = 1, - rocksdb_zlib_compression = 2, - rocksdb_bz2_compression = 3, - rocksdb_lz4_compression = 4, - rocksdb_lz4hc_compression = 5, - rocksdb_xpress_compression = 6, - rocksdb_zstd_compression = 7 -}; -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compression( - 
rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API int rocksdb_options_get_compression( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bottommost_compression( - rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API int rocksdb_options_get_bottommost_compression( - rocksdb_options_t*); - -enum { - rocksdb_level_compaction = 0, - rocksdb_universal_compaction = 1, - rocksdb_fifo_compaction = 2 -}; -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compaction_style( - rocksdb_options_t*, int); -extern ROCKSDB_LIBRARY_API int rocksdb_options_get_compaction_style( - rocksdb_options_t*); -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_universal_compaction_options( - rocksdb_options_t*, rocksdb_universal_compaction_options_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_fifo_compaction_options( - rocksdb_options_t* opt, rocksdb_fifo_compaction_options_t* fifo); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_ratelimiter( - rocksdb_options_t* opt, rocksdb_ratelimiter_t* limiter); -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_atomic_flush( - rocksdb_options_t* opt, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_atomic_flush( - rocksdb_options_t* opt); - -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_row_cache( - rocksdb_options_t* opt, rocksdb_cache_t* cache -); - -/* RateLimiter */ -extern ROCKSDB_LIBRARY_API rocksdb_ratelimiter_t* rocksdb_ratelimiter_create( - int64_t rate_bytes_per_sec, int64_t refill_period_us, int32_t fairness); -extern ROCKSDB_LIBRARY_API void rocksdb_ratelimiter_destroy(rocksdb_ratelimiter_t*); - -/* PerfContext */ -enum { - rocksdb_uninitialized = 0, - rocksdb_disable = 1, - rocksdb_enable_count = 2, - rocksdb_enable_time_except_for_mutex = 3, - rocksdb_enable_time = 4, - rocksdb_out_of_bounds = 5 -}; - -enum { - rocksdb_user_key_comparison_count = 0, - rocksdb_block_cache_hit_count, - rocksdb_block_read_count, - rocksdb_block_read_byte, - 
rocksdb_block_read_time, - rocksdb_block_checksum_time, - rocksdb_block_decompress_time, - rocksdb_get_read_bytes, - rocksdb_multiget_read_bytes, - rocksdb_iter_read_bytes, - rocksdb_internal_key_skipped_count, - rocksdb_internal_delete_skipped_count, - rocksdb_internal_recent_skipped_count, - rocksdb_internal_merge_count, - rocksdb_get_snapshot_time, - rocksdb_get_from_memtable_time, - rocksdb_get_from_memtable_count, - rocksdb_get_post_process_time, - rocksdb_get_from_output_files_time, - rocksdb_seek_on_memtable_time, - rocksdb_seek_on_memtable_count, - rocksdb_next_on_memtable_count, - rocksdb_prev_on_memtable_count, - rocksdb_seek_child_seek_time, - rocksdb_seek_child_seek_count, - rocksdb_seek_min_heap_time, - rocksdb_seek_max_heap_time, - rocksdb_seek_internal_seek_time, - rocksdb_find_next_user_entry_time, - rocksdb_write_wal_time, - rocksdb_write_memtable_time, - rocksdb_write_delay_time, - rocksdb_write_pre_and_post_process_time, - rocksdb_db_mutex_lock_nanos, - rocksdb_db_condition_wait_nanos, - rocksdb_merge_operator_time_nanos, - rocksdb_read_index_block_nanos, - rocksdb_read_filter_block_nanos, - rocksdb_new_table_block_iter_nanos, - rocksdb_new_table_iterator_nanos, - rocksdb_block_seek_nanos, - rocksdb_find_table_nanos, - rocksdb_bloom_memtable_hit_count, - rocksdb_bloom_memtable_miss_count, - rocksdb_bloom_sst_hit_count, - rocksdb_bloom_sst_miss_count, - rocksdb_key_lock_wait_time, - rocksdb_key_lock_wait_count, - rocksdb_env_new_sequential_file_nanos, - rocksdb_env_new_random_access_file_nanos, - rocksdb_env_new_writable_file_nanos, - rocksdb_env_reuse_writable_file_nanos, - rocksdb_env_new_random_rw_file_nanos, - rocksdb_env_new_directory_nanos, - rocksdb_env_file_exists_nanos, - rocksdb_env_get_children_nanos, - rocksdb_env_get_children_file_attributes_nanos, - rocksdb_env_delete_file_nanos, - rocksdb_env_create_dir_nanos, - rocksdb_env_create_dir_if_missing_nanos, - rocksdb_env_delete_dir_nanos, - rocksdb_env_get_file_size_nanos, - 
rocksdb_env_get_file_modification_time_nanos, - rocksdb_env_rename_file_nanos, - rocksdb_env_link_file_nanos, - rocksdb_env_lock_file_nanos, - rocksdb_env_unlock_file_nanos, - rocksdb_env_new_logger_nanos, - rocksdb_total_metric_count = 68 -}; - -extern ROCKSDB_LIBRARY_API void rocksdb_set_perf_level(int); -extern ROCKSDB_LIBRARY_API rocksdb_perfcontext_t* rocksdb_perfcontext_create(); -extern ROCKSDB_LIBRARY_API void rocksdb_perfcontext_reset( - rocksdb_perfcontext_t* context); -extern ROCKSDB_LIBRARY_API char* rocksdb_perfcontext_report( - rocksdb_perfcontext_t* context, unsigned char exclude_zero_counters); -extern ROCKSDB_LIBRARY_API uint64_t rocksdb_perfcontext_metric( - rocksdb_perfcontext_t* context, int metric); -extern ROCKSDB_LIBRARY_API void rocksdb_perfcontext_destroy( - rocksdb_perfcontext_t* context); - -/* Compaction Filter */ - -extern ROCKSDB_LIBRARY_API rocksdb_compactionfilter_t* -rocksdb_compactionfilter_create( - void* state, void (*destructor)(void*), - unsigned char (*filter)(void*, int level, const char* key, - size_t key_length, const char* existing_value, - size_t value_length, char** new_value, - size_t* new_value_length, - unsigned char* value_changed), - const char* (*name)(void*)); -extern ROCKSDB_LIBRARY_API void rocksdb_compactionfilter_set_ignore_snapshots( - rocksdb_compactionfilter_t*, unsigned char); -extern ROCKSDB_LIBRARY_API void rocksdb_compactionfilter_destroy( - rocksdb_compactionfilter_t*); - -/* Compaction Filter Context */ - -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_compactionfiltercontext_is_full_compaction( - rocksdb_compactionfiltercontext_t* context); - -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_compactionfiltercontext_is_manual_compaction( - rocksdb_compactionfiltercontext_t* context); - -/* Compaction Filter Factory */ - -extern ROCKSDB_LIBRARY_API rocksdb_compactionfilterfactory_t* -rocksdb_compactionfilterfactory_create( - void* state, void (*destructor)(void*), - rocksdb_compactionfilter_t* 
(*create_compaction_filter)( - void*, rocksdb_compactionfiltercontext_t* context), - const char* (*name)(void*)); -extern ROCKSDB_LIBRARY_API void rocksdb_compactionfilterfactory_destroy( - rocksdb_compactionfilterfactory_t*); - -/* Comparator */ - -extern ROCKSDB_LIBRARY_API rocksdb_comparator_t* rocksdb_comparator_create( - void* state, void (*destructor)(void*), - int (*compare)(void*, const char* a, size_t alen, const char* b, - size_t blen), - const char* (*name)(void*)); -extern ROCKSDB_LIBRARY_API void rocksdb_comparator_destroy( - rocksdb_comparator_t*); - -/* Filter policy */ - -extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t* rocksdb_filterpolicy_create( - void* state, void (*destructor)(void*), - char* (*create_filter)(void*, const char* const* key_array, - const size_t* key_length_array, int num_keys, - size_t* filter_length), - unsigned char (*key_may_match)(void*, const char* key, size_t length, - const char* filter, size_t filter_length), - void (*delete_filter)(void*, const char* filter, size_t filter_length), - const char* (*name)(void*)); -extern ROCKSDB_LIBRARY_API void rocksdb_filterpolicy_destroy( - rocksdb_filterpolicy_t*); - -extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t* -rocksdb_filterpolicy_create_bloom(int bits_per_key); -extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t* -rocksdb_filterpolicy_create_bloom_full(int bits_per_key); - -/* Merge Operator */ - -extern ROCKSDB_LIBRARY_API rocksdb_mergeoperator_t* -rocksdb_mergeoperator_create( - void* state, void (*destructor)(void*), - char* (*full_merge)(void*, const char* key, size_t key_length, - const char* existing_value, - size_t existing_value_length, - const char* const* operands_list, - const size_t* operands_list_length, int num_operands, - unsigned char* success, size_t* new_value_length), - char* (*partial_merge)(void*, const char* key, size_t key_length, - const char* const* operands_list, - const size_t* operands_list_length, int num_operands, - unsigned char* success, 
size_t* new_value_length), - void (*delete_value)(void*, const char* value, size_t value_length), - const char* (*name)(void*)); -extern ROCKSDB_LIBRARY_API void rocksdb_mergeoperator_destroy( - rocksdb_mergeoperator_t*); - -/* Read options */ - -extern ROCKSDB_LIBRARY_API rocksdb_readoptions_t* rocksdb_readoptions_create(); -extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_destroy( - rocksdb_readoptions_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_verify_checksums( - rocksdb_readoptions_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_readoptions_get_verify_checksums(rocksdb_readoptions_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_fill_cache( - rocksdb_readoptions_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char rocksdb_readoptions_get_fill_cache( - rocksdb_readoptions_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_snapshot( - rocksdb_readoptions_t*, const rocksdb_snapshot_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_iterate_upper_bound( - rocksdb_readoptions_t*, const char* key, size_t keylen); -extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_iterate_lower_bound( - rocksdb_readoptions_t*, const char* key, size_t keylen); -extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_read_tier( - rocksdb_readoptions_t*, int); -extern ROCKSDB_LIBRARY_API int rocksdb_readoptions_get_read_tier( - rocksdb_readoptions_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_tailing( - rocksdb_readoptions_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char rocksdb_readoptions_get_tailing( - rocksdb_readoptions_t*); -// The functionality that this option controlled has been removed. 
-extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_managed( - rocksdb_readoptions_t*, unsigned char); -extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_readahead_size( - rocksdb_readoptions_t*, size_t); -extern ROCKSDB_LIBRARY_API size_t -rocksdb_readoptions_get_readahead_size(rocksdb_readoptions_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_prefix_same_as_start( - rocksdb_readoptions_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_readoptions_get_prefix_same_as_start(rocksdb_readoptions_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_pin_data( - rocksdb_readoptions_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char rocksdb_readoptions_get_pin_data( - rocksdb_readoptions_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_total_order_seek( - rocksdb_readoptions_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_readoptions_get_total_order_seek(rocksdb_readoptions_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_max_skippable_internal_keys( - rocksdb_readoptions_t*, uint64_t); -extern ROCKSDB_LIBRARY_API uint64_t -rocksdb_readoptions_get_max_skippable_internal_keys(rocksdb_readoptions_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_background_purge_on_iterator_cleanup( - rocksdb_readoptions_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_readoptions_get_background_purge_on_iterator_cleanup( - rocksdb_readoptions_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_ignore_range_deletions( - rocksdb_readoptions_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_readoptions_get_ignore_range_deletions(rocksdb_readoptions_t*); - -/* Write options */ - -extern ROCKSDB_LIBRARY_API rocksdb_writeoptions_t* -rocksdb_writeoptions_create(); -extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_destroy( - rocksdb_writeoptions_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_set_sync( - 
rocksdb_writeoptions_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char rocksdb_writeoptions_get_sync( - rocksdb_writeoptions_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_disable_WAL( - rocksdb_writeoptions_t* opt, int disable); -extern ROCKSDB_LIBRARY_API unsigned char rocksdb_writeoptions_get_disable_WAL( - rocksdb_writeoptions_t* opt); -extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_set_ignore_missing_column_families( - rocksdb_writeoptions_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_writeoptions_get_ignore_missing_column_families( - rocksdb_writeoptions_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_set_no_slowdown( - rocksdb_writeoptions_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char rocksdb_writeoptions_get_no_slowdown( - rocksdb_writeoptions_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_set_low_pri( - rocksdb_writeoptions_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char rocksdb_writeoptions_get_low_pri( - rocksdb_writeoptions_t*); -extern ROCKSDB_LIBRARY_API void -rocksdb_writeoptions_set_memtable_insert_hint_per_batch(rocksdb_writeoptions_t*, - unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_writeoptions_get_memtable_insert_hint_per_batch( - rocksdb_writeoptions_t*); - -/* Compact range options */ - -extern ROCKSDB_LIBRARY_API rocksdb_compactoptions_t* -rocksdb_compactoptions_create(); -extern ROCKSDB_LIBRARY_API void rocksdb_compactoptions_destroy( - rocksdb_compactoptions_t*); -extern ROCKSDB_LIBRARY_API void -rocksdb_compactoptions_set_exclusive_manual_compaction( - rocksdb_compactoptions_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_compactoptions_get_exclusive_manual_compaction( - rocksdb_compactoptions_t*); -extern ROCKSDB_LIBRARY_API void -rocksdb_compactoptions_set_bottommost_level_compaction( - rocksdb_compactoptions_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char 
-rocksdb_compactoptions_get_bottommost_level_compaction( - rocksdb_compactoptions_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_compactoptions_set_change_level( - rocksdb_compactoptions_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char -rocksdb_compactoptions_get_change_level(rocksdb_compactoptions_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_compactoptions_set_target_level( - rocksdb_compactoptions_t*, int); -extern ROCKSDB_LIBRARY_API int rocksdb_compactoptions_get_target_level( - rocksdb_compactoptions_t*); - -/* Flush options */ - -extern ROCKSDB_LIBRARY_API rocksdb_flushoptions_t* -rocksdb_flushoptions_create(); -extern ROCKSDB_LIBRARY_API void rocksdb_flushoptions_destroy( - rocksdb_flushoptions_t*); -extern ROCKSDB_LIBRARY_API void rocksdb_flushoptions_set_wait( - rocksdb_flushoptions_t*, unsigned char); -extern ROCKSDB_LIBRARY_API unsigned char rocksdb_flushoptions_get_wait( - rocksdb_flushoptions_t*); - -/* Cache */ - -extern ROCKSDB_LIBRARY_API rocksdb_cache_t* rocksdb_cache_create_lru( - size_t capacity); -extern ROCKSDB_LIBRARY_API void rocksdb_cache_destroy(rocksdb_cache_t* cache); -extern ROCKSDB_LIBRARY_API void rocksdb_cache_set_capacity( - rocksdb_cache_t* cache, size_t capacity); -extern ROCKSDB_LIBRARY_API size_t -rocksdb_cache_get_capacity(rocksdb_cache_t* cache); -extern ROCKSDB_LIBRARY_API size_t -rocksdb_cache_get_usage(rocksdb_cache_t* cache); -extern ROCKSDB_LIBRARY_API size_t -rocksdb_cache_get_pinned_usage(rocksdb_cache_t* cache); - -/* DBPath */ - -extern ROCKSDB_LIBRARY_API rocksdb_dbpath_t* rocksdb_dbpath_create(const char* path, uint64_t target_size); -extern ROCKSDB_LIBRARY_API void rocksdb_dbpath_destroy(rocksdb_dbpath_t*); - -/* Env */ - -extern ROCKSDB_LIBRARY_API rocksdb_env_t* rocksdb_create_default_env(); -extern ROCKSDB_LIBRARY_API rocksdb_env_t* rocksdb_create_mem_env(); -extern ROCKSDB_LIBRARY_API void rocksdb_env_set_background_threads( - rocksdb_env_t* env, int n); -extern ROCKSDB_LIBRARY_API void 
-rocksdb_env_set_high_priority_background_threads(rocksdb_env_t* env, int n); -extern ROCKSDB_LIBRARY_API void rocksdb_env_set_low_priority_background_threads( - rocksdb_env_t* env, int n); -extern ROCKSDB_LIBRARY_API void -rocksdb_env_set_bottom_priority_background_threads(rocksdb_env_t* env, int n); -extern ROCKSDB_LIBRARY_API void rocksdb_env_join_all_threads( - rocksdb_env_t* env); -extern ROCKSDB_LIBRARY_API void rocksdb_env_lower_thread_pool_io_priority(rocksdb_env_t* env); -extern ROCKSDB_LIBRARY_API void rocksdb_env_lower_high_priority_thread_pool_io_priority(rocksdb_env_t* env); -extern ROCKSDB_LIBRARY_API void rocksdb_env_lower_thread_pool_cpu_priority(rocksdb_env_t* env); -extern ROCKSDB_LIBRARY_API void rocksdb_env_lower_high_priority_thread_pool_cpu_priority(rocksdb_env_t* env); - -extern ROCKSDB_LIBRARY_API void rocksdb_env_destroy(rocksdb_env_t*); - -extern ROCKSDB_LIBRARY_API rocksdb_envoptions_t* rocksdb_envoptions_create(); -extern ROCKSDB_LIBRARY_API void rocksdb_envoptions_destroy( - rocksdb_envoptions_t* opt); - -/* SstFile */ - -extern ROCKSDB_LIBRARY_API rocksdb_sstfilewriter_t* -rocksdb_sstfilewriter_create(const rocksdb_envoptions_t* env, - const rocksdb_options_t* io_options); -extern ROCKSDB_LIBRARY_API rocksdb_sstfilewriter_t* -rocksdb_sstfilewriter_create_with_comparator( - const rocksdb_envoptions_t* env, const rocksdb_options_t* io_options, - const rocksdb_comparator_t* comparator); -extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_open( - rocksdb_sstfilewriter_t* writer, const char* name, char** errptr); -extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_add( - rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, - const char* val, size_t vallen, char** errptr); -extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_put( - rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, - const char* val, size_t vallen, char** errptr); -extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_merge( - 
rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, - const char* val, size_t vallen, char** errptr); -extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_delete( - rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, - char** errptr); -extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_finish( - rocksdb_sstfilewriter_t* writer, char** errptr); -extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_file_size( - rocksdb_sstfilewriter_t* writer, uint64_t* file_size); -extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_destroy( - rocksdb_sstfilewriter_t* writer); - -extern ROCKSDB_LIBRARY_API rocksdb_ingestexternalfileoptions_t* -rocksdb_ingestexternalfileoptions_create(); -extern ROCKSDB_LIBRARY_API void -rocksdb_ingestexternalfileoptions_set_move_files( - rocksdb_ingestexternalfileoptions_t* opt, unsigned char move_files); -extern ROCKSDB_LIBRARY_API void -rocksdb_ingestexternalfileoptions_set_snapshot_consistency( - rocksdb_ingestexternalfileoptions_t* opt, - unsigned char snapshot_consistency); -extern ROCKSDB_LIBRARY_API void -rocksdb_ingestexternalfileoptions_set_allow_global_seqno( - rocksdb_ingestexternalfileoptions_t* opt, unsigned char allow_global_seqno); -extern ROCKSDB_LIBRARY_API void -rocksdb_ingestexternalfileoptions_set_allow_blocking_flush( - rocksdb_ingestexternalfileoptions_t* opt, - unsigned char allow_blocking_flush); -extern ROCKSDB_LIBRARY_API void -rocksdb_ingestexternalfileoptions_set_ingest_behind( - rocksdb_ingestexternalfileoptions_t* opt, - unsigned char ingest_behind); -extern ROCKSDB_LIBRARY_API void rocksdb_ingestexternalfileoptions_destroy( - rocksdb_ingestexternalfileoptions_t* opt); - -extern ROCKSDB_LIBRARY_API void rocksdb_ingest_external_file( - rocksdb_t* db, const char* const* file_list, const size_t list_len, - const rocksdb_ingestexternalfileoptions_t* opt, char** errptr); -extern ROCKSDB_LIBRARY_API void rocksdb_ingest_external_file_cf( - rocksdb_t* db, rocksdb_column_family_handle_t* handle, 
- const char* const* file_list, const size_t list_len, - const rocksdb_ingestexternalfileoptions_t* opt, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_try_catch_up_with_primary( - rocksdb_t* db, char** errptr); - -/* SliceTransform */ - -extern ROCKSDB_LIBRARY_API rocksdb_slicetransform_t* -rocksdb_slicetransform_create( - void* state, void (*destructor)(void*), - char* (*transform)(void*, const char* key, size_t length, - size_t* dst_length), - unsigned char (*in_domain)(void*, const char* key, size_t length), - unsigned char (*in_range)(void*, const char* key, size_t length), - const char* (*name)(void*)); -extern ROCKSDB_LIBRARY_API rocksdb_slicetransform_t* - rocksdb_slicetransform_create_fixed_prefix(size_t); -extern ROCKSDB_LIBRARY_API rocksdb_slicetransform_t* -rocksdb_slicetransform_create_noop(); -extern ROCKSDB_LIBRARY_API void rocksdb_slicetransform_destroy( - rocksdb_slicetransform_t*); - -/* Universal Compaction options */ - -enum { - rocksdb_similar_size_compaction_stop_style = 0, - rocksdb_total_size_compaction_stop_style = 1 -}; - -extern ROCKSDB_LIBRARY_API rocksdb_universal_compaction_options_t* -rocksdb_universal_compaction_options_create(); -extern ROCKSDB_LIBRARY_API void -rocksdb_universal_compaction_options_set_size_ratio( - rocksdb_universal_compaction_options_t*, int); -extern ROCKSDB_LIBRARY_API void -rocksdb_universal_compaction_options_set_min_merge_width( - rocksdb_universal_compaction_options_t*, int); -extern ROCKSDB_LIBRARY_API void -rocksdb_universal_compaction_options_set_max_merge_width( - rocksdb_universal_compaction_options_t*, int); -extern ROCKSDB_LIBRARY_API void -rocksdb_universal_compaction_options_set_max_size_amplification_percent( - rocksdb_universal_compaction_options_t*, int); -extern ROCKSDB_LIBRARY_API void -rocksdb_universal_compaction_options_set_compression_size_percent( - rocksdb_universal_compaction_options_t*, int); -extern ROCKSDB_LIBRARY_API void 
-rocksdb_universal_compaction_options_set_stop_style( - rocksdb_universal_compaction_options_t*, int); -extern ROCKSDB_LIBRARY_API void rocksdb_universal_compaction_options_destroy( - rocksdb_universal_compaction_options_t*); - -extern ROCKSDB_LIBRARY_API rocksdb_fifo_compaction_options_t* -rocksdb_fifo_compaction_options_create(); -extern ROCKSDB_LIBRARY_API void -rocksdb_fifo_compaction_options_set_max_table_files_size( - rocksdb_fifo_compaction_options_t* fifo_opts, uint64_t size); -extern ROCKSDB_LIBRARY_API void rocksdb_fifo_compaction_options_destroy( - rocksdb_fifo_compaction_options_t* fifo_opts); - -extern ROCKSDB_LIBRARY_API int rocksdb_livefiles_count( - const rocksdb_livefiles_t*); -extern ROCKSDB_LIBRARY_API const char* rocksdb_livefiles_name( - const rocksdb_livefiles_t*, int index); -extern ROCKSDB_LIBRARY_API int rocksdb_livefiles_level( - const rocksdb_livefiles_t*, int index); -extern ROCKSDB_LIBRARY_API size_t -rocksdb_livefiles_size(const rocksdb_livefiles_t*, int index); -extern ROCKSDB_LIBRARY_API const char* rocksdb_livefiles_smallestkey( - const rocksdb_livefiles_t*, int index, size_t* size); -extern ROCKSDB_LIBRARY_API const char* rocksdb_livefiles_largestkey( - const rocksdb_livefiles_t*, int index, size_t* size); -extern ROCKSDB_LIBRARY_API uint64_t rocksdb_livefiles_entries( - const rocksdb_livefiles_t*, int index); -extern ROCKSDB_LIBRARY_API uint64_t rocksdb_livefiles_deletions( - const rocksdb_livefiles_t*, int index); -extern ROCKSDB_LIBRARY_API void rocksdb_livefiles_destroy( - const rocksdb_livefiles_t*); - -/* Utility Helpers */ - -extern ROCKSDB_LIBRARY_API void rocksdb_get_options_from_string( - const rocksdb_options_t* base_options, const char* opts_str, - rocksdb_options_t* new_options, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_delete_file_in_range( - rocksdb_t* db, const char* start_key, size_t start_key_len, - const char* limit_key, size_t limit_key_len, char** errptr); - -extern ROCKSDB_LIBRARY_API void 
rocksdb_delete_file_in_range_cf( - rocksdb_t* db, rocksdb_column_family_handle_t* column_family, - const char* start_key, size_t start_key_len, const char* limit_key, - size_t limit_key_len, char** errptr); - -/* Transactions */ - -extern ROCKSDB_LIBRARY_API rocksdb_column_family_handle_t* -rocksdb_transactiondb_create_column_family( - rocksdb_transactiondb_t* txn_db, - const rocksdb_options_t* column_family_options, - const char* column_family_name, char** errptr); - -extern ROCKSDB_LIBRARY_API rocksdb_transactiondb_t* rocksdb_transactiondb_open( - const rocksdb_options_t* options, - const rocksdb_transactiondb_options_t* txn_db_options, const char* name, - char** errptr); - -rocksdb_transactiondb_t* rocksdb_transactiondb_open_column_families( - const rocksdb_options_t* options, - const rocksdb_transactiondb_options_t* txn_db_options, const char* name, - int num_column_families, const char* const* column_family_names, - const rocksdb_options_t* const* column_family_options, - rocksdb_column_family_handle_t** column_family_handles, char** errptr); - -extern ROCKSDB_LIBRARY_API const rocksdb_snapshot_t* -rocksdb_transactiondb_create_snapshot(rocksdb_transactiondb_t* txn_db); - -extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_release_snapshot( - rocksdb_transactiondb_t* txn_db, const rocksdb_snapshot_t* snapshot); - -extern ROCKSDB_LIBRARY_API rocksdb_transaction_t* rocksdb_transaction_begin( - rocksdb_transactiondb_t* txn_db, - const rocksdb_writeoptions_t* write_options, - const rocksdb_transaction_options_t* txn_options, - rocksdb_transaction_t* old_txn); - -extern ROCKSDB_LIBRARY_API void rocksdb_transaction_commit( - rocksdb_transaction_t* txn, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_transaction_rollback( - rocksdb_transaction_t* txn, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_transaction_set_savepoint( - rocksdb_transaction_t* txn); - -extern ROCKSDB_LIBRARY_API void rocksdb_transaction_rollback_to_savepoint( - 
rocksdb_transaction_t* txn, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_transaction_destroy( - rocksdb_transaction_t* txn); - -// This snapshot should be freed using rocksdb_free -extern ROCKSDB_LIBRARY_API const rocksdb_snapshot_t* -rocksdb_transaction_get_snapshot(rocksdb_transaction_t* txn); - -extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get( - rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, - const char* key, size_t klen, size_t* vlen, char** errptr); - -extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get_cf( - rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, - rocksdb_column_family_handle_t* column_family, const char* key, size_t klen, - size_t* vlen, char** errptr); - -extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get_for_update( - rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, - const char* key, size_t klen, size_t* vlen, unsigned char exclusive, - char** errptr); - -char* rocksdb_transaction_get_for_update_cf( - rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, - rocksdb_column_family_handle_t* column_family, const char* key, size_t klen, - size_t* vlen, unsigned char exclusive, char** errptr); - -extern ROCKSDB_LIBRARY_API char* rocksdb_transactiondb_get( - rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, - const char* key, size_t klen, size_t* vlen, char** errptr); - -extern ROCKSDB_LIBRARY_API char* rocksdb_transactiondb_get_cf( - rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, - rocksdb_column_family_handle_t* column_family, const char* key, - size_t keylen, size_t* vallen, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_transaction_put( - rocksdb_transaction_t* txn, const char* key, size_t klen, const char* val, - size_t vlen, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_transaction_put_cf( - rocksdb_transaction_t* txn, rocksdb_column_family_handle_t* column_family, - const char* 
key, size_t klen, const char* val, size_t vlen, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_put( - rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, - const char* key, size_t klen, const char* val, size_t vlen, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_put_cf( - rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, - rocksdb_column_family_handle_t* column_family, const char* key, - size_t keylen, const char* val, size_t vallen, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_write( - rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, - rocksdb_writebatch_t *batch, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_transaction_merge( - rocksdb_transaction_t* txn, const char* key, size_t klen, const char* val, - size_t vlen, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_transaction_merge_cf( - rocksdb_transaction_t* txn, rocksdb_column_family_handle_t* column_family, - const char* key, size_t klen, const char* val, size_t vlen, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_merge( - rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, - const char* key, size_t klen, const char* val, size_t vlen, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_merge_cf( - rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, - rocksdb_column_family_handle_t* column_family, const char* key, size_t klen, - const char* val, size_t vlen, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_transaction_delete( - rocksdb_transaction_t* txn, const char* key, size_t klen, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_transaction_delete_cf( - rocksdb_transaction_t* txn, rocksdb_column_family_handle_t* column_family, - const char* key, size_t klen, char** errptr); - -extern ROCKSDB_LIBRARY_API void 
rocksdb_transactiondb_delete( - rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, - const char* key, size_t klen, char** errptr); - -extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_delete_cf( - rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, - rocksdb_column_family_handle_t* column_family, const char* key, - size_t keylen, char** errptr); - -extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* -rocksdb_transaction_create_iterator(rocksdb_transaction_t* txn, - const rocksdb_readoptions_t* options); - -extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* -rocksdb_transaction_create_iterator_cf( - rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, - rocksdb_column_family_handle_t* column_family); - -extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* -rocksdb_transactiondb_create_iterator(rocksdb_transactiondb_t* txn_db, - const rocksdb_readoptions_t* options); - -extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* -rocksdb_transactiondb_create_iterator_cf( - rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, - rocksdb_column_family_handle_t* column_family); - -extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_close( - rocksdb_transactiondb_t* txn_db); - -extern ROCKSDB_LIBRARY_API rocksdb_checkpoint_t* -rocksdb_transactiondb_checkpoint_object_create(rocksdb_transactiondb_t* txn_db, - char** errptr); - -extern ROCKSDB_LIBRARY_API rocksdb_optimistictransactiondb_t* -rocksdb_optimistictransactiondb_open(const rocksdb_options_t* options, - const char* name, char** errptr); - -extern ROCKSDB_LIBRARY_API rocksdb_optimistictransactiondb_t* -rocksdb_optimistictransactiondb_open_column_families( - const rocksdb_options_t* options, const char* name, int num_column_families, - const char* const* column_family_names, - const rocksdb_options_t* const* column_family_options, - rocksdb_column_family_handle_t** column_family_handles, char** errptr); - -extern ROCKSDB_LIBRARY_API rocksdb_t* 
-rocksdb_optimistictransactiondb_get_base_db( - rocksdb_optimistictransactiondb_t* otxn_db); - -extern ROCKSDB_LIBRARY_API void rocksdb_optimistictransactiondb_close_base_db( - rocksdb_t* base_db); - -extern ROCKSDB_LIBRARY_API rocksdb_transaction_t* -rocksdb_optimistictransaction_begin( - rocksdb_optimistictransactiondb_t* otxn_db, - const rocksdb_writeoptions_t* write_options, - const rocksdb_optimistictransaction_options_t* otxn_options, - rocksdb_transaction_t* old_txn); - -extern ROCKSDB_LIBRARY_API void rocksdb_optimistictransactiondb_close( - rocksdb_optimistictransactiondb_t* otxn_db); - -/* Transaction Options */ - -extern ROCKSDB_LIBRARY_API rocksdb_transactiondb_options_t* -rocksdb_transactiondb_options_create(); - -extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_options_destroy( - rocksdb_transactiondb_options_t* opt); - -extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_options_set_max_num_locks( - rocksdb_transactiondb_options_t* opt, int64_t max_num_locks); - -extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_options_set_num_stripes( - rocksdb_transactiondb_options_t* opt, size_t num_stripes); - -extern ROCKSDB_LIBRARY_API void -rocksdb_transactiondb_options_set_transaction_lock_timeout( - rocksdb_transactiondb_options_t* opt, int64_t txn_lock_timeout); - -extern ROCKSDB_LIBRARY_API void -rocksdb_transactiondb_options_set_default_lock_timeout( - rocksdb_transactiondb_options_t* opt, int64_t default_lock_timeout); - -extern ROCKSDB_LIBRARY_API rocksdb_transaction_options_t* -rocksdb_transaction_options_create(); - -extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_destroy( - rocksdb_transaction_options_t* opt); - -extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_set_snapshot( - rocksdb_transaction_options_t* opt, unsigned char v); - -extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_deadlock_detect( - rocksdb_transaction_options_t* opt, unsigned char v); - -extern ROCKSDB_LIBRARY_API void 
rocksdb_transaction_options_set_lock_timeout( - rocksdb_transaction_options_t* opt, int64_t lock_timeout); - -extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_expiration( - rocksdb_transaction_options_t* opt, int64_t expiration); - -extern ROCKSDB_LIBRARY_API void -rocksdb_transaction_options_set_deadlock_detect_depth( - rocksdb_transaction_options_t* opt, int64_t depth); - -extern ROCKSDB_LIBRARY_API void -rocksdb_transaction_options_set_max_write_batch_size( - rocksdb_transaction_options_t* opt, size_t size); - -extern ROCKSDB_LIBRARY_API rocksdb_optimistictransaction_options_t* -rocksdb_optimistictransaction_options_create(); - -extern ROCKSDB_LIBRARY_API void rocksdb_optimistictransaction_options_destroy( - rocksdb_optimistictransaction_options_t* opt); - -extern ROCKSDB_LIBRARY_API void -rocksdb_optimistictransaction_options_set_set_snapshot( - rocksdb_optimistictransaction_options_t* opt, unsigned char v); - -// referring to convention (3), this should be used by client -// to free memory that was malloc()ed -extern ROCKSDB_LIBRARY_API void rocksdb_free(void* ptr); - -extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* rocksdb_get_pinned( - rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key, - size_t keylen, char** errptr); -extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* rocksdb_get_pinned_cf( - rocksdb_t* db, const rocksdb_readoptions_t* options, - rocksdb_column_family_handle_t* column_family, const char* key, - size_t keylen, char** errptr); -extern ROCKSDB_LIBRARY_API void rocksdb_pinnableslice_destroy( - rocksdb_pinnableslice_t* v); -extern ROCKSDB_LIBRARY_API const char* rocksdb_pinnableslice_value( - const rocksdb_pinnableslice_t* t, size_t* vlen); - -extern ROCKSDB_LIBRARY_API rocksdb_memory_consumers_t* - rocksdb_memory_consumers_create(); -extern ROCKSDB_LIBRARY_API void rocksdb_memory_consumers_add_db( - rocksdb_memory_consumers_t* consumers, rocksdb_t* db); -extern ROCKSDB_LIBRARY_API void 
rocksdb_memory_consumers_add_cache( - rocksdb_memory_consumers_t* consumers, rocksdb_cache_t* cache); -extern ROCKSDB_LIBRARY_API void rocksdb_memory_consumers_destroy( - rocksdb_memory_consumers_t* consumers); -extern ROCKSDB_LIBRARY_API rocksdb_memory_usage_t* -rocksdb_approximate_memory_usage_create(rocksdb_memory_consumers_t* consumers, - char** errptr); -extern ROCKSDB_LIBRARY_API void rocksdb_approximate_memory_usage_destroy( - rocksdb_memory_usage_t* usage); - -extern ROCKSDB_LIBRARY_API uint64_t -rocksdb_approximate_memory_usage_get_mem_table_total( - rocksdb_memory_usage_t* memory_usage); -extern ROCKSDB_LIBRARY_API uint64_t -rocksdb_approximate_memory_usage_get_mem_table_unflushed( - rocksdb_memory_usage_t* memory_usage); -extern ROCKSDB_LIBRARY_API uint64_t -rocksdb_approximate_memory_usage_get_mem_table_readers_total( - rocksdb_memory_usage_t* memory_usage); -extern ROCKSDB_LIBRARY_API uint64_t -rocksdb_approximate_memory_usage_get_cache_total( - rocksdb_memory_usage_t* memory_usage); - -extern ROCKSDB_LIBRARY_API void rocksdb_options_set_dump_malloc_stats( - rocksdb_options_t*, unsigned char); - -extern ROCKSDB_LIBRARY_API void -rocksdb_options_set_memtable_whole_key_filtering(rocksdb_options_t*, - unsigned char); - -extern ROCKSDB_LIBRARY_API void rocksdb_cancel_all_background_work( - rocksdb_t* db, unsigned char wait); - -#ifdef __cplusplus -} /* end extern "C" */ -#endif diff --git a/dist/darwin_amd64/include/rocksdb/cache.h b/dist/darwin_amd64/include/rocksdb/cache.h deleted file mode 100644 index e4c4043..0000000 --- a/dist/darwin_amd64/include/rocksdb/cache.h +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. -// -// A Cache is an interface that maps keys to values. It has internal -// synchronization and may be safely accessed concurrently from -// multiple threads. It may automatically evict entries to make room -// for new entries. Values have a specified charge against the cache -// capacity. For example, a cache where the values are variable -// length strings, may use the length of the string as the charge for -// the string. -// -// A builtin cache implementation with a least-recently-used eviction -// policy is provided. Clients may use their own implementations if -// they want something more sophisticated (like scan-resistance, a -// custom eviction policy, variable cache sizing, etc.) - -#pragma once - -#include -#include -#include -#include "rocksdb/memory_allocator.h" -#include "rocksdb/slice.h" -#include "rocksdb/statistics.h" -#include "rocksdb/status.h" - -namespace ROCKSDB_NAMESPACE { - -class Cache; -struct ConfigOptions; - -extern const bool kDefaultToAdaptiveMutex; - -enum CacheMetadataChargePolicy { - kDontChargeCacheMetadata, - kFullChargeCacheMetadata -}; -const CacheMetadataChargePolicy kDefaultCacheMetadataChargePolicy = - kFullChargeCacheMetadata; - -struct LRUCacheOptions { - // Capacity of the cache. - size_t capacity = 0; - - // Cache is sharded into 2^num_shard_bits shards, - // by hash of key. Refer to NewLRUCache for further - // information. - int num_shard_bits = -1; - - // If strict_capacity_limit is set, - // insert to the cache will fail when cache is full. - bool strict_capacity_limit = false; - - // Percentage of cache reserved for high priority entries. - // If greater than zero, the LRU list will be split into a high-pri - // list and a low-pri list. 
High-pri entries will be insert to the - // tail of high-pri list, while low-pri entries will be first inserted to - // the low-pri list (the midpoint). This is refered to as - // midpoint insertion strategy to make entries never get hit in cache - // age out faster. - // - // See also - // BlockBasedTableOptions::cache_index_and_filter_blocks_with_high_priority. - double high_pri_pool_ratio = 0.5; - - // If non-nullptr will use this allocator instead of system allocator when - // allocating memory for cache blocks. Call this method before you start using - // the cache! - // - // Caveat: when the cache is used as block cache, the memory allocator is - // ignored when dealing with compression libraries that allocate memory - // internally (currently only XPRESS). - std::shared_ptr memory_allocator; - - // Whether to use adaptive mutexes for cache shards. Note that adaptive - // mutexes need to be supported by the platform in order for this to have any - // effect. The default value is true if RocksDB is compiled with - // -DROCKSDB_DEFAULT_TO_ADAPTIVE_MUTEX, false otherwise. - bool use_adaptive_mutex = kDefaultToAdaptiveMutex; - - CacheMetadataChargePolicy metadata_charge_policy = - kDefaultCacheMetadataChargePolicy; - - LRUCacheOptions() {} - LRUCacheOptions(size_t _capacity, int _num_shard_bits, - bool _strict_capacity_limit, double _high_pri_pool_ratio, - std::shared_ptr _memory_allocator = nullptr, - bool _use_adaptive_mutex = kDefaultToAdaptiveMutex, - CacheMetadataChargePolicy _metadata_charge_policy = - kDefaultCacheMetadataChargePolicy) - : capacity(_capacity), - num_shard_bits(_num_shard_bits), - strict_capacity_limit(_strict_capacity_limit), - high_pri_pool_ratio(_high_pri_pool_ratio), - memory_allocator(std::move(_memory_allocator)), - use_adaptive_mutex(_use_adaptive_mutex), - metadata_charge_policy(_metadata_charge_policy) {} -}; - -// Create a new cache with a fixed size capacity. 
The cache is sharded -// to 2^num_shard_bits shards, by hash of the key. The total capacity -// is divided and evenly assigned to each shard. If strict_capacity_limit -// is set, insert to the cache will fail when cache is full. User can also -// set percentage of the cache reserves for high priority entries via -// high_pri_pool_pct. -// num_shard_bits = -1 means it is automatically determined: every shard -// will be at least 512KB and number of shard bits will not exceed 6. -extern std::shared_ptr NewLRUCache( - size_t capacity, int num_shard_bits = -1, - bool strict_capacity_limit = false, double high_pri_pool_ratio = 0.5, - std::shared_ptr memory_allocator = nullptr, - bool use_adaptive_mutex = kDefaultToAdaptiveMutex, - CacheMetadataChargePolicy metadata_charge_policy = - kDefaultCacheMetadataChargePolicy); - -extern std::shared_ptr NewLRUCache(const LRUCacheOptions& cache_opts); - -// Similar to NewLRUCache, but create a cache based on CLOCK algorithm with -// better concurrent performance in some cases. See util/clock_cache.cc for -// more detail. -// -// Return nullptr if it is not supported. -extern std::shared_ptr NewClockCache( - size_t capacity, int num_shard_bits = -1, - bool strict_capacity_limit = false, - CacheMetadataChargePolicy metadata_charge_policy = - kDefaultCacheMetadataChargePolicy); -class Cache { - public: - // Depending on implementation, cache entries with high priority could be less - // likely to get evicted than low priority entries. - enum class Priority { HIGH, LOW }; - - Cache(std::shared_ptr allocator = nullptr) - : memory_allocator_(std::move(allocator)) {} - // No copying allowed - Cache(const Cache&) = delete; - Cache& operator=(const Cache&) = delete; - - // Creates a new Cache based on the input value string and returns the result. 
- // Currently, this method can be used to create LRUCaches only - // @param config_options - // @param value The value might be: - // - an old-style cache ("1M") -- equivalent to NewLRUCache(1024*102( - // - Name-value option pairs -- "capacity=1M; num_shard_bits=4; - // For the LRUCache, the values are defined in LRUCacheOptions. - // @param result The new Cache object - // @return OK if the cache was sucessfully created - // @return NotFound if an invalid name was specified in the value - // @return InvalidArgument if either the options were not valid - static Status CreateFromString(const ConfigOptions& config_options, - const std::string& value, - std::shared_ptr* result); - - // Destroys all existing entries by calling the "deleter" - // function that was passed via the Insert() function. - // - // @See Insert - virtual ~Cache() {} - - // Opaque handle to an entry stored in the cache. - struct Handle {}; - - // The type of the Cache - virtual const char* Name() const = 0; - - // Insert a mapping from key->value into the cache and assign it - // the specified charge against the total cache capacity. - // If strict_capacity_limit is true and cache reaches its full capacity, - // return Status::Incomplete. - // - // If handle is not nullptr, returns a handle that corresponds to the - // mapping. The caller must call this->Release(handle) when the returned - // mapping is no longer needed. In case of error caller is responsible to - // cleanup the value (i.e. calling "deleter"). - // - // If handle is nullptr, it is as if Release is called immediately after - // insert. In case of error value will be cleanup. - // - // When the inserted entry is no longer needed, the key and - // value will be passed to "deleter". - virtual Status Insert(const Slice& key, void* value, size_t charge, - void (*deleter)(const Slice& key, void* value), - Handle** handle = nullptr, - Priority priority = Priority::LOW) = 0; - - // If the cache has no mapping for "key", returns nullptr. 
- // - // Else return a handle that corresponds to the mapping. The caller - // must call this->Release(handle) when the returned mapping is no - // longer needed. - // If stats is not nullptr, relative tickers could be used inside the - // function. - virtual Handle* Lookup(const Slice& key, Statistics* stats = nullptr) = 0; - - // Increments the reference count for the handle if it refers to an entry in - // the cache. Returns true if refcount was incremented; otherwise, returns - // false. - // REQUIRES: handle must have been returned by a method on *this. - virtual bool Ref(Handle* handle) = 0; - - /** - * Release a mapping returned by a previous Lookup(). A released entry might - * still remain in cache in case it is later looked up by others. If - * force_erase is set then it also erase it from the cache if there is no - * other reference to it. Erasing it should call the deleter function that - * was provided when the - * entry was inserted. - * - * Returns true if the entry was also erased. - */ - // REQUIRES: handle must not have been released yet. - // REQUIRES: handle must have been returned by a method on *this. - virtual bool Release(Handle* handle, bool force_erase = false) = 0; - - // Return the value encapsulated in a handle returned by a - // successful Lookup(). - // REQUIRES: handle must not have been released yet. - // REQUIRES: handle must have been returned by a method on *this. - virtual void* Value(Handle* handle) = 0; - - // If the cache contains entry for key, erase it. Note that the - // underlying entry will be kept around until all existing handles - // to it have been released. - virtual void Erase(const Slice& key) = 0; - // Return a new numeric id. May be used by multiple clients who are - // sharding the same cache to partition the key space. Typically the - // client will allocate a new id at startup and prepend the id to - // its cache keys. - virtual uint64_t NewId() = 0; - - // sets the maximum configured capacity of the cache. 
When the new - // capacity is less than the old capacity and the existing usage is - // greater than new capacity, the implementation will do its best job to - // purge the released entries from the cache in order to lower the usage - virtual void SetCapacity(size_t capacity) = 0; - - // Set whether to return error on insertion when cache reaches its full - // capacity. - virtual void SetStrictCapacityLimit(bool strict_capacity_limit) = 0; - - // Get the flag whether to return error on insertion when cache reaches its - // full capacity. - virtual bool HasStrictCapacityLimit() const = 0; - - // returns the maximum configured capacity of the cache - virtual size_t GetCapacity() const = 0; - - // returns the memory size for the entries residing in the cache. - virtual size_t GetUsage() const = 0; - - // returns the memory size for a specific entry in the cache. - virtual size_t GetUsage(Handle* handle) const = 0; - - // returns the memory size for the entries in use by the system - virtual size_t GetPinnedUsage() const = 0; - - // returns the charge for the specific entry in the cache. - virtual size_t GetCharge(Handle* handle) const = 0; - - // Call this on shutdown if you want to speed it up. Cache will disown - // any underlying data and will not free it on delete. This call will leak - // memory - call this only if you're shutting down the process. - // Any attempts of using cache after this call will fail terribly. - // Always delete the DB object before calling this method! - virtual void DisownData(){ - // default implementation is noop - } - - // Apply callback to all entries in the cache - // If thread_safe is true, it will also lock the accesses. Otherwise, it will - // access the cache without the lock held - virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t), - bool thread_safe) = 0; - - // Remove all entries. - // Prerequisite: no entry is referenced. 
- virtual void EraseUnRefEntries() = 0; - - virtual std::string GetPrintableOptions() const { return ""; } - - MemoryAllocator* memory_allocator() const { return memory_allocator_.get(); } - - private: - std::shared_ptr memory_allocator_; -}; - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/cleanable.h b/dist/darwin_amd64/include/rocksdb/cleanable.h deleted file mode 100644 index b6a70ea..0000000 --- a/dist/darwin_amd64/include/rocksdb/cleanable.h +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. - -#pragma once - -#include "rocksdb/rocksdb_namespace.h" - -namespace ROCKSDB_NAMESPACE { - -class Cleanable { - public: - Cleanable(); - // No copy constructor and copy assignment allowed. - Cleanable(Cleanable&) = delete; - Cleanable& operator=(Cleanable&) = delete; - - ~Cleanable(); - - // Move constructor and move assignment is allowed. - Cleanable(Cleanable&&); - Cleanable& operator=(Cleanable&&); - - // Clients are allowed to register function/arg1/arg2 triples that - // will be invoked when this iterator is destroyed. - // - // Note that unlike all of the preceding methods, this method is - // not abstract and therefore clients should not override it. 
- typedef void (*CleanupFunction)(void* arg1, void* arg2); - void RegisterCleanup(CleanupFunction function, void* arg1, void* arg2); - void DelegateCleanupsTo(Cleanable* other); - // DoCleanup and also resets the pointers for reuse - inline void Reset() { - DoCleanup(); - cleanup_.function = nullptr; - cleanup_.next = nullptr; - } - - protected: - struct Cleanup { - CleanupFunction function; - void* arg1; - void* arg2; - Cleanup* next; - }; - Cleanup cleanup_; - // It also becomes the owner of c - void RegisterCleanup(Cleanup* c); - - private: - // Performs all the cleanups. It does not reset the pointers. Making it - // private - // to prevent misuse - inline void DoCleanup() { - if (cleanup_.function != nullptr) { - (*cleanup_.function)(cleanup_.arg1, cleanup_.arg2); - for (Cleanup* c = cleanup_.next; c != nullptr;) { - (*c->function)(c->arg1, c->arg2); - Cleanup* next = c->next; - delete c; - c = next; - } - } - } -}; - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/compaction_filter.h b/dist/darwin_amd64/include/rocksdb/compaction_filter.h deleted file mode 100644 index ed17889..0000000 --- a/dist/darwin_amd64/include/rocksdb/compaction_filter.h +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// Copyright (c) 2013 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. 
- -#pragma once - -#include -#include -#include -#include - -#include "rocksdb/rocksdb_namespace.h" - -namespace ROCKSDB_NAMESPACE { - -class Slice; -class SliceTransform; - -// Context information of a compaction run -struct CompactionFilterContext { - // Does this compaction run include all data files - bool is_full_compaction; - // Is this compaction requested by the client (true), - // or is it occurring as an automatic compaction process - bool is_manual_compaction; -}; - -// CompactionFilter allows an application to modify/delete a key-value at -// the time of compaction. - -class CompactionFilter { - public: - enum ValueType { - kValue, - kMergeOperand, - kBlobIndex, // used internally by BlobDB. - }; - - enum class Decision { - kKeep, - kRemove, - kChangeValue, - kRemoveAndSkipUntil, - kChangeBlobIndex, // used internally by BlobDB. - kIOError, // used internally by BlobDB. - }; - - enum class BlobDecision { kKeep, kChangeValue, kCorruption, kIOError }; - - // Context information of a compaction run - struct Context { - // Does this compaction run include all data files - bool is_full_compaction; - // Is this compaction requested by the client (true), - // or is it occurring as an automatic compaction process - bool is_manual_compaction; - // Which column family this compaction is for. - uint32_t column_family_id; - }; - - virtual ~CompactionFilter() {} - - // The compaction process invokes this - // method for kv that is being compacted. A return value - // of false indicates that the kv should be preserved in the - // output of this compaction run and a return value of true - // indicates that this key-value should be removed from the - // output of the compaction. The application can inspect - // the existing value of the key and make decision based on it. - // - // Key-Values that are results of merge operation during compaction are not - // passed into this function. 
Currently, when you have a mix of Put()s and - // Merge()s on a same key, we only guarantee to process the merge operands - // through the compaction filters. Put()s might be processed, or might not. - // - // When the value is to be preserved, the application has the option - // to modify the existing_value and pass it back through new_value. - // value_changed needs to be set to true in this case. - // - // Note that RocksDB snapshots (i.e. call GetSnapshot() API on a - // DB* object) will not guarantee to preserve the state of the DB with - // CompactionFilter. Data seen from a snapshot might disppear after a - // compaction finishes. If you use snapshots, think twice about whether you - // want to use compaction filter and whether you are using it in a safe way. - // - // If multithreaded compaction is being used *and* a single CompactionFilter - // instance was supplied via Options::compaction_filter, this method may be - // called from different threads concurrently. The application must ensure - // that the call is thread-safe. - // - // If the CompactionFilter was created by a factory, then it will only ever - // be used by a single thread that is doing the compaction run, and this - // call does not need to be thread-safe. However, multiple filters may be - // in existence and operating concurrently. - virtual bool Filter(int /*level*/, const Slice& /*key*/, - const Slice& /*existing_value*/, - std::string* /*new_value*/, - bool* /*value_changed*/) const { - return false; - } - - // The compaction process invokes this method on every merge operand. If this - // method returns true, the merge operand will be ignored and not written out - // in the compaction output - // - // Note: If you are using a TransactionDB, it is not recommended to implement - // FilterMergeOperand(). If a Merge operation is filtered out, TransactionDB - // may not realize there is a write conflict and may allow a Transaction to - // Commit that should have failed. 
Instead, it is better to implement any - // Merge filtering inside the MergeOperator. - virtual bool FilterMergeOperand(int /*level*/, const Slice& /*key*/, - const Slice& /*operand*/) const { - return false; - } - - // An extended API. Called for both values and merge operands. - // Allows changing value and skipping ranges of keys. - // The default implementation uses Filter() and FilterMergeOperand(). - // If you're overriding this method, no need to override the other two. - // `value_type` indicates whether this key-value corresponds to a normal - // value (e.g. written with Put()) or a merge operand (written with Merge()). - // - // Possible return values: - // * kKeep - keep the key-value pair. - // * kRemove - remove the key-value pair or merge operand. - // * kChangeValue - keep the key and change the value/operand to *new_value. - // * kRemoveAndSkipUntil - remove this key-value pair, and also remove - // all key-value pairs with key in [key, *skip_until). This range - // of keys will be skipped without reading, potentially saving some - // IO operations compared to removing the keys one by one. - // - // *skip_until <= key is treated the same as Decision::kKeep - // (since the range [key, *skip_until) is empty). - // - // Caveats: - // - The keys are skipped even if there are snapshots containing them, - // i.e. values removed by kRemoveAndSkipUntil can disappear from a - // snapshot - beware if you're using TransactionDB or - // DB::GetSnapshot(). - // - If value for a key was overwritten or merged into (multiple Put()s - // or Merge()s), and compaction filter skips this key with - // kRemoveAndSkipUntil, it's possible that it will remove only - // the new value, exposing the old value that was supposed to be - // overwritten. - // - Doesn't work with PlainTableFactory in prefix mode. - // - If you use kRemoveAndSkipUntil, consider also reducing - // compaction_readahead_size option. 
- // - // Note: If you are using a TransactionDB, it is not recommended to filter - // out or modify merge operands (ValueType::kMergeOperand). - // If a merge operation is filtered out, TransactionDB may not realize there - // is a write conflict and may allow a Transaction to Commit that should have - // failed. Instead, it is better to implement any Merge filtering inside the - // MergeOperator. - virtual Decision FilterV2(int level, const Slice& key, ValueType value_type, - const Slice& existing_value, std::string* new_value, - std::string* /*skip_until*/) const { - switch (value_type) { - case ValueType::kValue: { - bool value_changed = false; - bool rv = Filter(level, key, existing_value, new_value, &value_changed); - if (rv) { - return Decision::kRemove; - } - return value_changed ? Decision::kChangeValue : Decision::kKeep; - } - case ValueType::kMergeOperand: { - bool rv = FilterMergeOperand(level, key, existing_value); - return rv ? Decision::kRemove : Decision::kKeep; - } - case ValueType::kBlobIndex: - return Decision::kKeep; - } - assert(false); - return Decision::kKeep; - } - - // Internal (BlobDB) use only. Do not override in application code. - virtual BlobDecision PrepareBlobOutput(const Slice& /* key */, - const Slice& /* existing_value */, - std::string* /* new_value */) const { - return BlobDecision::kKeep; - } - - // This function is deprecated. Snapshots will always be ignored for - // compaction filters, because we realized that not ignoring snapshots doesn't - // provide the gurantee we initially thought it would provide. Repeatable - // reads will not be guaranteed anyway. If you override the function and - // returns false, we will fail the compaction. - virtual bool IgnoreSnapshots() const { return true; } - - // Returns a name that identifies this compaction filter. - // The name will be printed to LOG file on start up for diagnosis. 
- virtual const char* Name() const = 0; -}; - -// Each compaction will create a new CompactionFilter allowing the -// application to know about different compactions -class CompactionFilterFactory { - public: - virtual ~CompactionFilterFactory() {} - - virtual std::unique_ptr CreateCompactionFilter( - const CompactionFilter::Context& context) = 0; - - // Returns a name that identifies this compaction filter factory. - virtual const char* Name() const = 0; -}; - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/compaction_job_stats.h b/dist/darwin_amd64/include/rocksdb/compaction_job_stats.h deleted file mode 100644 index 0f9c8fc..0000000 --- a/dist/darwin_amd64/include/rocksdb/compaction_job_stats.h +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once -#include -#include -#include - -#include "rocksdb/rocksdb_namespace.h" - -namespace ROCKSDB_NAMESPACE { -struct CompactionJobStats { - CompactionJobStats() { Reset(); } - void Reset(); - // Aggregate the CompactionJobStats from another instance with this one - void Add(const CompactionJobStats& stats); - - // the elapsed time of this compaction in microseconds. - uint64_t elapsed_micros; - - // the elapsed CPU time of this compaction in microseconds. - uint64_t cpu_micros; - - // the number of compaction input records. - uint64_t num_input_records; - // the number of compaction input files. - size_t num_input_files; - // the number of compaction input files at the output level. - size_t num_input_files_at_output_level; - - // the number of compaction output records. - uint64_t num_output_records; - // the number of compaction output files. 
- size_t num_output_files; - - // true if the compaction is a full compaction (all live SST files input) - bool is_full_compaction; - // true if the compaction is a manual compaction - bool is_manual_compaction; - - // the size of the compaction input in bytes. - uint64_t total_input_bytes; - // the size of the compaction output in bytes. - uint64_t total_output_bytes; - - // number of records being replaced by newer record associated with same key. - // this could be a new value or a deletion entry for that key so this field - // sums up all updated and deleted keys - uint64_t num_records_replaced; - - // the sum of the uncompressed input keys in bytes. - uint64_t total_input_raw_key_bytes; - // the sum of the uncompressed input values in bytes. - uint64_t total_input_raw_value_bytes; - - // the number of deletion entries before compaction. Deletion entries - // can disappear after compaction because they expired - uint64_t num_input_deletion_records; - // number of deletion records that were found obsolete and discarded - // because it is not possible to delete any more keys with this entry - // (i.e. all possible deletions resulting from it have been completed) - uint64_t num_expired_deletion_records; - - // number of corrupt keys (ParseInternalKey returned false when applied to - // the key) encountered and written out. - uint64_t num_corrupt_keys; - - // Following counters are only populated if - // options.report_bg_io_stats = true; - - // Time spent on file's Append() call. - uint64_t file_write_nanos; - - // Time spent on sync file range. - uint64_t file_range_sync_nanos; - - // Time spent on file fsync. - uint64_t file_fsync_nanos; - - // Time spent on preparing file write (fallocate, etc) - uint64_t file_prepare_write_nanos; - - // 0-terminated strings storing the first 8 bytes of the smallest and - // largest key in the output. 
- static const size_t kMaxPrefixLength = 8; - - std::string smallest_output_key_prefix; - std::string largest_output_key_prefix; - - // number of single-deletes which do not meet a put - uint64_t num_single_del_fallthru; - - // number of single-deletes which meet something other than a put - uint64_t num_single_del_mismatch; -}; -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/comparator.h b/dist/darwin_amd64/include/rocksdb/comparator.h deleted file mode 100644 index 53a46ad..0000000 --- a/dist/darwin_amd64/include/rocksdb/comparator.h +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. - -#pragma once - -#include - -#include "rocksdb/rocksdb_namespace.h" - -namespace ROCKSDB_NAMESPACE { - -class Slice; - -// A Comparator object provides a total order across slices that are -// used as keys in an sstable or a database. A Comparator implementation -// must be thread-safe since rocksdb may invoke its methods concurrently -// from multiple threads. -class Comparator { - public: - Comparator() : timestamp_size_(0) {} - - Comparator(size_t ts_sz) : timestamp_size_(ts_sz) {} - - Comparator(const Comparator& orig) : timestamp_size_(orig.timestamp_size_) {} - - Comparator& operator=(const Comparator& rhs) { - if (this != &rhs) { - timestamp_size_ = rhs.timestamp_size_; - } - return *this; - } - - virtual ~Comparator() {} - - static const char* Type() { return "Comparator"; } - // Three-way comparison. 
Returns value: - // < 0 iff "a" < "b", - // == 0 iff "a" == "b", - // > 0 iff "a" > "b" - // Note that Compare(a, b) also compares timestamp if timestamp size is - // non-zero. For the same user key with different timestamps, larger (newer) - // timestamp comes first. - virtual int Compare(const Slice& a, const Slice& b) const = 0; - - // Compares two slices for equality. The following invariant should always - // hold (and is the default implementation): - // Equal(a, b) iff Compare(a, b) == 0 - // Overwrite only if equality comparisons can be done more efficiently than - // three-way comparisons. - virtual bool Equal(const Slice& a, const Slice& b) const { - return Compare(a, b) == 0; - } - - // The name of the comparator. Used to check for comparator - // mismatches (i.e., a DB created with one comparator is - // accessed using a different comparator. - // - // The client of this package should switch to a new name whenever - // the comparator implementation changes in a way that will cause - // the relative ordering of any two keys to change. - // - // Names starting with "rocksdb." are reserved and should not be used - // by any clients of this package. - virtual const char* Name() const = 0; - - // Advanced functions: these are used to reduce the space requirements - // for internal data structures like index blocks. - - // If *start < limit, changes *start to a short string in [start,limit). - // Simple comparator implementations may return with *start unchanged, - // i.e., an implementation of this method that does nothing is correct. - virtual void FindShortestSeparator(std::string* start, - const Slice& limit) const = 0; - - // Changes *key to a short string >= *key. - // Simple comparator implementations may return with *key unchanged, - // i.e., an implementation of this method that does nothing is correct. - virtual void FindShortSuccessor(std::string* key) const = 0; - - // if it is a wrapped comparator, may return the root one. 
- // return itself it is not wrapped. - virtual const Comparator* GetRootComparator() const { return this; } - - // given two keys, determine if t is the successor of s - virtual bool IsSameLengthImmediateSuccessor(const Slice& /*s*/, - const Slice& /*t*/) const { - return false; - } - - // return true if two keys with different byte sequences can be regarded - // as equal by this comparator. - // The major use case is to determine if DataBlockHashIndex is compatible - // with the customized comparator. - virtual bool CanKeysWithDifferentByteContentsBeEqual() const { return true; } - - inline size_t timestamp_size() const { return timestamp_size_; } - - int CompareWithoutTimestamp(const Slice& a, const Slice& b) const { - return CompareWithoutTimestamp(a, /*a_has_ts=*/true, b, /*b_has_ts=*/true); - } - - // For two events e1 and e2 whose timestamps are t1 and t2 respectively, - // Returns value: - // < 0 iff t1 < t2 - // == 0 iff t1 == t2 - // > 0 iff t1 > t2 - // Note that an all-zero byte array will be the smallest (oldest) timestamp - // of the same length. - virtual int CompareTimestamp(const Slice& /*ts1*/, - const Slice& /*ts2*/) const { - return 0; - } - - virtual int CompareWithoutTimestamp(const Slice& a, bool /*a_has_ts*/, - const Slice& b, bool /*b_has_ts*/) const { - return Compare(a, b); - } - - private: - size_t timestamp_size_; -}; - -// Return a builtin comparator that uses lexicographic byte-wise -// ordering. The result remains the property of this module and -// must not be deleted. -extern const Comparator* BytewiseComparator(); - -// Return a builtin comparator that uses reverse lexicographic byte-wise -// ordering. 
-extern const Comparator* ReverseBytewiseComparator(); - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/compression_type.h b/dist/darwin_amd64/include/rocksdb/compression_type.h deleted file mode 100644 index bfeb00b..0000000 --- a/dist/darwin_amd64/include/rocksdb/compression_type.h +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#include "rocksdb/rocksdb_namespace.h" - -namespace ROCKSDB_NAMESPACE { - -// DB contents are stored in a set of blocks, each of which holds a -// sequence of key,value pairs. Each block may be compressed before -// being stored in a file. The following enum describes which -// compression method (if any) is used to compress a block. - -enum CompressionType : unsigned char { - // NOTE: do not change the values of existing entries, as these are - // part of the persistent format on disk. - kNoCompression = 0x0, - kSnappyCompression = 0x1, - kZlibCompression = 0x2, - kBZip2Compression = 0x3, - kLZ4Compression = 0x4, - kLZ4HCCompression = 0x5, - kXpressCompression = 0x6, - kZSTD = 0x7, - - // Only use kZSTDNotFinalCompression if you have to use ZSTD lib older than - // 0.8.0 or consider a possibility of downgrading the service or copying - // the database files to another service running with an older version of - // RocksDB that doesn't have kZSTD. Otherwise, you should use kZSTD. We will - // eventually remove the option from the public API. - kZSTDNotFinalCompression = 0x40, - - // kDisableCompressionOption is used to disable some compression options. 
- kDisableCompressionOption = 0xff, -}; - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/concurrent_task_limiter.h b/dist/darwin_amd64/include/rocksdb/concurrent_task_limiter.h deleted file mode 100644 index 4fc6b79..0000000 --- a/dist/darwin_amd64/include/rocksdb/concurrent_task_limiter.h +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. - -#pragma once - -#include "rocksdb/env.h" -#include "rocksdb/statistics.h" - -namespace ROCKSDB_NAMESPACE { - -class ConcurrentTaskLimiter { - public: - virtual ~ConcurrentTaskLimiter() {} - - // Returns a name that identifies this concurrent task limiter. - virtual const std::string& GetName() const = 0; - - // Set max concurrent tasks. - // limit = 0 means no new task allowed. - // limit < 0 means no limitation. - virtual void SetMaxOutstandingTask(int32_t limit) = 0; - - // Reset to unlimited max concurrent task. - virtual void ResetMaxOutstandingTask() = 0; - - // Returns current outstanding task count. - virtual int32_t GetOutstandingTask() const = 0; -}; - -// Create a ConcurrentTaskLimiter that can be shared with mulitple CFs -// across RocksDB instances to control concurrent tasks. -// -// @param name: Name of the limiter. -// @param limit: max concurrent tasks. -// limit = 0 means no new task allowed. -// limit < 0 means no limitation. 
-extern ConcurrentTaskLimiter* NewConcurrentTaskLimiter(const std::string& name, - int32_t limit); - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/configurable.h b/dist/darwin_amd64/include/rocksdb/configurable.h deleted file mode 100644 index f4bfbf5..0000000 --- a/dist/darwin_amd64/include/rocksdb/configurable.h +++ /dev/null @@ -1,364 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. - -#pragma once - -#include -#include -#include -#include - -#include "rocksdb/rocksdb_namespace.h" -#include "rocksdb/status.h" - -namespace ROCKSDB_NAMESPACE { -class Logger; -class ObjectRegistry; -class OptionTypeInfo; -struct ColumnFamilyOptions; -struct ConfigOptions; -struct DBOptions; - -// Configurable is a base class used by the rocksdb that describes a -// standard way of configuring objects. A Configurable object can: -// -> Populate itself given: -// - One or more "name/value" pair strings -// - A string repesenting the set of name=value properties -// - A map of name/value properties. -// -> Convert itself into its string representation -// -> Dump itself to a Logger -// -> Compare itself to another Configurable object to see if the two objects -// have equivalent options settings -// -// If a derived class calls RegisterOptions to register (by name) how its -// options objects are to be processed, this functionality can typically be -// handled by this class without additional overrides. Otherwise, the derived -// class will need to implement the methods for handling the corresponding -// functionality. 
-class Configurable { - protected: - friend class ConfigurableHelper; - struct RegisteredOptions { - // The name of the options being registered - std::string name; - // Pointer to the object being registered - void* opt_ptr; -#ifndef ROCKSDB_LITE - // The map of options being registered - const std::unordered_map* type_map; -#endif - }; - - public: - Configurable() : prepared_(false) {} - virtual ~Configurable() {} - - // Returns the raw pointer of the named options that is used by this - // object, or nullptr if this function is not supported. - // Since the return value is a raw pointer, the object owns the - // pointer and the caller should not delete the pointer. - // - // Note that changing the underlying options while the object - // is currently used by any open DB is undefined behavior. - // Developers should use DB::SetOption() instead to dynamically change - // options while the DB is open. - template - const T* GetOptions() const { - return GetOptions(T::kName()); - } - template - T* GetOptions() { - return GetOptions(T::kName()); - } - template - const T* GetOptions(const std::string& name) const { - return reinterpret_cast(GetOptionsPtr(name)); - } - template - T* GetOptions(const std::string& name) { - return reinterpret_cast(const_cast(GetOptionsPtr(name))); - } - - // Configures the options for this class based on the input parameters. - // On successful completion, the object is updated with the settings from - // the opt_map. - // If this method fails, an attempt is made to revert the object to original - // state. Note that the revert may not be the original state but may be an - // equivalent. For example, if the object contains an option that is a - // shared_ptr, the shared_ptr may not be the original one but a copy (e.g. not - // the Cache object that was passed in, but a Cache object of the same size). - // - // The acceptable values of the name/value pairs are documented with the - // specific class/instance. 
- // - // @param config_options Controls how the arguments are processed. - // @param opt_map Name/value pairs of the options to update - // @param unused If specified, this value will return the name/value - // pairs from opt_map that were NotFound for this object. - // @return OK If all values in the map were successfully updated - // If invoke_prepare_options is true, OK also implies - // PrepareOptions ran successfully. - // @return NotFound If any of the names in the opt_map were not valid - // for this object. If unused is specified, it will contain the - // collection of NotFound names. - // @return NotSupported If any of the names are valid but the object does - // not know how to convert the value. This can happen if, for example, - // there is some nested Configurable that cannot be created. - // @return InvalidArgument If any of the values cannot be successfully - // parsed. This can also be returned if PrepareOptions encounters an - // error. - // @see ConfigOptions for a description of the controls. - Status ConfigureFromMap( - const ConfigOptions& config_options, - const std::unordered_map& opt_map); - Status ConfigureFromMap( - const ConfigOptions& config_options, - const std::unordered_map& opt_map, - std::unordered_map* unused); - -#ifndef ROCKSDB_LITE - // Updates the named option to the input value, returning OK if successful. - // Note that ConfigureOption does not cause PrepareOptions to be invoked. - // @param config_options Controls how the name/value is processed. - // @param name The name of the option to update - // @param value The value to set for the named option - // @return OK If the named field was successfully updated to value. - // @return NotFound If the name is not valid for this object. - // @return NotSupported If the name is valid but the object does - // not know how to convert the value. This can happen if, for example, - // there is some nested Configurable that cannot be created. 
- // @return InvalidArgument If the value cannot be successfully parsed. - Status ConfigureOption(const ConfigOptions& config_options, - const std::string& name, const std::string& value); -#endif // ROCKSDB_LITE - - // Configures the options for this class based on the input parameters. - // On successful completion, the object is updated with the settings from - // the opt_map. If this method fails, an attempt is made to revert the - // object to original state. Note that the revert may not be the original - // state but may be an equivalent. - // @see ConfigureFromMap for more details - // @param config_options Controls how the arguments are processed. - // @param opt_str string containing the values to update. - // @param unused If specified, this value will return the name/value - // pairs from opt_map that were NotFound for this object. - // @return OK If all specified values were successfully updated - // If invoke_prepare_options is true, OK also implies - // PrepareOptions ran successfully. - // @return NotFound If any of the names were not valid for this object. - // If unused is specified, it will contain the collection of NotFound - // names. - // @return NotSupported If any of the names are valid but the object does - // not know how to convert the value. This can happen if, for example, - // there is some nested Configurable that cannot be created. - // @return InvalidArgument If any of the values cannot be successfully - // parsed. This can also be returned if PrepareOptions encounters an - // error. - Status ConfigureFromString(const ConfigOptions& config_options, - const std::string& opts); - - // Fills in result with the serialized options for this object. - // This is the inverse of ConfigureFromString. - // @param config_options Controls how serialization happens. - // @param result The string representation of this object. - // @return OK If the options for this object wer successfully serialized. 
- // @return InvalidArgument If one or more of the options could not be - // serialized. - Status GetOptionString(const ConfigOptions& config_options, - std::string* result) const; -#ifndef ROCKSDB_LITE - // Returns the serialized options for this object. - // This method is similar to GetOptionString with no errors. - // @param config_options Controls how serialization happens. - // @param prefix A string to prepend to every option. - // @return The serialized representation of the options for this object - std::string ToString(const ConfigOptions& config_options) const { - return ToString(config_options, ""); - } - std::string ToString(const ConfigOptions& config_options, - const std::string& prefix) const; - - // Returns the list of option names associated with this configurable - // @param config_options Controls how the names are returned - // @param result The set of option names for this object. Note that - // options that are deprecated or aliases are not returned. - // @return OK on success. - Status GetOptionNames(const ConfigOptions& config_options, - std::unordered_set* result) const; - - // Returns the value of the option associated with the input name - // This method is the functional inverse of ConfigureOption - // @param config_options Controls how the value is returned - // @param name The name of the option to return a value for. - // @param value The returned value associated with the named option. - // @return OK If the named field was successfully updated to value. - // @return NotFound If the name is not valid for this object. - // @param InvalidArgument If the name is valid for this object but - // its value cannot be serialized. - virtual Status GetOption(const ConfigOptions& config_options, - const std::string& name, std::string* value) const; -#endif // ROCKSDB_LITE - - // Checks to see if this Configurable is equivalent to other. - // This method assumes that the two objects are of the same class. 
- // @param config_options Controls how the options are compared. - // @param other The other object to compare to. - // @param mismatch If the objects do not match, this parameter contains - // the name of the option that triggered the match failure. - // @param True if the objects match, false otherwise. - virtual bool AreEquivalent(const ConfigOptions& config_options, - const Configurable* other, - std::string* name) const; - - // Returns a pretty-printed, human-readable version of the options. - // This method is typically used to dump the options to a log file. - // Classes should override this method - virtual std::string GetPrintableOptions() const { return ""; } - - // Validates that the settings are valid/consistent and performs any object - // initialization required by this object. This method may be called as part - // of Configure (if invoke_prepare_options is set), or may be invoked - // separately. - // - // Once an object has been prepared, non-mutable options can no longer be - // updated. - // - // Classes must override this method to provide any implementation-specific - // initialization, such as opening log files or setting up cache parameters. - // Implementations should be idempotent (e.g. don't re-open the log file or - // reconfigure the cache), as there is the potential this method can be called - // more than once. - // - // By default, this method will also prepare all nested (Inner and - // OptionType::kConfigurable) objects. - // - // @param config_options Controls how the object is prepared. Also contains - // a Logger and Env that can be used to initialize this object. - // @return OK If the object was successfully initialized. - // @return InvalidArgument If this object could not be successfull - // initialized. - virtual Status PrepareOptions(const ConfigOptions& config_options); - - // Checks to see if the settings are valid for this object. 
- // This method checks to see if the input DBOptions and ColumnFamilyOptions - // are valid for the settings of this object. For example, an Env might not - // support certain mmap modes or a TableFactory might require certain - // settings. - // - // By default, this method will also validate all nested (Inner and - // OptionType::kConfigurable) objects. - // - // @param db_opts The DBOptions to validate - // @param cf_opts The ColumnFamilyOptions to validate - // @return OK if the options are valid - // @return InvalidArgument If the arguments are not valid for the options - // of the current object. - virtual Status ValidateOptions(const DBOptions& db_opts, - const ColumnFamilyOptions& cf_opts) const; - - // Returns true if this object has been initialized via PrepareOptions, false - // otherwise. Once an object has been prepared, only mutable options may be - // changed. - virtual bool IsPrepared() const { return prepared_; } - - protected: - // True once the object is prepared. Once the object is prepared, only - // mutable options can be configured. - bool prepared_; - // If this class is a wrapper (has-a), this method should be - // over-written to return the inner configurable (like an EnvWrapper). - // This method should NOT recurse, but should instead return the - // direct Inner object. - virtual Configurable* Inner() const { return nullptr; } - - // Returns the raw pointer for the associated named option. - // The name is typically the name of an option registered via the - // Classes may override this method to provide further specialization (such as - // returning a sub-option) - // - // The default implemntation looks at the registered options. If the - // input name matches that of a registered option, the pointer registered - // with that name is returned. 
- // e.g,, RegisterOptions("X", &my_ptr, ...); GetOptionsPtr("X") returns - // "my_ptr" - virtual const void* GetOptionsPtr(const std::string& name) const; - - // Method for allowing options to be configured outside of the normal - // registered options framework. Classes may override this method if they - // wish to support non-standard options implementations (such as configuring - // themselves from constant or simple ":"-separated strings. - // - // The default implementation does nothing and returns OK - virtual Status ParseStringOptions(const ConfigOptions& config_options, - const std::string& opts_str); - - // Internal method to configure an object from a map of name-value options. - // This method uses the input config_options to drive the configuration of - // the options in opt_map. Any option name that cannot be found from the - // input set will be returned in "unused". - // - // Classes may override this method to extend the functionality if required. - // @param config_options Controls how the options are configured and errors - // handled. - // @param opts_map The set of options to configure - // @param unused Any options from opt_map that were not configured. - // @returns a Status based on the rules outlined in ConfigureFromMap - virtual Status ConfigureOptions( - const ConfigOptions& config_options, - const std::unordered_map& opts_map, - std::unordered_map* unused); - -#ifndef ROCKSDB_LITE - // Method that configures a the specific opt_name from opt_value. - // By default, this method calls opt_info.ParseOption with the - // input parameters. - // Classes may override this method to extend the functionality, or - // change the returned Status. 
- virtual Status ParseOption(const ConfigOptions& config_options, - const OptionTypeInfo& opt_info, - const std::string& opt_name, - const std::string& opt_value, void* opt_ptr); - - // Internal method to see if the single option name/info matches for this and - // that Classes may override this value to change its behavior. - // @param config_options Controls how the options are being matched - // @param opt_info The OptionTypeInfo registered for this option name - // that controls what field is matched (offset) and how (type). - // @param name The name associated with this opt_info. - // @param this_ptr The base pointer to compare to. This is the object - // registered for - // for this OptionTypeInfo. - // @param that_ptr The other pointer to compare to. This is the object - // registered for - // for this OptionTypeInfo. - // @param bad_name If the match fails, the name of the option that failed to - // match. - virtual bool OptionsAreEqual(const ConfigOptions& config_options, - const OptionTypeInfo& opt_info, - const std::string& name, - const void* const this_ptr, - const void* const that_ptr, - std::string* bad_name) const; -#endif -#ifndef ROCKSDB_LITE - // Internal method to serialize options (ToString) - // Classes may override this value to change its behavior. - virtual std::string SerializeOptions(const ConfigOptions& config_options, - const std::string& header) const; -#endif // ROCKSDB_LITE - - // Given a name (e.g. rocksdb.my.type.opt), returns the short name (opt) - virtual std::string GetOptionName(const std::string& long_name) const; - - private: - // Contains the collection of options (name, opt_ptr, opt_map) associated with - // this object. 
This collection is typically set in the constructor of the - // Configurable option via - std::vector options_; -}; -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/convenience.h b/dist/darwin_amd64/include/rocksdb/convenience.h deleted file mode 100644 index f861b2f..0000000 --- a/dist/darwin_amd64/include/rocksdb/convenience.h +++ /dev/null @@ -1,499 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#include -#include -#include - -#include "rocksdb/compression_type.h" -#include "rocksdb/db.h" -#include "rocksdb/status.h" -#include "rocksdb/table.h" - -namespace ROCKSDB_NAMESPACE { -class Env; -struct ColumnFamilyOptions; -struct DBOptions; -struct Options; - -// ConfigOptions containing the parameters/controls for -// comparing objects and converting to/from strings. -// These settings control how the methods -// treat errors (e.g. ignore_unknown_objects), the format -// of the serialization (e.g. delimiter), and how to compare -// options (sanity_level). -struct ConfigOptions { - // This enum defines the RocksDB options sanity level. - enum SanityLevel : unsigned char { - kSanityLevelNone = 0x01, // Performs no sanity check at all. - // Performs minimum check to ensure the RocksDB instance can be - // opened without corrupting / mis-interpreting the data. - kSanityLevelLooselyCompatible = 0x02, - // Perform exact match sanity check. 
- kSanityLevelExactMatch = 0xFF, - }; - - enum Depth { - kDepthDefault, // Traverse nested options that are not flagged as "shallow" - kDepthShallow, // Do not traverse into any nested options - kDepthDetailed, // Traverse nested options, overriding the options shallow - // setting - }; - - // When true, any unused options will be ignored and OK will be returned - bool ignore_unknown_options = false; - - // When true, any unsupported options will be ignored and OK will be returned - bool ignore_unsupported_options = true; - - // If the strings are escaped (old-style?) - bool input_strings_escaped = true; - - // Whether or not to invoke PrepareOptions after configure is called. - bool invoke_prepare_options = true; - - // The separator between options when converting to a string - std::string delimiter = ";"; - - // Controls how to traverse options during print/match stages - Depth depth = Depth::kDepthDefault; - - // Controls how options are serialized - // Controls how pedantic the comparison must be for equivalency - SanityLevel sanity_level = SanityLevel::kSanityLevelExactMatch; - // `file_readahead_size` is used for readahead for the option file. - size_t file_readahead_size = 512 * 1024; - - // The environment to use for this option - Env* env = Env::Default(); - - bool IsShallow() const { return depth == Depth::kDepthShallow; } - bool IsDetailed() const { return depth == Depth::kDepthDetailed; } - - bool IsCheckDisabled() const { - return sanity_level == SanityLevel::kSanityLevelNone; - } - - bool IsCheckEnabled(SanityLevel level) const { - return (level > SanityLevel::kSanityLevelNone && level <= sanity_level); - } -}; - -#ifndef ROCKSDB_LITE - -// The following set of functions provide a way to construct RocksDB Options -// from a string or a string-to-string map. Here're the general rule of -// setting option values from strings by type. Some RocksDB types are also -// supported in these APIs. 
Please refer to the comment of the function itself -// to find more information about how to config those RocksDB types. -// -// * Strings: -// Strings will be used as values directly without any truncating or -// trimming. -// -// * Booleans: -// - "true" or "1" => true -// - "false" or "0" => false. -// [Example]: -// - {"optimize_filters_for_hits", "1"} in GetColumnFamilyOptionsFromMap, or -// - "optimize_filters_for_hits=true" in GetColumnFamilyOptionsFromString. -// -// * Integers: -// Integers are converted directly from string, in addition to the following -// units that we support: -// - 'k' or 'K' => 2^10 -// - 'm' or 'M' => 2^20 -// - 'g' or 'G' => 2^30 -// - 't' or 'T' => 2^40 // only for unsigned int with sufficient bits. -// [Example]: -// - {"arena_block_size", "19G"} in GetColumnFamilyOptionsFromMap, or -// - "arena_block_size=19G" in GetColumnFamilyOptionsFromString. -// -// * Doubles / Floating Points: -// Doubles / Floating Points are converted directly from string. Note that -// currently we do not support units. -// [Example]: -// - {"hard_rate_limit", "2.1"} in GetColumnFamilyOptionsFromMap, or -// - "hard_rate_limit=2.1" in GetColumnFamilyOptionsFromString. -// * Array / Vectors: -// An array is specified by a list of values, where ':' is used as -// the delimiter to separate each value. -// [Example]: -// - {"compression_per_level", "kNoCompression:kSnappyCompression"} -// in GetColumnFamilyOptionsFromMap, or -// - "compression_per_level=kNoCompression:kSnappyCompression" in -// GetColumnFamilyOptionsFromMapString -// * Enums: -// The valid values of each enum are identical to the names of its constants. -// [Example]: -// - CompressionType: valid values are "kNoCompression", -// "kSnappyCompression", "kZlibCompression", "kBZip2Compression", ... -// - CompactionStyle: valid values are "kCompactionStyleLevel", -// "kCompactionStyleUniversal", "kCompactionStyleFIFO", and -// "kCompactionStyleNone". 
-// - -// Take a default ColumnFamilyOptions "base_options" in addition to a -// map "opts_map" of option name to option value to construct the new -// ColumnFamilyOptions "new_options". -// -// Below are the instructions of how to config some non-primitive-typed -// options in ColumnFOptions: -// -// * table_factory: -// table_factory can be configured using our custom nested-option syntax. -// -// {option_a=value_a; option_b=value_b; option_c=value_c; ... } -// -// A nested option is enclosed by two curly braces, within which there are -// multiple option assignments. Each assignment is of the form -// "variable_name=value;". -// -// Currently we support the following types of TableFactory: -// - BlockBasedTableFactory: -// Use name "block_based_table_factory" to initialize table_factory with -// BlockBasedTableFactory. Its BlockBasedTableFactoryOptions can be -// configured using the nested-option syntax. -// [Example]: -// * {"block_based_table_factory", "{block_cache=1M;block_size=4k;}"} -// is equivalent to assigning table_factory with a BlockBasedTableFactory -// that has 1M LRU block-cache with block size equals to 4k: -// ColumnFamilyOptions cf_opt; -// BlockBasedTableOptions blk_opt; -// blk_opt.block_cache = NewLRUCache(1 * 1024 * 1024); -// blk_opt.block_size = 4 * 1024; -// cf_opt.table_factory.reset(NewBlockBasedTableFactory(blk_opt)); -// - PlainTableFactory: -// Use name "plain_table_factory" to initialize table_factory with -// PlainTableFactory. Its PlainTableFactoryOptions can be configured using -// the nested-option syntax. -// [Example]: -// * {"plain_table_factory", "{user_key_len=66;bloom_bits_per_key=20;}"} -// -// * memtable_factory: -// Use "memtable" to config memtable_factory. Here are the supported -// memtable factories: -// - SkipList: -// Pass "skip_list:" to config memtable to use SkipList, -// or simply "skip_list" to use the default SkipList. 
-// [Example]: -// * {"memtable", "skip_list:5"} is equivalent to setting -// memtable to SkipListFactory(5). -// - PrefixHash: -// Pass "prfix_hash:" to config memtable -// to use PrefixHash, or simply "prefix_hash" to use the default -// PrefixHash. -// [Example]: -// * {"memtable", "prefix_hash:1000"} is equivalent to setting -// memtable to NewHashSkipListRepFactory(hash_bucket_count). -// - HashLinkedList: -// Pass "hash_linkedlist:" to config memtable -// to use HashLinkedList, or simply "hash_linkedlist" to use the default -// HashLinkedList. -// [Example]: -// * {"memtable", "hash_linkedlist:1000"} is equivalent to -// setting memtable to NewHashLinkListRepFactory(1000). -// - VectorRepFactory: -// Pass "vector:" to config memtable to use VectorRepFactory, -// or simply "vector" to use the default Vector memtable. -// [Example]: -// * {"memtable", "vector:1024"} is equivalent to setting memtable -// to VectorRepFactory(1024). -// -// * compression_opts: -// Use "compression_opts" to config compression_opts. The value format -// is of the form ":::". -// [Example]: -// * {"compression_opts", "4:5:6:7"} is equivalent to setting: -// ColumnFamilyOptions cf_opt; -// cf_opt.compression_opts.window_bits = 4; -// cf_opt.compression_opts.level = 5; -// cf_opt.compression_opts.strategy = 6; -// cf_opt.compression_opts.max_dict_bytes = 7; -// -// The GetColumnFamilyOptionsFromMap(ConfigOptions, ...) should be used; the -// alternative signature may be deprecated in a future release. The equivalent -// functionality can be achieved by setting the corresponding options in -// the ConfigOptions parameter. -// -// @param config_options controls how the map is processed. -// @param base_options the default options of the output "new_options". -// @param opts_map an option name to value map for specifying how "new_options" -// should be set. -// @param new_options the resulting options based on "base_options" with the -// change specified in "opts_map". 
-// @param input_strings_escaped when set to true, each escaped characters -// prefixed by '\' in the values of the opts_map will be further converted -// back to the raw string before assigning to the associated options. -// @param ignore_unknown_options when set to true, unknown options are ignored -// instead of resulting in an unknown-option error. -// @return Status::OK() on success. Otherwise, a non-ok status indicating -// error will be returned, and "new_options" will be set to "base_options". -// @return Status::NotFound means the one (or more) of the option name in -// the opts_map is not valid for this option -// @return Status::NotSupported means we do not know how to parse one of the -// value for this option -// @return Status::InvalidArgument means the one of the option values is not -// valid for this option. -Status GetColumnFamilyOptionsFromMap( - const ConfigOptions& config_options, - const ColumnFamilyOptions& base_options, - const std::unordered_map& opts_map, - ColumnFamilyOptions* new_options); -Status GetColumnFamilyOptionsFromMap( - const ColumnFamilyOptions& base_options, - const std::unordered_map& opts_map, - ColumnFamilyOptions* new_options, bool input_strings_escaped = false, - bool ignore_unknown_options = false); - -// Take a default DBOptions "base_options" in addition to a -// map "opts_map" of option name to option value to construct the new -// DBOptions "new_options". -// -// Below are the instructions of how to config some non-primitive-typed -// options in DBOptions: -// -// * rate_limiter_bytes_per_sec: -// RateLimiter can be configured directly by specifying its bytes_per_sec. -// [Example]: -// - Passing {"rate_limiter_bytes_per_sec", "1024"} is equivalent to -// passing NewGenericRateLimiter(1024) to rate_limiter_bytes_per_sec. -// -// The GetDBOptionsFromMap(ConfigOptions, ...) should be used; the -// alternative signature may be deprecated in a future release. 
The equivalent -// functionality can be achieved by setting the corresponding options in -// the ConfigOptions parameter. -// -// @param config_options controls how the map is processed. -// @param base_options the default options of the output "new_options". -// @param opts_map an option name to value map for specifying how "new_options" -// should be set. -// @param new_options the resulting options based on "base_options" with the -// change specified in "opts_map". -// @param input_strings_escaped when set to true, each escaped characters -// prefixed by '\' in the values of the opts_map will be further converted -// back to the raw string before assigning to the associated options. -// @param ignore_unknown_options when set to true, unknown options are ignored -// instead of resulting in an unknown-option error. -// @return Status::OK() on success. Otherwise, a non-ok status indicating -// error will be returned, and "new_options" will be set to "base_options". -// @return Status::NotFound means the one (or more) of the option name in -// the opts_map is not valid for this option -// @return Status::NotSupported means we do not know how to parse one of the -// value for this option -// @return Status::InvalidArgument means the one of the option values is not -// valid for this option. -Status GetDBOptionsFromMap( - const ConfigOptions& cfg_options, const DBOptions& base_options, - const std::unordered_map& opts_map, - DBOptions* new_options); -Status GetDBOptionsFromMap( - const DBOptions& base_options, - const std::unordered_map& opts_map, - DBOptions* new_options, bool input_strings_escaped = false, - bool ignore_unknown_options = false); - -// Take a default BlockBasedTableOptions "table_options" in addition to a -// map "opts_map" of option name to option value to construct the new -// BlockBasedTableOptions "new_table_options". 
-// -// Below are the instructions of how to config some non-primitive-typed -// options in BlockBasedTableOptions: -// -// * filter_policy: -// We currently only support the following FilterPolicy in the convenience -// functions: -// - BloomFilter: use "bloomfilter:[bits_per_key]:[use_block_based_builder]" -// to specify BloomFilter. The above string is equivalent to calling -// NewBloomFilterPolicy(bits_per_key, use_block_based_builder). -// [Example]: -// - Pass {"filter_policy", "bloomfilter:4:true"} in -// GetBlockBasedTableOptionsFromMap to use a BloomFilter with 4-bits -// per key and use_block_based_builder enabled. -// -// * block_cache / block_cache_compressed: -// We currently only support LRU cache in the GetOptions API. The LRU -// cache can be set by directly specifying its size. -// [Example]: -// - Passing {"block_cache", "1M"} in GetBlockBasedTableOptionsFromMap is -// equivalent to setting block_cache using NewLRUCache(1024 * 1024). -// -// The GetBlockBasedTableOptionsFromMap(ConfigOptions, ...) should be used; -// the alternative signature may be deprecated in a future release. The -// equivalent functionality can be achieved by setting the corresponding -// options in the ConfigOptions parameter. -// -// @param config_options controls how the map is processed. -// @param table_options the default options of the output "new_table_options". -// @param opts_map an option name to value map for specifying how -// "new_table_options" should be set. -// @param new_table_options the resulting options based on "table_options" -// with the change specified in "opts_map". -// @param input_strings_escaped when set to true, each escaped characters -// prefixed by '\' in the values of the opts_map will be further converted -// back to the raw string before assigning to the associated options. -// @param ignore_unknown_options when set to true, unknown options are ignored -// instead of resulting in an unknown-option error. 
-// @return Status::OK() on success. Otherwise, a non-ok status indicating -// error will be returned, and "new_table_options" will be set to -// "table_options". -Status GetBlockBasedTableOptionsFromMap( - const ConfigOptions& config_options, - const BlockBasedTableOptions& table_options, - const std::unordered_map& opts_map, - BlockBasedTableOptions* new_table_options); -Status GetBlockBasedTableOptionsFromMap( - const BlockBasedTableOptions& table_options, - const std::unordered_map& opts_map, - BlockBasedTableOptions* new_table_options, - bool input_strings_escaped = false, bool ignore_unknown_options = false); - -// Take a default PlainTableOptions "table_options" in addition to a -// map "opts_map" of option name to option value to construct the new -// PlainTableOptions "new_table_options". -// -// The GetPlainTableOptionsFromMap(ConfigOptions, ...) should be used; the -// alternative signature may be deprecated in a future release. The equivalent -// functionality can be achieved by setting the corresponding options in -// the ConfigOptions parameter. -// -// @param config_options controls how the map is processed. -// @param table_options the default options of the output "new_table_options". -// @param opts_map an option name to value map for specifying how -// "new_table_options" should be set. -// @param new_table_options the resulting options based on "table_options" -// with the change specified in "opts_map". -// @param input_strings_escaped when set to true, each escaped characters -// prefixed by '\' in the values of the opts_map will be further converted -// back to the raw string before assigning to the associated options. -// @param ignore_unknown_options when set to true, unknown options are ignored -// instead of resulting in an unknown-option error. -// @return Status::OK() on success. Otherwise, a non-ok status indicating -// error will be returned, and "new_table_options" will be set to -// "table_options". 
-Status GetPlainTableOptionsFromMap( - const ConfigOptions& config_options, const PlainTableOptions& table_options, - const std::unordered_map& opts_map, - PlainTableOptions* new_table_options); -Status GetPlainTableOptionsFromMap( - const PlainTableOptions& table_options, - const std::unordered_map& opts_map, - PlainTableOptions* new_table_options, bool input_strings_escaped = false, - bool ignore_unknown_options = false); - -// Take a string representation of option names and values, apply them into the -// base_options, and return the new options as a result. The string has the -// following format: -// "write_buffer_size=1024;max_write_buffer_number=2" -// Nested options config is also possible. For example, you can define -// BlockBasedTableOptions as part of the string for block-based table factory: -// "write_buffer_size=1024;block_based_table_factory={block_size=4k};" -// "max_write_buffer_num=2" -// -// -// The GetColumnFamilyOptionsFromString(ConfigOptions, ...) should be used; the -// alternative signature may be deprecated in a future release. The equivalent -// functionality can be achieved by setting the corresponding options in -// the ConfigOptions parameter. 
-Status GetColumnFamilyOptionsFromString(const ConfigOptions& config_options, - const ColumnFamilyOptions& base_options, - const std::string& opts_str, - ColumnFamilyOptions* new_options); -Status GetColumnFamilyOptionsFromString(const ColumnFamilyOptions& base_options, - const std::string& opts_str, - ColumnFamilyOptions* new_options); - -Status GetDBOptionsFromString(const ConfigOptions& config_options, - const DBOptions& base_options, - const std::string& opts_str, - DBOptions* new_options); - -Status GetDBOptionsFromString(const DBOptions& base_options, - const std::string& opts_str, - DBOptions* new_options); - -Status GetStringFromDBOptions(const ConfigOptions& config_options, - const DBOptions& db_options, - std::string* opts_str); - -Status GetStringFromDBOptions(std::string* opts_str, - const DBOptions& db_options, - const std::string& delimiter = "; "); - -Status GetStringFromColumnFamilyOptions(const ConfigOptions& config_options, - const ColumnFamilyOptions& cf_options, - std::string* opts_str); -Status GetStringFromColumnFamilyOptions(std::string* opts_str, - const ColumnFamilyOptions& cf_options, - const std::string& delimiter = "; "); -Status GetStringFromCompressionType(std::string* compression_str, - CompressionType compression_type); - -std::vector GetSupportedCompressions(); - -Status GetBlockBasedTableOptionsFromString( - const BlockBasedTableOptions& table_options, const std::string& opts_str, - BlockBasedTableOptions* new_table_options); -Status GetBlockBasedTableOptionsFromString( - const ConfigOptions& config_options, - const BlockBasedTableOptions& table_options, const std::string& opts_str, - BlockBasedTableOptions* new_table_options); - -Status GetPlainTableOptionsFromString(const PlainTableOptions& table_options, - const std::string& opts_str, - PlainTableOptions* new_table_options); -Status GetPlainTableOptionsFromString(const ConfigOptions& config_options, - const PlainTableOptions& table_options, - const std::string& opts_str, - 
PlainTableOptions* new_table_options); - -Status GetMemTableRepFactoryFromString( - const std::string& opts_str, - std::unique_ptr* new_mem_factory); - -Status GetOptionsFromString(const Options& base_options, - const std::string& opts_str, Options* new_options); -Status GetOptionsFromString(const ConfigOptions& config_options, - const Options& base_options, - const std::string& opts_str, Options* new_options); - -Status StringToMap(const std::string& opts_str, - std::unordered_map* opts_map); - -// Request stopping background work, if wait is true wait until it's done -void CancelAllBackgroundWork(DB* db, bool wait = false); - -// Delete files which are entirely in the given range -// Could leave some keys in the range which are in files which are not -// entirely in the range. Also leaves L0 files regardless of whether they're -// in the range. -// Snapshots before the delete might not see the data in the given range. -Status DeleteFilesInRange(DB* db, ColumnFamilyHandle* column_family, - const Slice* begin, const Slice* end, - bool include_end = true); - -// Delete files in multiple ranges at once -// Delete files in a lot of ranges one at a time can be slow, use this API for -// better performance in that case. 
-Status DeleteFilesInRanges(DB* db, ColumnFamilyHandle* column_family, - const RangePtr* ranges, size_t n, - bool include_end = true); - -// Verify the checksum of file -Status VerifySstFileChecksum(const Options& options, - const EnvOptions& env_options, - const std::string& file_path); - -// Verify the checksum of file -Status VerifySstFileChecksum(const Options& options, - const EnvOptions& env_options, - const ReadOptions& read_options, - const std::string& file_path); - -#endif // ROCKSDB_LITE - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/db.h b/dist/darwin_amd64/include/rocksdb/db.h deleted file mode 100644 index 6eab3f3..0000000 --- a/dist/darwin_amd64/include/rocksdb/db.h +++ /dev/null @@ -1,1685 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. 
- -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include "rocksdb/iterator.h" -#include "rocksdb/listener.h" -#include "rocksdb/metadata.h" -#include "rocksdb/options.h" -#include "rocksdb/snapshot.h" -#include "rocksdb/sst_file_writer.h" -#include "rocksdb/thread_status.h" -#include "rocksdb/transaction_log.h" -#include "rocksdb/types.h" -#include "rocksdb/version.h" - -#ifdef _WIN32 -// Windows API macro interference -#undef DeleteFile -#endif - -#if defined(__GNUC__) || defined(__clang__) -#define ROCKSDB_DEPRECATED_FUNC __attribute__((__deprecated__)) -#elif _WIN32 -#define ROCKSDB_DEPRECATED_FUNC __declspec(deprecated) -#endif - -namespace ROCKSDB_NAMESPACE { - -struct Options; -struct DBOptions; -struct ColumnFamilyOptions; -struct ReadOptions; -struct WriteOptions; -struct FlushOptions; -struct CompactionOptions; -struct CompactRangeOptions; -struct TableProperties; -struct ExternalSstFileInfo; -class WriteBatch; -class Env; -class EventListener; -class StatsHistoryIterator; -class TraceWriter; -#ifdef ROCKSDB_LITE -class CompactionJobInfo; -#endif -class FileSystem; - -extern const std::string kDefaultColumnFamilyName; -extern const std::string kPersistentStatsColumnFamilyName; -struct ColumnFamilyDescriptor { - std::string name; - ColumnFamilyOptions options; - ColumnFamilyDescriptor() - : name(kDefaultColumnFamilyName), options(ColumnFamilyOptions()) {} - ColumnFamilyDescriptor(const std::string& _name, - const ColumnFamilyOptions& _options) - : name(_name), options(_options) {} -}; - -class ColumnFamilyHandle { - public: - virtual ~ColumnFamilyHandle() {} - // Returns the name of the column family associated with the current handle. - virtual const std::string& GetName() const = 0; - // Returns the ID of the column family associated with the current handle. - virtual uint32_t GetID() const = 0; - // Fills "*desc" with the up-to-date descriptor of the column family - // associated with this handle. 
Since it fills "*desc" with the up-to-date - // information, this call might internally lock and release DB mutex to - // access the up-to-date CF options. In addition, all the pointer-typed - // options cannot be referenced any longer than the original options exist. - // - // Note that this function is not supported in RocksDBLite. - virtual Status GetDescriptor(ColumnFamilyDescriptor* desc) = 0; - // Returns the comparator of the column family associated with the - // current handle. - virtual const Comparator* GetComparator() const = 0; -}; - -static const int kMajorVersion = __ROCKSDB_MAJOR__; -static const int kMinorVersion = __ROCKSDB_MINOR__; - -// A range of keys -struct Range { - Slice start; - Slice limit; - - Range() {} - Range(const Slice& s, const Slice& l) : start(s), limit(l) {} -}; - -struct RangePtr { - const Slice* start; - const Slice* limit; - - RangePtr() : start(nullptr), limit(nullptr) {} - RangePtr(const Slice* s, const Slice* l) : start(s), limit(l) {} -}; - -// It is valid that files_checksums and files_checksum_func_names are both -// empty (no checksum informaiton is provided for ingestion). Otherwise, -// their sizes should be the same as external_files. The file order should -// be the same in three vectors and guaranteed by the caller. -struct IngestExternalFileArg { - ColumnFamilyHandle* column_family = nullptr; - std::vector external_files; - IngestExternalFileOptions options; - std::vector files_checksums; - std::vector files_checksum_func_names; -}; - -struct GetMergeOperandsOptions { - int expected_max_number_of_operands = 0; -}; - -// A collections of table properties objects, where -// key: is the table's file name. -// value: the table properties object of the given table. -typedef std::unordered_map> - TablePropertiesCollection; - -// A DB is a persistent, versioned ordered map from keys to values. -// A DB is safe for concurrent access from multiple threads without -// any external synchronization. 
-// DB is an abstract base class with one primary implementation (DBImpl) -// and a number of wrapper implementations. -class DB { - public: - // Open the database with the specified "name". - // Stores a pointer to a heap-allocated database in *dbptr and returns - // OK on success. - // Stores nullptr in *dbptr and returns a non-OK status on error. - // Caller should delete *dbptr when it is no longer needed. - static Status Open(const Options& options, const std::string& name, - DB** dbptr); - - // Open the database for read only. All DB interfaces - // that modify data, like put/delete, will return error. - // If the db is opened in read only mode, then no compactions - // will happen. - // - // Not supported in ROCKSDB_LITE, in which case the function will - // return Status::NotSupported. - static Status OpenForReadOnly(const Options& options, const std::string& name, - DB** dbptr, - bool error_if_wal_file_exists = false); - - // Open the database for read only with column families. When opening DB with - // read only, you can specify only a subset of column families in the - // database that should be opened. However, you always need to specify default - // column family. The default column family name is 'default' and it's stored - // in ROCKSDB_NAMESPACE::kDefaultColumnFamilyName - // - // Not supported in ROCKSDB_LITE, in which case the function will - // return Status::NotSupported. - static Status OpenForReadOnly( - const DBOptions& db_options, const std::string& name, - const std::vector& column_families, - std::vector* handles, DB** dbptr, - bool error_if_wal_file_exists = false); - - // The following OpenAsSecondary functions create a secondary instance that - // can dynamically tail the MANIFEST of a primary that must have already been - // created. User can call TryCatchUpWithPrimary to make the secondary - // instance catch up with primary (WAL tailing is NOT supported now) whenever - // the user feels necessary. 
Column families created by the primary after the - // secondary instance starts are currently ignored by the secondary instance. - // Column families opened by secondary and dropped by the primary will be - // dropped by secondary as well. However the user of the secondary instance - // can still access the data of such dropped column family as long as they - // do not destroy the corresponding column family handle. - // WAL tailing is not supported at present, but will arrive soon. - // - // The options argument specifies the options to open the secondary instance. - // The name argument specifies the name of the primary db that you have used - // to open the primary instance. - // The secondary_path argument points to a directory where the secondary - // instance stores its info log. - // The dbptr is an out-arg corresponding to the opened secondary instance. - // The pointer points to a heap-allocated database, and the user should - // delete it after use. - // Open DB as secondary instance with only the default column family. - // Return OK on success, non-OK on failures. - static Status OpenAsSecondary(const Options& options, const std::string& name, - const std::string& secondary_path, DB** dbptr); - - // Open DB as secondary instance with column families. You can open a subset - // of column families in secondary mode. - // The db_options specify the database specific options. - // The name argument specifies the name of the primary db that you have used - // to open the primary instance. - // The secondary_path argument points to a directory where the secondary - // instance stores its info log. - // The column_families argument specifieds a list of column families to open. - // If any of the column families does not exist, the function returns non-OK - // status. - // The handles is an out-arg corresponding to the opened database column - // familiy handles. - // The dbptr is an out-arg corresponding to the opened secondary instance. 
- // The pointer points to a heap-allocated database, and the caller should - // delete it after use. Before deleting the dbptr, the user should also - // delete the pointers stored in handles vector. - // Return OK on success, on-OK on failures. - static Status OpenAsSecondary( - const DBOptions& db_options, const std::string& name, - const std::string& secondary_path, - const std::vector& column_families, - std::vector* handles, DB** dbptr); - - // Open DB with column families. - // db_options specify database specific options - // column_families is the vector of all column families in the database, - // containing column family name and options. You need to open ALL column - // families in the database. To get the list of column families, you can use - // ListColumnFamilies(). Also, you can open only a subset of column families - // for read-only access. - // The default column family name is 'default' and it's stored - // in ROCKSDB_NAMESPACE::kDefaultColumnFamilyName. - // If everything is OK, handles will on return be the same size - // as column_families --- handles[i] will be a handle that you - // will use to operate on column family column_family[i]. - // Before delete DB, you have to close All column families by calling - // DestroyColumnFamilyHandle() with all the handles. - static Status Open(const DBOptions& db_options, const std::string& name, - const std::vector& column_families, - std::vector* handles, DB** dbptr); - - virtual Status Resume() { return Status::NotSupported(); } - - // Close the DB by releasing resources, closing files etc. This should be - // called before calling the destructor so that the caller can get back a - // status in case there are any errors. This will not fsync the WAL files. - // If syncing is required, the caller must first call SyncWAL(), or Write() - // using an empty write batch with WriteOptions.sync=true. - // Regardless of the return status, the DB must be freed. 
- // If the return status is Aborted(), closing fails because there is - // unreleased snapshot in the system. In this case, users can release - // the unreleased snapshots and try again and expect it to succeed. For - // other status, recalling Close() will be no-op. - // If the return status is NotSupported(), then the DB implementation does - // cleanup in the destructor - virtual Status Close() { return Status::NotSupported(); } - - // ListColumnFamilies will open the DB specified by argument name - // and return the list of all column families in that DB - // through column_families argument. The ordering of - // column families in column_families is unspecified. - static Status ListColumnFamilies(const DBOptions& db_options, - const std::string& name, - std::vector* column_families); - - // Abstract class ctor - DB() {} - // No copying allowed - DB(const DB&) = delete; - void operator=(const DB&) = delete; - - virtual ~DB(); - - // Create a column_family and return the handle of column family - // through the argument handle. - virtual Status CreateColumnFamily(const ColumnFamilyOptions& options, - const std::string& column_family_name, - ColumnFamilyHandle** handle); - - // Bulk create column families with the same column family options. - // Return the handles of the column families through the argument handles. - // In case of error, the request may succeed partially, and handles will - // contain column family handles that it managed to create, and have size - // equal to the number of created column families. - virtual Status CreateColumnFamilies( - const ColumnFamilyOptions& options, - const std::vector& column_family_names, - std::vector* handles); - - // Bulk create column families. - // Return the handles of the column families through the argument handles. 
- // In case of error, the request may succeed partially, and handles will - // contain column family handles that it managed to create, and have size - // equal to the number of created column families. - virtual Status CreateColumnFamilies( - const std::vector& column_families, - std::vector* handles); - - // Drop a column family specified by column_family handle. This call - // only records a drop record in the manifest and prevents the column - // family from flushing and compacting. - virtual Status DropColumnFamily(ColumnFamilyHandle* column_family); - - // Bulk drop column families. This call only records drop records in the - // manifest and prevents the column families from flushing and compacting. - // In case of error, the request may succeed partially. User may call - // ListColumnFamilies to check the result. - virtual Status DropColumnFamilies( - const std::vector& column_families); - - // Close a column family specified by column_family handle and destroy - // the column family handle specified to avoid double deletion. This call - // deletes the column family handle by default. Use this method to - // close column family instead of deleting column family handle directly - virtual Status DestroyColumnFamilyHandle(ColumnFamilyHandle* column_family); - - // Set the database entry for "key" to "value". - // If "key" already exists, it will be overwritten. - // Returns OK on success, and a non-OK status on error. - // Note: consider setting options.sync = true. - virtual Status Put(const WriteOptions& options, - ColumnFamilyHandle* column_family, const Slice& key, - const Slice& value) = 0; - virtual Status Put(const WriteOptions& options, const Slice& key, - const Slice& value) { - return Put(options, DefaultColumnFamily(), key, value); - } - - // Remove the database entry (if any) for "key". Returns OK on - // success, and a non-OK status on error. It is not an error if "key" - // did not exist in the database. 
- // Note: consider setting options.sync = true. - virtual Status Delete(const WriteOptions& options, - ColumnFamilyHandle* column_family, - const Slice& key) = 0; - virtual Status Delete(const WriteOptions& options, const Slice& key) { - return Delete(options, DefaultColumnFamily(), key); - } - - // Remove the database entry for "key". Requires that the key exists - // and was not overwritten. Returns OK on success, and a non-OK status - // on error. It is not an error if "key" did not exist in the database. - // - // If a key is overwritten (by calling Put() multiple times), then the result - // of calling SingleDelete() on this key is undefined. SingleDelete() only - // behaves correctly if there has been only one Put() for this key since the - // previous call to SingleDelete() for this key. - // - // This feature is currently an experimental performance optimization - // for a very specific workload. It is up to the caller to ensure that - // SingleDelete is only used for a key that is not deleted using Delete() or - // written using Merge(). Mixing SingleDelete operations with Deletes and - // Merges can result in undefined behavior. - // - // Note: consider setting options.sync = true. - virtual Status SingleDelete(const WriteOptions& options, - ColumnFamilyHandle* column_family, - const Slice& key) = 0; - virtual Status SingleDelete(const WriteOptions& options, const Slice& key) { - return SingleDelete(options, DefaultColumnFamily(), key); - } - - // Removes the database entries in the range ["begin_key", "end_key"), i.e., - // including "begin_key" and excluding "end_key". Returns OK on success, and - // a non-OK status on error. It is not an error if the database does not - // contain any existing data in the range ["begin_key", "end_key"). - // - // If "end_key" comes before "start_key" according to the user's comparator, - // a `Status::InvalidArgument` is returned. 
- // - // This feature is now usable in production, with the following caveats: - // 1) Accumulating many range tombstones in the memtable will degrade read - // performance; this can be avoided by manually flushing occasionally. - // 2) Limiting the maximum number of open files in the presence of range - // tombstones can degrade read performance. To avoid this problem, set - // max_open_files to -1 whenever possible. - virtual Status DeleteRange(const WriteOptions& options, - ColumnFamilyHandle* column_family, - const Slice& begin_key, const Slice& end_key); - - // Merge the database entry for "key" with "value". Returns OK on success, - // and a non-OK status on error. The semantics of this operation is - // determined by the user provided merge_operator when opening DB. - // Note: consider setting options.sync = true. - virtual Status Merge(const WriteOptions& options, - ColumnFamilyHandle* column_family, const Slice& key, - const Slice& value) = 0; - virtual Status Merge(const WriteOptions& options, const Slice& key, - const Slice& value) { - return Merge(options, DefaultColumnFamily(), key, value); - } - - // Apply the specified updates to the database. - // If `updates` contains no update, WAL will still be synced if - // options.sync=true. - // Returns OK on success, non-OK on failure. - // Note: consider setting options.sync = true. - virtual Status Write(const WriteOptions& options, WriteBatch* updates) = 0; - - // If the database contains an entry for "key" store the - // corresponding value in *value and return OK. - // - // If timestamp is enabled and a non-null timestamp pointer is passed in, - // timestamp is returned. - // - // If there is no entry for "key" leave *value unchanged and return - // a status for which Status::IsNotFound() returns true. - // - // May return some other Status on an error. 
- virtual inline Status Get(const ReadOptions& options, - ColumnFamilyHandle* column_family, const Slice& key, - std::string* value) { - assert(value != nullptr); - PinnableSlice pinnable_val(value); - assert(!pinnable_val.IsPinned()); - auto s = Get(options, column_family, key, &pinnable_val); - if (s.ok() && pinnable_val.IsPinned()) { - value->assign(pinnable_val.data(), pinnable_val.size()); - } // else value is already assigned - return s; - } - virtual Status Get(const ReadOptions& options, - ColumnFamilyHandle* column_family, const Slice& key, - PinnableSlice* value) = 0; - virtual Status Get(const ReadOptions& options, const Slice& key, - std::string* value) { - return Get(options, DefaultColumnFamily(), key, value); - } - - // Get() methods that return timestamp. Derived DB classes don't need to worry - // about this group of methods if they don't care about timestamp feature. - virtual inline Status Get(const ReadOptions& options, - ColumnFamilyHandle* column_family, const Slice& key, - std::string* value, std::string* timestamp) { - assert(value != nullptr); - PinnableSlice pinnable_val(value); - assert(!pinnable_val.IsPinned()); - auto s = Get(options, column_family, key, &pinnable_val, timestamp); - if (s.ok() && pinnable_val.IsPinned()) { - value->assign(pinnable_val.data(), pinnable_val.size()); - } // else value is already assigned - return s; - } - virtual Status Get(const ReadOptions& /*options*/, - ColumnFamilyHandle* /*column_family*/, - const Slice& /*key*/, PinnableSlice* /*value*/, - std::string* /*timestamp*/) { - return Status::NotSupported( - "Get() that returns timestamp is not implemented."); - } - virtual Status Get(const ReadOptions& options, const Slice& key, - std::string* value, std::string* timestamp) { - return Get(options, DefaultColumnFamily(), key, value, timestamp); - } - - // Returns all the merge operands corresponding to the key. 
If the - // number of merge operands in DB is greater than - // merge_operands_options.expected_max_number_of_operands - // no merge operands are returned and status is Incomplete. Merge operands - // returned are in the order of insertion. - // merge_operands- Points to an array of at-least - // merge_operands_options.expected_max_number_of_operands and the - // caller is responsible for allocating it. If the status - // returned is Incomplete then number_of_operands will contain - // the total number of merge operands found in DB for key. - virtual Status GetMergeOperands( - const ReadOptions& options, ColumnFamilyHandle* column_family, - const Slice& key, PinnableSlice* merge_operands, - GetMergeOperandsOptions* get_merge_operands_options, - int* number_of_operands) = 0; - - // Consistent Get of many keys across column families without the need - // for an explicit snapshot. NOTE: the implementation of this MultiGet API - // does not have the performance benefits of the void-returning MultiGet - // functions. - // - // If keys[i] does not exist in the database, then the i'th returned - // status will be one for which Status::IsNotFound() is true, and - // (*values)[i] will be set to some arbitrary value (often ""). Otherwise, - // the i'th returned status will have Status::ok() true, and (*values)[i] - // will store the value associated with keys[i]. - // - // (*values) will always be resized to be the same size as (keys). - // Similarly, the number of returned statuses will be the number of keys. - // Note: keys will not be "de-duplicated". Duplicate keys will return - // duplicate values in order. 
- virtual std::vector MultiGet( - const ReadOptions& options, - const std::vector& column_family, - const std::vector& keys, std::vector* values) = 0; - virtual std::vector MultiGet(const ReadOptions& options, - const std::vector& keys, - std::vector* values) { - return MultiGet( - options, - std::vector(keys.size(), DefaultColumnFamily()), - keys, values); - } - - virtual std::vector MultiGet( - const ReadOptions& /*options*/, - const std::vector& /*column_family*/, - const std::vector& keys, std::vector* /*values*/, - std::vector* /*timestamps*/) { - return std::vector( - keys.size(), Status::NotSupported( - "MultiGet() returning timestamps not implemented.")); - } - virtual std::vector MultiGet(const ReadOptions& options, - const std::vector& keys, - std::vector* values, - std::vector* timestamps) { - return MultiGet( - options, - std::vector(keys.size(), DefaultColumnFamily()), - keys, values, timestamps); - } - - // Overloaded MultiGet API that improves performance by batching operations - // in the read path for greater efficiency. Currently, only the block based - // table format with full filters are supported. Other table formats such - // as plain table, block based table with block based filters and - // partitioned indexes will still work, but will not get any performance - // benefits. - // Parameters - - // options - ReadOptions - // column_family - ColumnFamilyHandle* that the keys belong to. All the keys - // passed to the API are restricted to a single column family - // num_keys - Number of keys to lookup - // keys - Pointer to C style array of key Slices with num_keys elements - // values - Pointer to C style array of PinnableSlices with num_keys elements - // statuses - Pointer to C style array of Status with num_keys elements - // sorted_input - If true, it means the input keys are already sorted by key - // order, so the MultiGet() API doesn't have to sort them - // again. 
If false, the keys will be copied and sorted - // internally by the API - the input array will not be - // modified - virtual void MultiGet(const ReadOptions& options, - ColumnFamilyHandle* column_family, - const size_t num_keys, const Slice* keys, - PinnableSlice* values, Status* statuses, - const bool /*sorted_input*/ = false) { - std::vector cf; - std::vector user_keys; - std::vector status; - std::vector vals; - - for (size_t i = 0; i < num_keys; ++i) { - cf.emplace_back(column_family); - user_keys.emplace_back(keys[i]); - } - status = MultiGet(options, cf, user_keys, &vals); - std::copy(status.begin(), status.end(), statuses); - for (auto& value : vals) { - values->PinSelf(value); - values++; - } - } - - virtual void MultiGet(const ReadOptions& options, - ColumnFamilyHandle* column_family, - const size_t num_keys, const Slice* keys, - PinnableSlice* values, std::string* timestamps, - Status* statuses, const bool /*sorted_input*/ = false) { - std::vector cf; - std::vector user_keys; - std::vector status; - std::vector vals; - std::vector tss; - - for (size_t i = 0; i < num_keys; ++i) { - cf.emplace_back(column_family); - user_keys.emplace_back(keys[i]); - } - status = MultiGet(options, cf, user_keys, &vals, &tss); - std::copy(status.begin(), status.end(), statuses); - std::copy(tss.begin(), tss.end(), timestamps); - for (auto& value : vals) { - values->PinSelf(value); - values++; - } - } - - // Overloaded MultiGet API that improves performance by batching operations - // in the read path for greater efficiency. Currently, only the block based - // table format with full filters are supported. Other table formats such - // as plain table, block based table with block based filters and - // partitioned indexes will still work, but will not get any performance - // benefits. - // Parameters - - // options - ReadOptions - // column_family - ColumnFamilyHandle* that the keys belong to. 
All the keys - // passed to the API are restricted to a single column family - // num_keys - Number of keys to lookup - // keys - Pointer to C style array of key Slices with num_keys elements - // values - Pointer to C style array of PinnableSlices with num_keys elements - // statuses - Pointer to C style array of Status with num_keys elements - // sorted_input - If true, it means the input keys are already sorted by key - // order, so the MultiGet() API doesn't have to sort them - // again. If false, the keys will be copied and sorted - // internally by the API - the input array will not be - // modified - virtual void MultiGet(const ReadOptions& options, const size_t num_keys, - ColumnFamilyHandle** column_families, const Slice* keys, - PinnableSlice* values, Status* statuses, - const bool /*sorted_input*/ = false) { - std::vector cf; - std::vector user_keys; - std::vector status; - std::vector vals; - - for (size_t i = 0; i < num_keys; ++i) { - cf.emplace_back(column_families[i]); - user_keys.emplace_back(keys[i]); - } - status = MultiGet(options, cf, user_keys, &vals); - std::copy(status.begin(), status.end(), statuses); - for (auto& value : vals) { - values->PinSelf(value); - values++; - } - } - virtual void MultiGet(const ReadOptions& options, const size_t num_keys, - ColumnFamilyHandle** column_families, const Slice* keys, - PinnableSlice* values, std::string* timestamps, - Status* statuses, const bool /*sorted_input*/ = false) { - std::vector cf; - std::vector user_keys; - std::vector status; - std::vector vals; - std::vector tss; - - for (size_t i = 0; i < num_keys; ++i) { - cf.emplace_back(column_families[i]); - user_keys.emplace_back(keys[i]); - } - status = MultiGet(options, cf, user_keys, &vals, &tss); - std::copy(status.begin(), status.end(), statuses); - std::copy(tss.begin(), tss.end(), timestamps); - for (auto& value : vals) { - values->PinSelf(value); - values++; - } - } - - // If the key definitely does not exist in the database, then this method 
- // returns false, else true. If the caller wants to obtain value when the key - // is found in memory, a bool for 'value_found' must be passed. 'value_found' - // will be true on return if value has been set properly. - // This check is potentially lighter-weight than invoking DB::Get(). One way - // to make this lighter weight is to avoid doing any IOs. - // Default implementation here returns true and sets 'value_found' to false - virtual bool KeyMayExist(const ReadOptions& /*options*/, - ColumnFamilyHandle* /*column_family*/, - const Slice& /*key*/, std::string* /*value*/, - std::string* /*timestamp*/, - bool* value_found = nullptr) { - if (value_found != nullptr) { - *value_found = false; - } - return true; - } - - virtual bool KeyMayExist(const ReadOptions& options, - ColumnFamilyHandle* column_family, const Slice& key, - std::string* value, bool* value_found = nullptr) { - return KeyMayExist(options, column_family, key, value, - /*timestamp=*/nullptr, value_found); - } - - virtual bool KeyMayExist(const ReadOptions& options, const Slice& key, - std::string* value, bool* value_found = nullptr) { - return KeyMayExist(options, DefaultColumnFamily(), key, value, value_found); - } - - virtual bool KeyMayExist(const ReadOptions& options, const Slice& key, - std::string* value, std::string* timestamp, - bool* value_found = nullptr) { - return KeyMayExist(options, DefaultColumnFamily(), key, value, timestamp, - value_found); - } - - // Return a heap-allocated iterator over the contents of the database. - // The result of NewIterator() is initially invalid (caller must - // call one of the Seek methods on the iterator before using it). - // - // Caller should delete the iterator when it is no longer needed. - // The returned iterator should be deleted before this db is deleted. 
- virtual Iterator* NewIterator(const ReadOptions& options, - ColumnFamilyHandle* column_family) = 0; - virtual Iterator* NewIterator(const ReadOptions& options) { - return NewIterator(options, DefaultColumnFamily()); - } - // Returns iterators from a consistent database state across multiple - // column families. Iterators are heap allocated and need to be deleted - // before the db is deleted - virtual Status NewIterators( - const ReadOptions& options, - const std::vector& column_families, - std::vector* iterators) = 0; - - // Return a handle to the current DB state. Iterators created with - // this handle will all observe a stable snapshot of the current DB - // state. The caller must call ReleaseSnapshot(result) when the - // snapshot is no longer needed. - // - // nullptr will be returned if the DB fails to take a snapshot or does - // not support snapshot. - virtual const Snapshot* GetSnapshot() = 0; - - // Release a previously acquired snapshot. The caller must not - // use "snapshot" after this call. - virtual void ReleaseSnapshot(const Snapshot* snapshot) = 0; - -#ifndef ROCKSDB_LITE - // Contains all valid property arguments for GetProperty(). - // - // NOTE: Property names cannot end in numbers since those are interpreted as - // arguments, e.g., see kNumFilesAtLevelPrefix. - struct Properties { - // "rocksdb.num-files-at-level" - returns string containing the number - // of files at level , where is an ASCII representation of a - // level number (e.g., "0"). - static const std::string kNumFilesAtLevelPrefix; - - // "rocksdb.compression-ratio-at-level" - returns string containing the - // compression ratio of data at level , where is an ASCII - // representation of a level number (e.g., "0"). Here, compression - // ratio is defined as uncompressed data size / compressed file size. - // Returns "-1.0" if no open files at level . 
- static const std::string kCompressionRatioAtLevelPrefix; - - // "rocksdb.stats" - returns a multi-line string containing the data - // described by kCFStats followed by the data described by kDBStats. - static const std::string kStats; - - // "rocksdb.sstables" - returns a multi-line string summarizing current - // SST files. - static const std::string kSSTables; - - // "rocksdb.cfstats" - Both of "rocksdb.cfstats-no-file-histogram" and - // "rocksdb.cf-file-histogram" together. See below for description - // of the two. - static const std::string kCFStats; - - // "rocksdb.cfstats-no-file-histogram" - returns a multi-line string with - // general columm family stats per-level over db's lifetime ("L"), - // aggregated over db's lifetime ("Sum"), and aggregated over the - // interval since the last retrieval ("Int"). - // It could also be used to return the stats in the format of the map. - // In this case there will a pair of string to array of double for - // each level as well as for "Sum". "Int" stats will not be affected - // when this form of stats are retrieved. - static const std::string kCFStatsNoFileHistogram; - - // "rocksdb.cf-file-histogram" - print out how many file reads to every - // level, as well as the histogram of latency of single requests. - static const std::string kCFFileHistogram; - - // "rocksdb.dbstats" - returns a multi-line string with general database - // stats, both cumulative (over the db's lifetime) and interval (since - // the last retrieval of kDBStats). - static const std::string kDBStats; - - // "rocksdb.levelstats" - returns multi-line string containing the number - // of files per level and total size of each level (MB). - static const std::string kLevelStats; - - // "rocksdb.num-immutable-mem-table" - returns number of immutable - // memtables that have not yet been flushed. 
- static const std::string kNumImmutableMemTable; - - // "rocksdb.num-immutable-mem-table-flushed" - returns number of immutable - // memtables that have already been flushed. - static const std::string kNumImmutableMemTableFlushed; - - // "rocksdb.mem-table-flush-pending" - returns 1 if a memtable flush is - // pending; otherwise, returns 0. - static const std::string kMemTableFlushPending; - - // "rocksdb.num-running-flushes" - returns the number of currently running - // flushes. - static const std::string kNumRunningFlushes; - - // "rocksdb.compaction-pending" - returns 1 if at least one compaction is - // pending; otherwise, returns 0. - static const std::string kCompactionPending; - - // "rocksdb.num-running-compactions" - returns the number of currently - // running compactions. - static const std::string kNumRunningCompactions; - - // "rocksdb.background-errors" - returns accumulated number of background - // errors. - static const std::string kBackgroundErrors; - - // "rocksdb.cur-size-active-mem-table" - returns approximate size of active - // memtable (bytes). - static const std::string kCurSizeActiveMemTable; - - // "rocksdb.cur-size-all-mem-tables" - returns approximate size of active - // and unflushed immutable memtables (bytes). - static const std::string kCurSizeAllMemTables; - - // "rocksdb.size-all-mem-tables" - returns approximate size of active, - // unflushed immutable, and pinned immutable memtables (bytes). - static const std::string kSizeAllMemTables; - - // "rocksdb.num-entries-active-mem-table" - returns total number of entries - // in the active memtable. - static const std::string kNumEntriesActiveMemTable; - - // "rocksdb.num-entries-imm-mem-tables" - returns total number of entries - // in the unflushed immutable memtables. - static const std::string kNumEntriesImmMemTables; - - // "rocksdb.num-deletes-active-mem-table" - returns total number of delete - // entries in the active memtable. 
- static const std::string kNumDeletesActiveMemTable; - - // "rocksdb.num-deletes-imm-mem-tables" - returns total number of delete - // entries in the unflushed immutable memtables. - static const std::string kNumDeletesImmMemTables; - - // "rocksdb.estimate-num-keys" - returns estimated number of total keys in - // the active and unflushed immutable memtables and storage. - static const std::string kEstimateNumKeys; - - // "rocksdb.estimate-table-readers-mem" - returns estimated memory used for - // reading SST tables, excluding memory used in block cache (e.g., - // filter and index blocks). - static const std::string kEstimateTableReadersMem; - - // "rocksdb.is-file-deletions-enabled" - returns 0 if deletion of obsolete - // files is enabled; otherwise, returns a non-zero number. - static const std::string kIsFileDeletionsEnabled; - - // "rocksdb.num-snapshots" - returns number of unreleased snapshots of the - // database. - static const std::string kNumSnapshots; - - // "rocksdb.oldest-snapshot-time" - returns number representing unix - // timestamp of oldest unreleased snapshot. - static const std::string kOldestSnapshotTime; - - // "rocksdb.oldest-snapshot-sequence" - returns number representing - // sequence number of oldest unreleased snapshot. - static const std::string kOldestSnapshotSequence; - - // "rocksdb.num-live-versions" - returns number of live versions. `Version` - // is an internal data structure. See version_set.h for details. More - // live versions often mean more SST files are held from being deleted, - // by iterators or unfinished compactions. - static const std::string kNumLiveVersions; - - // "rocksdb.current-super-version-number" - returns number of current LSM - // version. It is a uint64_t integer number, incremented after there is - // any change to the LSM tree. The number is not preserved after restarting - // the DB. After DB restart, it will start from 0 again. 
- static const std::string kCurrentSuperVersionNumber; - - // "rocksdb.estimate-live-data-size" - returns an estimate of the amount of - // live data in bytes. - static const std::string kEstimateLiveDataSize; - - // "rocksdb.min-log-number-to-keep" - return the minimum log number of the - // log files that should be kept. - static const std::string kMinLogNumberToKeep; - - // "rocksdb.min-obsolete-sst-number-to-keep" - return the minimum file - // number for an obsolete SST to be kept. The max value of `uint64_t` - // will be returned if all obsolete files can be deleted. - static const std::string kMinObsoleteSstNumberToKeep; - - // "rocksdb.total-sst-files-size" - returns total size (bytes) of all SST - // files. - // WARNING: may slow down online queries if there are too many files. - static const std::string kTotalSstFilesSize; - - // "rocksdb.live-sst-files-size" - returns total size (bytes) of all SST - // files belong to the latest LSM tree. - static const std::string kLiveSstFilesSize; - - // "rocksdb.base-level" - returns number of level to which L0 data will be - // compacted. - static const std::string kBaseLevel; - - // "rocksdb.estimate-pending-compaction-bytes" - returns estimated total - // number of bytes compaction needs to rewrite to get all levels down - // to under target size. Not valid for other compactions than level- - // based. - static const std::string kEstimatePendingCompactionBytes; - - // "rocksdb.aggregated-table-properties" - returns a string representation - // of the aggregated table properties of the target column family. - static const std::string kAggregatedTableProperties; - - // "rocksdb.aggregated-table-properties-at-level", same as the previous - // one but only returns the aggregated table properties of the - // specified level "N" at the target column family. - static const std::string kAggregatedTablePropertiesAtLevel; - - // "rocksdb.actual-delayed-write-rate" - returns the current actual delayed - // write rate. 
0 means no delay. - static const std::string kActualDelayedWriteRate; - - // "rocksdb.is-write-stopped" - Return 1 if write has been stopped. - static const std::string kIsWriteStopped; - - // "rocksdb.estimate-oldest-key-time" - returns an estimation of - // oldest key timestamp in the DB. Currently only available for - // FIFO compaction with - // compaction_options_fifo.allow_compaction = false. - static const std::string kEstimateOldestKeyTime; - - // "rocksdb.block-cache-capacity" - returns block cache capacity. - static const std::string kBlockCacheCapacity; - - // "rocksdb.block-cache-usage" - returns the memory size for the entries - // residing in block cache. - static const std::string kBlockCacheUsage; - - // "rocksdb.block-cache-pinned-usage" - returns the memory size for the - // entries being pinned. - static const std::string kBlockCachePinnedUsage; - - // "rocksdb.options-statistics" - returns multi-line string - // of options.statistics - static const std::string kOptionsStatistics; - }; -#endif /* ROCKSDB_LITE */ - - // DB implementations can export properties about their state via this method. - // If "property" is a valid property understood by this DB implementation (see - // Properties struct above for valid options), fills "*value" with its current - // value and returns true. Otherwise, returns false. - virtual bool GetProperty(ColumnFamilyHandle* column_family, - const Slice& property, std::string* value) = 0; - virtual bool GetProperty(const Slice& property, std::string* value) { - return GetProperty(DefaultColumnFamily(), property, value); - } - virtual bool GetMapProperty(ColumnFamilyHandle* column_family, - const Slice& property, - std::map* value) = 0; - virtual bool GetMapProperty(const Slice& property, - std::map* value) { - return GetMapProperty(DefaultColumnFamily(), property, value); - } - - // Similar to GetProperty(), but only works for a subset of properties whose - // return value is an integer. Return the value by integer. 
Supported - // properties: - // "rocksdb.num-immutable-mem-table" - // "rocksdb.mem-table-flush-pending" - // "rocksdb.compaction-pending" - // "rocksdb.background-errors" - // "rocksdb.cur-size-active-mem-table" - // "rocksdb.cur-size-all-mem-tables" - // "rocksdb.size-all-mem-tables" - // "rocksdb.num-entries-active-mem-table" - // "rocksdb.num-entries-imm-mem-tables" - // "rocksdb.num-deletes-active-mem-table" - // "rocksdb.num-deletes-imm-mem-tables" - // "rocksdb.estimate-num-keys" - // "rocksdb.estimate-table-readers-mem" - // "rocksdb.is-file-deletions-enabled" - // "rocksdb.num-snapshots" - // "rocksdb.oldest-snapshot-time" - // "rocksdb.num-live-versions" - // "rocksdb.current-super-version-number" - // "rocksdb.estimate-live-data-size" - // "rocksdb.min-log-number-to-keep" - // "rocksdb.min-obsolete-sst-number-to-keep" - // "rocksdb.total-sst-files-size" - // "rocksdb.live-sst-files-size" - // "rocksdb.base-level" - // "rocksdb.estimate-pending-compaction-bytes" - // "rocksdb.num-running-compactions" - // "rocksdb.num-running-flushes" - // "rocksdb.actual-delayed-write-rate" - // "rocksdb.is-write-stopped" - // "rocksdb.estimate-oldest-key-time" - // "rocksdb.block-cache-capacity" - // "rocksdb.block-cache-usage" - // "rocksdb.block-cache-pinned-usage" - virtual bool GetIntProperty(ColumnFamilyHandle* column_family, - const Slice& property, uint64_t* value) = 0; - virtual bool GetIntProperty(const Slice& property, uint64_t* value) { - return GetIntProperty(DefaultColumnFamily(), property, value); - } - - // Reset internal stats for DB and all column families. - // Note this doesn't reset options.statistics as it is not owned by - // DB. - virtual Status ResetStats() { - return Status::NotSupported("Not implemented"); - } - - // Same as GetIntProperty(), but this one returns the aggregated int - // property from all column families. 
- virtual bool GetAggregatedIntProperty(const Slice& property, - uint64_t* value) = 0; - - // Flags for DB::GetSizeApproximation that specify whether memtable - // stats should be included, or file stats approximation or both - enum SizeApproximationFlags : uint8_t { - NONE = 0, - INCLUDE_MEMTABLES = 1 << 0, - INCLUDE_FILES = 1 << 1 - }; - - // For each i in [0,n-1], store in "sizes[i]", the approximate - // file system space used by keys in "[range[i].start .. range[i].limit)" - // in a single column family. - // - // Note that the returned sizes measure file system space usage, so - // if the user data compresses by a factor of ten, the returned - // sizes will be one-tenth the size of the corresponding user data size. - virtual Status GetApproximateSizes(const SizeApproximationOptions& options, - ColumnFamilyHandle* column_family, - const Range* ranges, int n, - uint64_t* sizes) = 0; - - // Simpler versions of the GetApproximateSizes() method above. - // The include_flags argumenbt must of type DB::SizeApproximationFlags - // and can not be NONE. - virtual void GetApproximateSizes(ColumnFamilyHandle* column_family, - const Range* ranges, int n, uint64_t* sizes, - uint8_t include_flags = INCLUDE_FILES) { - SizeApproximationOptions options; - options.include_memtabtles = - (include_flags & SizeApproximationFlags::INCLUDE_MEMTABLES) != 0; - options.include_files = - (include_flags & SizeApproximationFlags::INCLUDE_FILES) != 0; - GetApproximateSizes(options, column_family, ranges, n, sizes); - } - virtual void GetApproximateSizes(const Range* ranges, int n, uint64_t* sizes, - uint8_t include_flags = INCLUDE_FILES) { - GetApproximateSizes(DefaultColumnFamily(), ranges, n, sizes, include_flags); - } - - // The method is similar to GetApproximateSizes, except it - // returns approximate number of records in memtables. 
- virtual void GetApproximateMemTableStats(ColumnFamilyHandle* column_family, - const Range& range, - uint64_t* const count, - uint64_t* const size) = 0; - virtual void GetApproximateMemTableStats(const Range& range, - uint64_t* const count, - uint64_t* const size) { - GetApproximateMemTableStats(DefaultColumnFamily(), range, count, size); - } - - // Deprecated versions of GetApproximateSizes - ROCKSDB_DEPRECATED_FUNC virtual void GetApproximateSizes( - const Range* range, int n, uint64_t* sizes, bool include_memtable) { - uint8_t include_flags = SizeApproximationFlags::INCLUDE_FILES; - if (include_memtable) { - include_flags |= SizeApproximationFlags::INCLUDE_MEMTABLES; - } - GetApproximateSizes(DefaultColumnFamily(), range, n, sizes, include_flags); - } - ROCKSDB_DEPRECATED_FUNC virtual void GetApproximateSizes( - ColumnFamilyHandle* column_family, const Range* range, int n, - uint64_t* sizes, bool include_memtable) { - uint8_t include_flags = SizeApproximationFlags::INCLUDE_FILES; - if (include_memtable) { - include_flags |= SizeApproximationFlags::INCLUDE_MEMTABLES; - } - GetApproximateSizes(column_family, range, n, sizes, include_flags); - } - - // Compact the underlying storage for the key range [*begin,*end]. - // The actual compaction interval might be superset of [*begin, *end]. - // In particular, deleted and overwritten versions are discarded, - // and the data is rearranged to reduce the cost of operations - // needed to access the data. This operation should typically only - // be invoked by users who understand the underlying implementation. - // - // begin==nullptr is treated as a key before all keys in the database. - // end==nullptr is treated as a key after all keys in the database. - // Therefore the following call will compact the entire database: - // db->CompactRange(options, nullptr, nullptr); - // Note that after the entire database is compacted, all data are pushed - // down to the last level containing any data. 
If the total data size after - // compaction is reduced, that level might not be appropriate for hosting all - // the files. In this case, client could set options.change_level to true, to - // move the files back to the minimum level capable of holding the data set - // or a given level (specified by non-negative options.target_level). - virtual Status CompactRange(const CompactRangeOptions& options, - ColumnFamilyHandle* column_family, - const Slice* begin, const Slice* end) = 0; - virtual Status CompactRange(const CompactRangeOptions& options, - const Slice* begin, const Slice* end) { - return CompactRange(options, DefaultColumnFamily(), begin, end); - } - - ROCKSDB_DEPRECATED_FUNC virtual Status CompactRange( - ColumnFamilyHandle* column_family, const Slice* begin, const Slice* end, - bool change_level = false, int target_level = -1, - uint32_t target_path_id = 0) { - CompactRangeOptions options; - options.change_level = change_level; - options.target_level = target_level; - options.target_path_id = target_path_id; - return CompactRange(options, column_family, begin, end); - } - - ROCKSDB_DEPRECATED_FUNC virtual Status CompactRange( - const Slice* begin, const Slice* end, bool change_level = false, - int target_level = -1, uint32_t target_path_id = 0) { - CompactRangeOptions options; - options.change_level = change_level; - options.target_level = target_level; - options.target_path_id = target_path_id; - return CompactRange(options, DefaultColumnFamily(), begin, end); - } - - virtual Status SetOptions( - ColumnFamilyHandle* /*column_family*/, - const std::unordered_map& /*new_options*/) { - return Status::NotSupported("Not implemented"); - } - virtual Status SetOptions( - const std::unordered_map& new_options) { - return SetOptions(DefaultColumnFamily(), new_options); - } - - virtual Status SetDBOptions( - const std::unordered_map& new_options) = 0; - - // CompactFiles() inputs a list of files specified by file numbers and - // compacts them to the specified 
level. Note that the behavior is different - // from CompactRange() in that CompactFiles() performs the compaction job - // using the CURRENT thread. - // - // @see GetDataBaseMetaData - // @see GetColumnFamilyMetaData - virtual Status CompactFiles( - const CompactionOptions& compact_options, - ColumnFamilyHandle* column_family, - const std::vector& input_file_names, const int output_level, - const int output_path_id = -1, - std::vector* const output_file_names = nullptr, - CompactionJobInfo* compaction_job_info = nullptr) = 0; - - virtual Status CompactFiles( - const CompactionOptions& compact_options, - const std::vector& input_file_names, const int output_level, - const int output_path_id = -1, - std::vector* const output_file_names = nullptr, - CompactionJobInfo* compaction_job_info = nullptr) { - return CompactFiles(compact_options, DefaultColumnFamily(), - input_file_names, output_level, output_path_id, - output_file_names, compaction_job_info); - } - - // This function will wait until all currently running background processes - // finish. After it returns, no background process will be run until - // ContinueBackgroundWork is called, once for each preceding OK-returning - // call to PauseBackgroundWork. - virtual Status PauseBackgroundWork() = 0; - virtual Status ContinueBackgroundWork() = 0; - - // This function will enable automatic compactions for the given column - // families if they were previously disabled. The function will first set the - // disable_auto_compactions option for each column family to 'false', after - // which it will schedule a flush/compaction. - // - // NOTE: Setting disable_auto_compactions to 'false' through SetOptions() API - // does NOT schedule a flush/compaction afterwards, and only changes the - // parameter itself within the column family option. 
- // - virtual Status EnableAutoCompaction( - const std::vector& column_family_handles) = 0; - - virtual void DisableManualCompaction() = 0; - virtual void EnableManualCompaction() = 0; - - // Number of levels used for this DB. - virtual int NumberLevels(ColumnFamilyHandle* column_family) = 0; - virtual int NumberLevels() { return NumberLevels(DefaultColumnFamily()); } - - // Maximum level to which a new compacted memtable is pushed if it - // does not create overlap. - virtual int MaxMemCompactionLevel(ColumnFamilyHandle* column_family) = 0; - virtual int MaxMemCompactionLevel() { - return MaxMemCompactionLevel(DefaultColumnFamily()); - } - - // Number of files in level-0 that would stop writes. - virtual int Level0StopWriteTrigger(ColumnFamilyHandle* column_family) = 0; - virtual int Level0StopWriteTrigger() { - return Level0StopWriteTrigger(DefaultColumnFamily()); - } - - // Get DB name -- the exact same name that was provided as an argument to - // DB::Open() - virtual const std::string& GetName() const = 0; - - // Get Env object from the DB - virtual Env* GetEnv() const = 0; - - virtual FileSystem* GetFileSystem() const; - - // Get DB Options that we use. During the process of opening the - // column family, the options provided when calling DB::Open() or - // DB::CreateColumnFamily() will have been "sanitized" and transformed - // in an implementation-defined manner. - virtual Options GetOptions(ColumnFamilyHandle* column_family) const = 0; - virtual Options GetOptions() const { - return GetOptions(DefaultColumnFamily()); - } - - virtual DBOptions GetDBOptions() const = 0; - - // Flush all mem-table data. - // Flush a single column family, even when atomic flush is enabled. To flush - // multiple column families, use Flush(options, column_families). 
- virtual Status Flush(const FlushOptions& options, - ColumnFamilyHandle* column_family) = 0; - virtual Status Flush(const FlushOptions& options) { - return Flush(options, DefaultColumnFamily()); - } - // Flushes multiple column families. - // If atomic flush is not enabled, Flush(options, column_families) is - // equivalent to calling Flush(options, column_family) multiple times. - // If atomic flush is enabled, Flush(options, column_families) will flush all - // column families specified in 'column_families' up to the latest sequence - // number at the time when flush is requested. - // Note that RocksDB 5.15 and earlier may not be able to open later versions - // with atomic flush enabled. - virtual Status Flush( - const FlushOptions& options, - const std::vector& column_families) = 0; - - // Flush the WAL memory buffer to the file. If sync is true, it calls SyncWAL - // afterwards. - virtual Status FlushWAL(bool /*sync*/) { - return Status::NotSupported("FlushWAL not implemented"); - } - // Sync the wal. Note that Write() followed by SyncWAL() is not exactly the - // same as Write() with sync=true: in the latter case the changes won't be - // visible until the sync is done. - // Currently only works if allow_mmap_writes = false in Options. - virtual Status SyncWAL() = 0; - - // Lock the WAL. Also flushes the WAL after locking. - virtual Status LockWAL() { - return Status::NotSupported("LockWAL not implemented"); - } - - // Unlock the WAL. - virtual Status UnlockWAL() { - return Status::NotSupported("UnlockWAL not implemented"); - } - - // The sequence number of the most recent transaction. - virtual SequenceNumber GetLatestSequenceNumber() const = 0; - - // Instructs DB to preserve deletes with sequence numbers >= passed seqnum. - // Has no effect if DBOptions.preserve_deletes is set to false. 
- // This function assumes that user calls this function with monotonically - // increasing seqnums (otherwise we can't guarantee that a particular delete - // hasn't been already processed); returns true if the value was successfully - // updated, false if user attempted to call if with seqnum <= current value. - virtual bool SetPreserveDeletesSequenceNumber(SequenceNumber seqnum) = 0; - - // Prevent file deletions. Compactions will continue to occur, - // but no obsolete files will be deleted. Calling this multiple - // times have the same effect as calling it once. - virtual Status DisableFileDeletions() = 0; - - // Allow compactions to delete obsolete files. - // If force == true, the call to EnableFileDeletions() will guarantee that - // file deletions are enabled after the call, even if DisableFileDeletions() - // was called multiple times before. - // If force == false, EnableFileDeletions will only enable file deletion - // after it's been called at least as many times as DisableFileDeletions(), - // enabling the two methods to be called by two threads concurrently without - // synchronization -- i.e., file deletions will be enabled only after both - // threads call EnableFileDeletions() - virtual Status EnableFileDeletions(bool force = true) = 0; - -#ifndef ROCKSDB_LITE - // GetLiveFiles followed by GetSortedWalFiles can generate a lossless backup - - // Retrieve the list of all files in the database. The files are - // relative to the dbname and are not absolute paths. Despite being relative - // paths, the file names begin with "/". The valid size of the manifest file - // is returned in manifest_file_size. The manifest file is an ever growing - // file, but only the portion specified by manifest_file_size is valid for - // this snapshot. Setting flush_memtable to true does Flush before recording - // the live files. 
Setting flush_memtable to false is useful when we don't - // want to wait for flush which may have to wait for compaction to complete - // taking an indeterminate time. - // - // In case you have multiple column families, even if flush_memtable is true, - // you still need to call GetSortedWalFiles after GetLiveFiles to compensate - // for new data that arrived to already-flushed column families while other - // column families were flushing - virtual Status GetLiveFiles(std::vector&, - uint64_t* manifest_file_size, - bool flush_memtable = true) = 0; - - // Retrieve the sorted list of all wal files with earliest file first - virtual Status GetSortedWalFiles(VectorLogPtr& files) = 0; - - // Retrieve information about the current wal file - // - // Note that the log might have rolled after this call in which case - // the current_log_file would not point to the current log file. - // - // Additionally, for the sake of optimization current_log_file->StartSequence - // would always be set to 0 - virtual Status GetCurrentWalFile( - std::unique_ptr* current_log_file) = 0; - - // Retrieves the creation time of the oldest file in the DB. - // This API only works if max_open_files = -1, if it is not then - // Status returned is Status::NotSupported() - // The file creation time is set using the env provided to the DB. - // If the DB was created from a very old release then its possible that - // the SST files might not have file_creation_time property and even after - // moving to a newer release its possible that some files never got compacted - // and may not have file_creation_time property. In both the cases - // file_creation_time is considered 0 which means this API will return - // creation_time = 0 as there wouldn't be a timestamp lower than 0. - virtual Status GetCreationTimeOfOldestFile(uint64_t* creation_time) = 0; - - // Note: this API is not yet consistent with WritePrepared transactions. 
- // Sets iter to an iterator that is positioned at a write-batch containing - // seq_number. If the sequence number is non existent, it returns an iterator - // at the first available seq_no after the requested seq_no - // Returns Status::OK if iterator is valid - // Must set WAL_ttl_seconds or WAL_size_limit_MB to large values to - // use this api, else the WAL files will get - // cleared aggressively and the iterator might keep getting invalid before - // an update is read. - virtual Status GetUpdatesSince( - SequenceNumber seq_number, std::unique_ptr* iter, - const TransactionLogIterator::ReadOptions& read_options = - TransactionLogIterator::ReadOptions()) = 0; - -// Windows API macro interference -#undef DeleteFile - // WARNING: This API is planned for removal in RocksDB 7.0 since it does not - // operate at the proper level of abstraction for a key-value store, and its - // contract/restrictions are poorly documented. For example, it returns non-OK - // `Status` for non-bottommost files and files undergoing compaction. Since we - // do not plan to maintain it, the contract will likely remain underspecified - // until its removal. Any user is encouraged to read the implementation - // carefully and migrate away from it when possible. - // - // Delete the file name from the db directory and update the internal state to - // reflect that. Supports deletion of sst and log files only. 'name' must be - // path relative to the db directory. eg. 000001.sst, /archive/000003.log - virtual Status DeleteFile(std::string name) = 0; - - // Returns a list of all table files with their level, start key - // and end key - virtual void GetLiveFilesMetaData( - std::vector* /*metadata*/) {} - - // Return a list of all table file checksum info. - // Note: This function might be of limited use because it cannot be - // synchronized with GetLiveFiles. 
- virtual Status GetLiveFilesChecksumInfo(FileChecksumList* checksum_list) = 0; - - // Obtains the meta data of the specified column family of the DB. - virtual void GetColumnFamilyMetaData(ColumnFamilyHandle* /*column_family*/, - ColumnFamilyMetaData* /*metadata*/) {} - - // Get the metadata of the default column family. - void GetColumnFamilyMetaData(ColumnFamilyMetaData* metadata) { - GetColumnFamilyMetaData(DefaultColumnFamily(), metadata); - } - - // IngestExternalFile() will load a list of external SST files (1) into the DB - // Two primary modes are supported: - // - Duplicate keys in the new files will overwrite exiting keys (default) - // - Duplicate keys will be skipped (set ingest_behind=true) - // In the first mode we will try to find the lowest possible level that - // the file can fit in, and ingest the file into this level (2). A file that - // have a key range that overlap with the memtable key range will require us - // to Flush the memtable first before ingesting the file. - // In the second mode we will always ingest in the bottom most level (see - // docs to IngestExternalFileOptions::ingest_behind). - // - // (1) External SST files can be created using SstFileWriter - // (2) We will try to ingest the files to the lowest possible level - // even if the file compression doesn't match the level compression - // (3) If IngestExternalFileOptions->ingest_behind is set to true, - // we always ingest at the bottommost level, which should be reserved - // for this purpose (see DBOPtions::allow_ingest_behind flag). 
- virtual Status IngestExternalFile( - ColumnFamilyHandle* column_family, - const std::vector& external_files, - const IngestExternalFileOptions& options) = 0; - - virtual Status IngestExternalFile( - const std::vector& external_files, - const IngestExternalFileOptions& options) { - return IngestExternalFile(DefaultColumnFamily(), external_files, options); - } - - // IngestExternalFiles() will ingest files for multiple column families, and - // record the result atomically to the MANIFEST. - // If this function returns OK, all column families' ingestion must succeed. - // If this function returns NOK, or the process crashes, then non-of the - // files will be ingested into the database after recovery. - // Note that it is possible for application to observe a mixed state during - // the execution of this function. If the user performs range scan over the - // column families with iterators, iterator on one column family may return - // ingested data, while iterator on other column family returns old data. - // Users can use snapshot for a consistent view of data. - // If your db ingests multiple SST files using this API, i.e. args.size() - // > 1, then RocksDB 5.15 and earlier will not be able to open it. - // - // REQUIRES: each arg corresponds to a different column family: namely, for - // 0 <= i < j < len(args), args[i].column_family != args[j].column_family. - virtual Status IngestExternalFiles( - const std::vector& args) = 0; - - // CreateColumnFamilyWithImport() will create a new column family with - // column_family_name and import external SST files specified in metadata into - // this column family. - // (1) External SST files can be created using SstFileWriter. - // (2) External SST files can be exported from a particular column family in - // an existing DB. - // Option in import_options specifies whether the external files are copied or - // moved (default is copy). 
When option specifies copy, managing files at - // external_file_path is caller's responsibility. When option specifies a - // move, the call ensures that the specified files at external_file_path are - // deleted on successful return and files are not modified on any error - // return. - // On error return, column family handle returned will be nullptr. - // ColumnFamily will be present on successful return and will not be present - // on error return. ColumnFamily may be present on any crash during this call. - virtual Status CreateColumnFamilyWithImport( - const ColumnFamilyOptions& options, const std::string& column_family_name, - const ImportColumnFamilyOptions& import_options, - const ExportImportFilesMetaData& metadata, - ColumnFamilyHandle** handle) = 0; - - // Verify the block checksums of files in db. The block checksums of table - // files are checked. - virtual Status VerifyChecksum(const ReadOptions& read_options) = 0; - - virtual Status VerifyChecksum() { return VerifyChecksum(ReadOptions()); } - - // AddFile() is deprecated, please use IngestExternalFile() - ROCKSDB_DEPRECATED_FUNC virtual Status AddFile( - ColumnFamilyHandle* column_family, - const std::vector& file_path_list, bool move_file = false, - bool skip_snapshot_check = false) { - IngestExternalFileOptions ifo; - ifo.move_files = move_file; - ifo.snapshot_consistency = !skip_snapshot_check; - ifo.allow_global_seqno = false; - ifo.allow_blocking_flush = false; - return IngestExternalFile(column_family, file_path_list, ifo); - } - - ROCKSDB_DEPRECATED_FUNC virtual Status AddFile( - const std::vector& file_path_list, bool move_file = false, - bool skip_snapshot_check = false) { - IngestExternalFileOptions ifo; - ifo.move_files = move_file; - ifo.snapshot_consistency = !skip_snapshot_check; - ifo.allow_global_seqno = false; - ifo.allow_blocking_flush = false; - return IngestExternalFile(DefaultColumnFamily(), file_path_list, ifo); - } - - // AddFile() is deprecated, please use 
IngestExternalFile() - ROCKSDB_DEPRECATED_FUNC virtual Status AddFile( - ColumnFamilyHandle* column_family, const std::string& file_path, - bool move_file = false, bool skip_snapshot_check = false) { - IngestExternalFileOptions ifo; - ifo.move_files = move_file; - ifo.snapshot_consistency = !skip_snapshot_check; - ifo.allow_global_seqno = false; - ifo.allow_blocking_flush = false; - return IngestExternalFile(column_family, {file_path}, ifo); - } - - ROCKSDB_DEPRECATED_FUNC virtual Status AddFile( - const std::string& file_path, bool move_file = false, - bool skip_snapshot_check = false) { - IngestExternalFileOptions ifo; - ifo.move_files = move_file; - ifo.snapshot_consistency = !skip_snapshot_check; - ifo.allow_global_seqno = false; - ifo.allow_blocking_flush = false; - return IngestExternalFile(DefaultColumnFamily(), {file_path}, ifo); - } - - // Load table file with information "file_info" into "column_family" - ROCKSDB_DEPRECATED_FUNC virtual Status AddFile( - ColumnFamilyHandle* column_family, - const std::vector& file_info_list, - bool move_file = false, bool skip_snapshot_check = false) { - std::vector external_files; - for (const ExternalSstFileInfo& file_info : file_info_list) { - external_files.push_back(file_info.file_path); - } - IngestExternalFileOptions ifo; - ifo.move_files = move_file; - ifo.snapshot_consistency = !skip_snapshot_check; - ifo.allow_global_seqno = false; - ifo.allow_blocking_flush = false; - return IngestExternalFile(column_family, external_files, ifo); - } - - ROCKSDB_DEPRECATED_FUNC virtual Status AddFile( - const std::vector& file_info_list, - bool move_file = false, bool skip_snapshot_check = false) { - std::vector external_files; - for (const ExternalSstFileInfo& file_info : file_info_list) { - external_files.push_back(file_info.file_path); - } - IngestExternalFileOptions ifo; - ifo.move_files = move_file; - ifo.snapshot_consistency = !skip_snapshot_check; - ifo.allow_global_seqno = false; - ifo.allow_blocking_flush = false; - 
return IngestExternalFile(DefaultColumnFamily(), external_files, ifo); - } - - ROCKSDB_DEPRECATED_FUNC virtual Status AddFile( - ColumnFamilyHandle* column_family, const ExternalSstFileInfo* file_info, - bool move_file = false, bool skip_snapshot_check = false) { - IngestExternalFileOptions ifo; - ifo.move_files = move_file; - ifo.snapshot_consistency = !skip_snapshot_check; - ifo.allow_global_seqno = false; - ifo.allow_blocking_flush = false; - return IngestExternalFile(column_family, {file_info->file_path}, ifo); - } - - ROCKSDB_DEPRECATED_FUNC virtual Status AddFile( - const ExternalSstFileInfo* file_info, bool move_file = false, - bool skip_snapshot_check = false) { - IngestExternalFileOptions ifo; - ifo.move_files = move_file; - ifo.snapshot_consistency = !skip_snapshot_check; - ifo.allow_global_seqno = false; - ifo.allow_blocking_flush = false; - return IngestExternalFile(DefaultColumnFamily(), {file_info->file_path}, - ifo); - } - -#endif // ROCKSDB_LITE - - // Returns the unique ID which is read from IDENTITY file during the opening - // of database by setting in the identity variable - // Returns Status::OK if identity could be set properly - virtual Status GetDbIdentity(std::string& identity) const = 0; - - // Return a unique identifier for each DB object that is opened - // This DB session ID should be unique among all open DB instances on all - // hosts, and should be unique among re-openings of the same or other DBs. - // (Two open DBs have the same identity from other function GetDbIdentity when - // one is physically copied from the other.) 
- virtual Status GetDbSessionId(std::string& session_id) const = 0; - - // Returns default column family handle - virtual ColumnFamilyHandle* DefaultColumnFamily() const = 0; - -#ifndef ROCKSDB_LITE - virtual Status GetPropertiesOfAllTables(ColumnFamilyHandle* column_family, - TablePropertiesCollection* props) = 0; - virtual Status GetPropertiesOfAllTables(TablePropertiesCollection* props) { - return GetPropertiesOfAllTables(DefaultColumnFamily(), props); - } - virtual Status GetPropertiesOfTablesInRange( - ColumnFamilyHandle* column_family, const Range* range, std::size_t n, - TablePropertiesCollection* props) = 0; - - virtual Status SuggestCompactRange(ColumnFamilyHandle* /*column_family*/, - const Slice* /*begin*/, - const Slice* /*end*/) { - return Status::NotSupported("SuggestCompactRange() is not implemented."); - } - - virtual Status PromoteL0(ColumnFamilyHandle* /*column_family*/, - int /*target_level*/) { - return Status::NotSupported("PromoteL0() is not implemented."); - } - - // Trace DB operations. Use EndTrace() to stop tracing. - virtual Status StartTrace(const TraceOptions& /*options*/, - std::unique_ptr&& /*trace_writer*/) { - return Status::NotSupported("StartTrace() is not implemented."); - } - - virtual Status EndTrace() { - return Status::NotSupported("EndTrace() is not implemented."); - } - - // StartIOTrace and EndIOTrace are experimental. They are not enabled yet. - virtual Status StartIOTrace(Env* /*env*/, const TraceOptions& /*options*/, - std::unique_ptr&& /*trace_writer*/) { - return Status::NotSupported("StartTrace() is not implemented."); - } - - virtual Status EndIOTrace() { - return Status::NotSupported("StartTrace() is not implemented."); - } - - // Trace block cache accesses. Use EndBlockCacheTrace() to stop tracing. 
- virtual Status StartBlockCacheTrace( - const TraceOptions& /*options*/, - std::unique_ptr&& /*trace_writer*/) { - return Status::NotSupported("StartBlockCacheTrace() is not implemented."); - } - - virtual Status EndBlockCacheTrace() { - return Status::NotSupported("EndBlockCacheTrace() is not implemented."); - } -#endif // ROCKSDB_LITE - - // Needed for StackableDB - virtual DB* GetRootDB() { return this; } - - // Given a window [start_time, end_time), setup a StatsHistoryIterator - // to access stats history. Note the start_time and end_time are epoch - // time measured in seconds, and end_time is an exclusive bound. - virtual Status GetStatsHistory( - uint64_t /*start_time*/, uint64_t /*end_time*/, - std::unique_ptr* /*stats_iterator*/) { - return Status::NotSupported("GetStatsHistory() is not implemented."); - } - -#ifndef ROCKSDB_LITE - // Make the secondary instance catch up with the primary by tailing and - // replaying the MANIFEST and WAL of the primary. - // Column families created by the primary after the secondary instance starts - // will be ignored unless the secondary instance closes and restarts with the - // newly created column families. - // Column families that exist before secondary instance starts and dropped by - // the primary afterwards will be marked as dropped. However, as long as the - // secondary instance does not delete the corresponding column family - // handles, the data of the column family is still accessible to the - // secondary. - // TODO: we will support WAL tailing soon. - virtual Status TryCatchUpWithPrimary() { - return Status::NotSupported("Supported only by secondary instance"); - } -#endif // !ROCKSDB_LITE -}; - -// Destroy the contents of the specified database. -// Be very careful using this method. 
-Status DestroyDB(const std::string& name, const Options& options, - const std::vector& column_families = - std::vector()); - -#ifndef ROCKSDB_LITE -// If a DB cannot be opened, you may attempt to call this method to -// resurrect as much of the contents of the database as possible. -// Some data may be lost, so be careful when calling this function -// on a database that contains important information. -// -// With this API, we will warn and skip data associated with column families not -// specified in column_families. -// -// @param column_families Descriptors for known column families -Status RepairDB(const std::string& dbname, const DBOptions& db_options, - const std::vector& column_families); - -// @param unknown_cf_opts Options for column families encountered during the -// repair that were not specified in column_families. -Status RepairDB(const std::string& dbname, const DBOptions& db_options, - const std::vector& column_families, - const ColumnFamilyOptions& unknown_cf_opts); - -// @param options These options will be used for the database and for ALL column -// families encountered during the repair -Status RepairDB(const std::string& dbname, const Options& options); - -#endif - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/db_bench_tool.h b/dist/darwin_amd64/include/rocksdb/db_bench_tool.h deleted file mode 100644 index 17f4e6b..0000000 --- a/dist/darwin_amd64/include/rocksdb/db_bench_tool.h +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (c) 2013-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). 
-#pragma once - -#include "rocksdb/rocksdb_namespace.h" - -namespace ROCKSDB_NAMESPACE { -int db_bench_tool(int argc, char** argv); -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/db_dump_tool.h b/dist/darwin_amd64/include/rocksdb/db_dump_tool.h deleted file mode 100644 index b7d4766..0000000 --- a/dist/darwin_amd64/include/rocksdb/db_dump_tool.h +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once -#ifndef ROCKSDB_LITE - -#include - -#include "rocksdb/db.h" - -namespace ROCKSDB_NAMESPACE { - -struct DumpOptions { - // Database that will be dumped - std::string db_path; - // File location that will contain dump output - std::string dump_location; - // Don't include db information header in the dump - bool anonymous = false; -}; - -class DbDumpTool { - public: - bool Run(const DumpOptions& dump_options, - ROCKSDB_NAMESPACE::Options options = ROCKSDB_NAMESPACE::Options()); -}; - -struct UndumpOptions { - // Database that we will load the dumped file into - std::string db_path; - // File location of the dumped file that will be loaded - std::string dump_location; - // Compact the db after loading the dumped file - bool compact_db = false; -}; - -class DbUndumpTool { - public: - bool Run(const UndumpOptions& undump_options, - ROCKSDB_NAMESPACE::Options options = ROCKSDB_NAMESPACE::Options()); -}; -} // namespace ROCKSDB_NAMESPACE -#endif // ROCKSDB_LITE diff --git a/dist/darwin_amd64/include/rocksdb/db_stress_tool.h b/dist/darwin_amd64/include/rocksdb/db_stress_tool.h deleted file mode 100644 index 7d3d42c..0000000 --- a/dist/darwin_amd64/include/rocksdb/db_stress_tool.h +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (c) 2013-present, Facebook, Inc. All rights reserved. 
-// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -#pragma once - -#include "rocksdb/rocksdb_namespace.h" - -namespace ROCKSDB_NAMESPACE { -int db_stress_tool(int argc, char** argv); -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/env.h b/dist/darwin_amd64/include/rocksdb/env.h deleted file mode 100644 index 955d591..0000000 --- a/dist/darwin_amd64/include/rocksdb/env.h +++ /dev/null @@ -1,1653 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. -// -// An Env is an interface used by the rocksdb implementation to access -// operating system functionality like the filesystem etc. Callers -// may wish to provide a custom Env object when opening a database to -// get fine gain control; e.g., to rate limit file system operations. -// -// All Env implementations are safe for concurrent access from -// multiple threads without any external synchronization. 
- -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include "rocksdb/status.h" -#include "rocksdb/thread_status.h" - -#ifdef _WIN32 -// Windows API macro interference -#undef DeleteFile -#undef GetCurrentTime -#endif - -#if defined(__GNUC__) || defined(__clang__) -#define ROCKSDB_PRINTF_FORMAT_ATTR(format_param, dots_param) \ - __attribute__((__format__(__printf__, format_param, dots_param))) -#else -#define ROCKSDB_PRINTF_FORMAT_ATTR(format_param, dots_param) -#endif - -namespace ROCKSDB_NAMESPACE { - -class DynamicLibrary; -class FileLock; -class Logger; -class RandomAccessFile; -class SequentialFile; -class Slice; -class WritableFile; -class RandomRWFile; -class MemoryMappedFileBuffer; -class Directory; -struct DBOptions; -struct ImmutableDBOptions; -struct MutableDBOptions; -class RateLimiter; -class ThreadStatusUpdater; -struct ThreadStatus; -class FileSystem; - -const size_t kDefaultPageSize = 4 * 1024; - -enum class CpuPriority { - kIdle = 0, - kLow = 1, - kNormal = 2, - kHigh = 3, -}; - -// Options while opening a file to read/write -struct EnvOptions { - // Construct with default Options - EnvOptions(); - - // Construct from Options - explicit EnvOptions(const DBOptions& options); - - // If true, then use mmap to read data - bool use_mmap_reads = false; - - // If true, then use mmap to write data - bool use_mmap_writes = true; - - // If true, then use O_DIRECT for reading data - bool use_direct_reads = false; - - // If true, then use O_DIRECT for writing data - bool use_direct_writes = false; - - // If false, fallocate() calls are bypassed - bool allow_fallocate = true; - - // If true, set the FD_CLOEXEC on open fd. - bool set_fd_cloexec = true; - - // Allows OS to incrementally sync files to disk while they are being - // written, in the background. Issue one request for every bytes_per_sync - // written. 0 turns it off. 
- // Default: 0 - uint64_t bytes_per_sync = 0; - - // When true, guarantees the file has at most `bytes_per_sync` bytes submitted - // for writeback at any given time. - // - // - If `sync_file_range` is supported it achieves this by waiting for any - // prior `sync_file_range`s to finish before proceeding. In this way, - // processing (compression, etc.) can proceed uninhibited in the gap - // between `sync_file_range`s, and we block only when I/O falls behind. - // - Otherwise the `WritableFile::Sync` method is used. Note this mechanism - // always blocks, thus preventing the interleaving of I/O and processing. - // - // Note: Enabling this option does not provide any additional persistence - // guarantees, as it may use `sync_file_range`, which does not write out - // metadata. - // - // Default: false - bool strict_bytes_per_sync = false; - - // If true, we will preallocate the file with FALLOC_FL_KEEP_SIZE flag, which - // means that file size won't change as part of preallocation. - // If false, preallocation will also change the file size. This option will - // improve the performance in workloads where you sync the data on every - // write. 
By default, we set it to true for MANIFEST writes and false for - // WAL writes - bool fallocate_with_keep_size = true; - - // See DBOptions doc - size_t compaction_readahead_size = 0; - - // See DBOptions doc - size_t random_access_max_buffer_size = 0; - - // See DBOptions doc - size_t writable_file_max_buffer_size = 1024 * 1024; - - // If not nullptr, write rate limiting is enabled for flush and compaction - RateLimiter* rate_limiter = nullptr; -}; - -class Env { - public: - struct FileAttributes { - // File name - std::string name; - - // Size of file in bytes - uint64_t size_bytes; - }; - - Env(); - // Construct an Env with a separate FileSystem implementation - Env(std::shared_ptr fs); - // No copying allowed - Env(const Env&) = delete; - void operator=(const Env&) = delete; - - virtual ~Env(); - - static const char* Type() { return "Environment"; } - - // Loads the environment specified by the input value into the result - static Status LoadEnv(const std::string& value, Env** result); - - // Loads the environment specified by the input value into the result - static Status LoadEnv(const std::string& value, Env** result, - std::shared_ptr* guard); - - // Return a default environment suitable for the current operating - // system. Sophisticated users may wish to provide their own Env - // implementation instead of relying on this default environment. - // - // The result of Default() belongs to rocksdb and must never be deleted. - static Env* Default(); - - // See FileSystem::RegisterDbPaths. - virtual Status RegisterDbPaths(const std::vector& /*paths*/) { - return Status::OK(); - } - // See FileSystem::UnregisterDbPaths. - virtual Status UnregisterDbPaths(const std::vector& /*paths*/) { - return Status::OK(); - } - - // Create a brand new sequentially-readable file with the specified name. - // On success, stores a pointer to the new file in *result and returns OK. - // On failure stores nullptr in *result and returns non-OK. 
If the file does - // not exist, returns a non-OK status. - // - // The returned file will only be accessed by one thread at a time. - virtual Status NewSequentialFile(const std::string& fname, - std::unique_ptr* result, - const EnvOptions& options) = 0; - - // Create a brand new random access read-only file with the - // specified name. On success, stores a pointer to the new file in - // *result and returns OK. On failure stores nullptr in *result and - // returns non-OK. If the file does not exist, returns a non-OK - // status. - // - // The returned file may be concurrently accessed by multiple threads. - virtual Status NewRandomAccessFile(const std::string& fname, - std::unique_ptr* result, - const EnvOptions& options) = 0; - // These values match Linux definition - // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/include/uapi/linux/fcntl.h#n56 - enum WriteLifeTimeHint { - WLTH_NOT_SET = 0, // No hint information set - WLTH_NONE, // No hints about write life time - WLTH_SHORT, // Data written has a short life time - WLTH_MEDIUM, // Data written has a medium life time - WLTH_LONG, // Data written has a long life time - WLTH_EXTREME, // Data written has an extremely long life time - }; - - // Create an object that writes to a new file with the specified - // name. Deletes any existing file with the same name and creates a - // new file. On success, stores a pointer to the new file in - // *result and returns OK. On failure stores nullptr in *result and - // returns non-OK. - // - // The returned file will only be accessed by one thread at a time. - virtual Status NewWritableFile(const std::string& fname, - std::unique_ptr* result, - const EnvOptions& options) = 0; - - // Create an object that writes to a new file with the specified - // name. Deletes any existing file with the same name and creates a - // new file. On success, stores a pointer to the new file in - // *result and returns OK. 
On failure stores nullptr in *result and - // returns non-OK. - // - // The returned file will only be accessed by one thread at a time. - virtual Status ReopenWritableFile(const std::string& /*fname*/, - std::unique_ptr* /*result*/, - const EnvOptions& /*options*/) { - return Status::NotSupported("Env::ReopenWritableFile() not supported."); - } - - // Reuse an existing file by renaming it and opening it as writable. - virtual Status ReuseWritableFile(const std::string& fname, - const std::string& old_fname, - std::unique_ptr* result, - const EnvOptions& options); - - // Open `fname` for random read and write, if file doesn't exist the file - // will be created. On success, stores a pointer to the new file in - // *result and returns OK. On failure returns non-OK. - // - // The returned file will only be accessed by one thread at a time. - virtual Status NewRandomRWFile(const std::string& /*fname*/, - std::unique_ptr* /*result*/, - const EnvOptions& /*options*/) { - return Status::NotSupported("RandomRWFile is not implemented in this Env"); - } - - // Opens `fname` as a memory-mapped file for read and write (in-place updates - // only, i.e., no appends). On success, stores a raw buffer covering the whole - // file in `*result`. The file must exist prior to this call. - virtual Status NewMemoryMappedFileBuffer( - const std::string& /*fname*/, - std::unique_ptr* /*result*/) { - return Status::NotSupported( - "MemoryMappedFileBuffer is not implemented in this Env"); - } - - // Create an object that represents a directory. Will fail if directory - // doesn't exist. If the directory exists, it will open the directory - // and create a new Directory object. - // - // On success, stores a pointer to the new Directory in - // *result and returns OK. On failure stores nullptr in *result and - // returns non-OK. - virtual Status NewDirectory(const std::string& name, - std::unique_ptr* result) = 0; - - // Returns OK if the named file exists. 
- // NotFound if the named file does not exist, - // the calling process does not have permission to determine - // whether this file exists, or if the path is invalid. - // IOError if an IO Error was encountered - virtual Status FileExists(const std::string& fname) = 0; - - // Store in *result the names of the children of the specified directory. - // The names are relative to "dir". - // Original contents of *results are dropped. - // Returns OK if "dir" exists and "*result" contains its children. - // NotFound if "dir" does not exist, the calling process does not have - // permission to access "dir", or if "dir" is invalid. - // IOError if an IO Error was encountered - virtual Status GetChildren(const std::string& dir, - std::vector* result) = 0; - - // Store in *result the attributes of the children of the specified directory. - // In case the implementation lists the directory prior to iterating the files - // and files are concurrently deleted, the deleted files will be omitted from - // result. - // The name attributes are relative to "dir". - // Original contents of *results are dropped. - // Returns OK if "dir" exists and "*result" contains its children. - // NotFound if "dir" does not exist, the calling process does not have - // permission to access "dir", or if "dir" is invalid. - // IOError if an IO Error was encountered - virtual Status GetChildrenFileAttributes(const std::string& dir, - std::vector* result); - - // Delete the named file. - virtual Status DeleteFile(const std::string& fname) = 0; - - // Truncate the named file to the specified size. - virtual Status Truncate(const std::string& /*fname*/, size_t /*size*/) { - return Status::NotSupported("Truncate is not supported for this Env"); - } - - // Create the specified directory. Returns error if directory exists. - virtual Status CreateDir(const std::string& dirname) = 0; - - // Creates directory if missing. Return Ok if it exists, or successful in - // Creating. 
- virtual Status CreateDirIfMissing(const std::string& dirname) = 0; - - // Delete the specified directory. - // Many implementations of this function will only delete a directory if it is - // empty. - virtual Status DeleteDir(const std::string& dirname) = 0; - - // Store the size of fname in *file_size. - virtual Status GetFileSize(const std::string& fname, uint64_t* file_size) = 0; - - // Store the last modification time of fname in *file_mtime. - virtual Status GetFileModificationTime(const std::string& fname, - uint64_t* file_mtime) = 0; - // Rename file src to target. - virtual Status RenameFile(const std::string& src, - const std::string& target) = 0; - - // Hard Link file src to target. - virtual Status LinkFile(const std::string& /*src*/, - const std::string& /*target*/) { - return Status::NotSupported("LinkFile is not supported for this Env"); - } - - virtual Status NumFileLinks(const std::string& /*fname*/, - uint64_t* /*count*/) { - return Status::NotSupported( - "Getting number of file links is not supported for this Env"); - } - - virtual Status AreFilesSame(const std::string& /*first*/, - const std::string& /*second*/, bool* /*res*/) { - return Status::NotSupported("AreFilesSame is not supported for this Env"); - } - - // Lock the specified file. Used to prevent concurrent access to - // the same db by multiple processes. On failure, stores nullptr in - // *lock and returns non-OK. - // - // On success, stores a pointer to the object that represents the - // acquired lock in *lock and returns OK. The caller should call - // UnlockFile(*lock) to release the lock. If the process exits, - // the lock will be automatically released. - // - // If somebody else already holds the lock, finishes immediately - // with a failure. I.e., this call does not wait for existing locks - // to go away. - // - // May create the named file if it does not already exist. 
- virtual Status LockFile(const std::string& fname, FileLock** lock) = 0; - - // Release the lock acquired by a previous successful call to LockFile. - // REQUIRES: lock was returned by a successful LockFile() call - // REQUIRES: lock has not already been unlocked. - virtual Status UnlockFile(FileLock* lock) = 0; - - // Opens `lib_name` as a dynamic library. - // If the 'search_path' is specified, breaks the path into its components - // based on the appropriate platform separator (";" or ";") and looks for the - // library in those directories. If 'search path is not specified, uses the - // default library path search mechanism (such as LD_LIBRARY_PATH). On - // success, stores a dynamic library in `*result`. - virtual Status LoadLibrary(const std::string& /*lib_name*/, - const std::string& /*search_path */, - std::shared_ptr* /*result*/) { - return Status::NotSupported("LoadLibrary is not implemented in this Env"); - } - - // Priority for scheduling job in thread pool - enum Priority { BOTTOM, LOW, HIGH, USER, TOTAL }; - - static std::string PriorityToString(Priority priority); - - // Priority for requesting bytes in rate limiter scheduler - enum IOPriority { IO_LOW = 0, IO_HIGH = 1, IO_TOTAL = 2 }; - - // Arrange to run "(*function)(arg)" once in a background thread, in - // the thread pool specified by pri. By default, jobs go to the 'LOW' - // priority thread pool. - - // "function" may run in an unspecified thread. Multiple functions - // added to the same Env may run concurrently in different threads. - // I.e., the caller may not assume that background work items are - // serialized. - // When the UnSchedule function is called, the unschedFunction - // registered at the time of Schedule is invoked with arg as a parameter. 
- virtual void Schedule(void (*function)(void* arg), void* arg, - Priority pri = LOW, void* tag = nullptr, - void (*unschedFunction)(void* arg) = nullptr) = 0; - - // Arrange to remove jobs for given arg from the queue_ if they are not - // already scheduled. Caller is expected to have exclusive lock on arg. - virtual int UnSchedule(void* /*arg*/, Priority /*pri*/) { return 0; } - - // Start a new thread, invoking "function(arg)" within the new thread. - // When "function(arg)" returns, the thread will be destroyed. - virtual void StartThread(void (*function)(void* arg), void* arg) = 0; - - // Wait for all threads started by StartThread to terminate. - virtual void WaitForJoin() {} - - // Get thread pool queue length for specific thread pool. - virtual unsigned int GetThreadPoolQueueLen(Priority /*pri*/ = LOW) const { - return 0; - } - - // *path is set to a temporary directory that can be used for testing. It may - // or many not have just been created. The directory may or may not differ - // between runs of the same process, but subsequent calls will return the - // same directory. - virtual Status GetTestDirectory(std::string* path) = 0; - - // Create and returns a default logger (an instance of EnvLogger) for storing - // informational messages. Derived classes can overide to provide custom - // logger. - virtual Status NewLogger(const std::string& fname, - std::shared_ptr* result); - - // Returns the number of micro-seconds since some fixed point in time. - // It is often used as system time such as in GenericRateLimiter - // and other places so a port needs to return system time in order to work. - virtual uint64_t NowMicros() = 0; - - // Returns the number of nano-seconds since some fixed point in time. Only - // useful for computing deltas of time in one run. - // Default implementation simply relies on NowMicros. - // In platform-specific implementations, NowNanos() should return time points - // that are MONOTONIC. 
- virtual uint64_t NowNanos() { return NowMicros() * 1000; } - - // 0 indicates not supported. - virtual uint64_t NowCPUNanos() { return 0; } - - // Sleep/delay the thread for the prescribed number of micro-seconds. - virtual void SleepForMicroseconds(int micros) = 0; - - // Get the current host name. - virtual Status GetHostName(char* name, uint64_t len) = 0; - - // Get the number of seconds since the Epoch, 1970-01-01 00:00:00 (UTC). - // Only overwrites *unix_time on success. - virtual Status GetCurrentTime(int64_t* unix_time) = 0; - - // Get full directory name for this db. - virtual Status GetAbsolutePath(const std::string& db_path, - std::string* output_path) = 0; - - // The number of background worker threads of a specific thread pool - // for this environment. 'LOW' is the default pool. - // default number: 1 - virtual void SetBackgroundThreads(int number, Priority pri = LOW) = 0; - virtual int GetBackgroundThreads(Priority pri = LOW) = 0; - - virtual Status SetAllowNonOwnerAccess(bool /*allow_non_owner_access*/) { - return Status::NotSupported("Env::SetAllowNonOwnerAccess() not supported."); - } - - // Enlarge number of background worker threads of a specific thread pool - // for this environment if it is smaller than specified. 'LOW' is the default - // pool. - virtual void IncBackgroundThreadsIfNeeded(int number, Priority pri) = 0; - - // Lower IO priority for threads from the specified pool. - virtual void LowerThreadPoolIOPriority(Priority /*pool*/ = LOW) {} - - // Lower CPU priority for threads from the specified pool. - virtual Status LowerThreadPoolCPUPriority(Priority /*pool*/, - CpuPriority /*pri*/) { - return Status::NotSupported( - "Env::LowerThreadPoolCPUPriority(Priority, CpuPriority) not supported"); - } - - // Lower CPU priority for threads from the specified pool. 
- virtual void LowerThreadPoolCPUPriority(Priority /*pool*/ = LOW) {} - - // Converts seconds-since-Jan-01-1970 to a printable string - virtual std::string TimeToString(uint64_t time) = 0; - - // Generates a unique id that can be used to identify a db - virtual std::string GenerateUniqueId(); - - // OptimizeForLogWrite will create a new EnvOptions object that is a copy of - // the EnvOptions in the parameters, but is optimized for reading log files. - virtual EnvOptions OptimizeForLogRead(const EnvOptions& env_options) const; - - // OptimizeForManifestRead will create a new EnvOptions object that is a copy - // of the EnvOptions in the parameters, but is optimized for reading manifest - // files. - virtual EnvOptions OptimizeForManifestRead( - const EnvOptions& env_options) const; - - // OptimizeForLogWrite will create a new EnvOptions object that is a copy of - // the EnvOptions in the parameters, but is optimized for writing log files. - // Default implementation returns the copy of the same object. - virtual EnvOptions OptimizeForLogWrite(const EnvOptions& env_options, - const DBOptions& db_options) const; - // OptimizeForManifestWrite will create a new EnvOptions object that is a copy - // of the EnvOptions in the parameters, but is optimized for writing manifest - // files. Default implementation returns the copy of the same object. - virtual EnvOptions OptimizeForManifestWrite( - const EnvOptions& env_options) const; - - // OptimizeForCompactionTableWrite will create a new EnvOptions object that is - // a copy of the EnvOptions in the parameters, but is optimized for writing - // table files. - virtual EnvOptions OptimizeForCompactionTableWrite( - const EnvOptions& env_options, - const ImmutableDBOptions& immutable_ops) const; - - // OptimizeForCompactionTableWrite will create a new EnvOptions object that - // is a copy of the EnvOptions in the parameters, but is optimized for reading - // table files. 
- virtual EnvOptions OptimizeForCompactionTableRead( - const EnvOptions& env_options, - const ImmutableDBOptions& db_options) const; - - // Returns the status of all threads that belong to the current Env. - virtual Status GetThreadList(std::vector* /*thread_list*/) { - return Status::NotSupported("Env::GetThreadList() not supported."); - } - - // Returns the pointer to ThreadStatusUpdater. This function will be - // used in RocksDB internally to update thread status and supports - // GetThreadList(). - virtual ThreadStatusUpdater* GetThreadStatusUpdater() const { - return thread_status_updater_; - } - - // Returns the ID of the current thread. - virtual uint64_t GetThreadID() const; - -// This seems to clash with a macro on Windows, so #undef it here -#undef GetFreeSpace - - // Get the amount of free disk space - virtual Status GetFreeSpace(const std::string& /*path*/, - uint64_t* /*diskfree*/) { - return Status::NotSupported("Env::GetFreeSpace() not supported."); - } - - // Check whether the specified path is a directory - virtual Status IsDirectory(const std::string& /*path*/, bool* /*is_dir*/) { - return Status::NotSupported("Env::IsDirectory() not supported."); - } - - virtual void SanitizeEnvOptions(EnvOptions* /*env_opts*/) const {} - - // Get the FileSystem implementation this Env was constructed with. It - // could be a fully implemented one, or a wrapper class around the Env - const std::shared_ptr& GetFileSystem() const; - - // If you're adding methods here, remember to add them to EnvWrapper too. - - protected: - // The pointer to an internal structure that will update the - // status of each thread. - ThreadStatusUpdater* thread_status_updater_; - - // Pointer to the underlying FileSystem implementation - std::shared_ptr file_system_; -}; - -// The factory function to construct a ThreadStatusUpdater. Any Env -// that supports GetThreadList() feature should call this function in its -// constructor to initialize thread_status_updater_. 
-ThreadStatusUpdater* CreateThreadStatusUpdater(); - -// A file abstraction for reading sequentially through a file -class SequentialFile { - public: - SequentialFile() {} - virtual ~SequentialFile(); - - // Read up to "n" bytes from the file. "scratch[0..n-1]" may be - // written by this routine. Sets "*result" to the data that was - // read (including if fewer than "n" bytes were successfully read). - // May set "*result" to point at data in "scratch[0..n-1]", so - // "scratch[0..n-1]" must be live when "*result" is used. - // If an error was encountered, returns a non-OK status. - // - // REQUIRES: External synchronization - virtual Status Read(size_t n, Slice* result, char* scratch) = 0; - - // Skip "n" bytes from the file. This is guaranteed to be no - // slower that reading the same data, but may be faster. - // - // If end of file is reached, skipping will stop at the end of the - // file, and Skip will return OK. - // - // REQUIRES: External synchronization - virtual Status Skip(uint64_t n) = 0; - - // Indicates the upper layers if the current SequentialFile implementation - // uses direct IO. - virtual bool use_direct_io() const { return false; } - - // Use the returned alignment value to allocate - // aligned buffer for Direct I/O - virtual size_t GetRequiredBufferAlignment() const { return kDefaultPageSize; } - - // Remove any kind of caching of data from the offset to offset+length - // of this file. If the length is 0, then it refers to the end of file. - // If the system is not caching the file contents, then this is a noop. 
- virtual Status InvalidateCache(size_t /*offset*/, size_t /*length*/) { - return Status::NotSupported( - "SequentialFile::InvalidateCache not supported."); - } - - // Positioned Read for direct I/O - // If Direct I/O enabled, offset, n, and scratch should be properly aligned - virtual Status PositionedRead(uint64_t /*offset*/, size_t /*n*/, - Slice* /*result*/, char* /*scratch*/) { - return Status::NotSupported( - "SequentialFile::PositionedRead() not supported."); - } - - // If you're adding methods here, remember to add them to - // SequentialFileWrapper too. -}; - -// A read IO request structure for use in MultiRead -struct ReadRequest { - // File offset in bytes - uint64_t offset; - - // Length to read in bytes - size_t len; - - // A buffer that MultiRead() can optionally place data in. It can - // ignore this and allocate its own buffer - char* scratch; - - // Output parameter set by MultiRead() to point to the data buffer, and - // the number of valid bytes - Slice result; - - // Status of read - Status status; -}; - -// A file abstraction for randomly reading the contents of a file. -class RandomAccessFile { - public: - RandomAccessFile() {} - virtual ~RandomAccessFile(); - - // Read up to "n" bytes from the file starting at "offset". - // "scratch[0..n-1]" may be written by this routine. Sets "*result" - // to the data that was read (including if fewer than "n" bytes were - // successfully read). May set "*result" to point at data in - // "scratch[0..n-1]", so "scratch[0..n-1]" must be live when - // "*result" is used. If an error was encountered, returns a non-OK - // status. - // - // Safe for concurrent use by multiple threads. - // If Direct I/O enabled, offset, n, and scratch should be aligned properly. - virtual Status Read(uint64_t offset, size_t n, Slice* result, - char* scratch) const = 0; - - // Readahead the file starting from offset by n bytes for caching. 
- virtual Status Prefetch(uint64_t /*offset*/, size_t /*n*/) { - return Status::OK(); - } - - // Read a bunch of blocks as described by reqs. The blocks can - // optionally be read in parallel. This is a synchronous call, i.e it - // should return after all reads have completed. The reads will be - // non-overlapping. If the function return Status is not ok, status of - // individual requests will be ignored and return status will be assumed - // for all read requests. The function return status is only meant for any - // any errors that occur before even processing specific read requests - virtual Status MultiRead(ReadRequest* reqs, size_t num_reqs) { - assert(reqs != nullptr); - for (size_t i = 0; i < num_reqs; ++i) { - ReadRequest& req = reqs[i]; - req.status = Read(req.offset, req.len, &req.result, req.scratch); - } - return Status::OK(); - } - - // Tries to get an unique ID for this file that will be the same each time - // the file is opened (and will stay the same while the file is open). - // Furthermore, it tries to make this ID at most "max_size" bytes. If such an - // ID can be created this function returns the length of the ID and places it - // in "id"; otherwise, this function returns 0, in which case "id" - // may not have been modified. - // - // This function guarantees, for IDs from a given environment, two unique ids - // cannot be made equal to each other by adding arbitrary bytes to one of - // them. That is, no unique ID is the prefix of another. - // - // This function guarantees that the returned ID will not be interpretable as - // a single varint. - // - // Note: these IDs are only valid for the duration of the process. - virtual size_t GetUniqueId(char* /*id*/, size_t /*max_size*/) const { - return 0; // Default implementation to prevent issues with backwards - // compatibility. 
- } - - enum AccessPattern { NORMAL, RANDOM, SEQUENTIAL, WILLNEED, DONTNEED }; - - virtual void Hint(AccessPattern /*pattern*/) {} - - // Indicates the upper layers if the current RandomAccessFile implementation - // uses direct IO. - virtual bool use_direct_io() const { return false; } - - // Use the returned alignment value to allocate - // aligned buffer for Direct I/O - virtual size_t GetRequiredBufferAlignment() const { return kDefaultPageSize; } - - // Remove any kind of caching of data from the offset to offset+length - // of this file. If the length is 0, then it refers to the end of file. - // If the system is not caching the file contents, then this is a noop. - virtual Status InvalidateCache(size_t /*offset*/, size_t /*length*/) { - return Status::NotSupported( - "RandomAccessFile::InvalidateCache not supported."); - } - - // If you're adding methods here, remember to add them to - // RandomAccessFileWrapper too. -}; - -// A file abstraction for sequential writing. The implementation -// must provide buffering since callers may append small fragments -// at a time to the file. -class WritableFile { - public: - WritableFile() - : last_preallocated_block_(0), - preallocation_block_size_(0), - io_priority_(Env::IO_TOTAL), - write_hint_(Env::WLTH_NOT_SET), - strict_bytes_per_sync_(false) {} - - explicit WritableFile(const EnvOptions& options) - : last_preallocated_block_(0), - preallocation_block_size_(0), - io_priority_(Env::IO_TOTAL), - write_hint_(Env::WLTH_NOT_SET), - strict_bytes_per_sync_(options.strict_bytes_per_sync) {} - // No copying allowed - WritableFile(const WritableFile&) = delete; - void operator=(const WritableFile&) = delete; - - virtual ~WritableFile(); - - // Append data to the end of the file - // Note: A WriteabelFile object must support either Append or - // PositionedAppend, so the users cannot mix the two. - virtual Status Append(const Slice& data) = 0; - - // PositionedAppend data to the specified offset. 
The new EOF after append - // must be larger than the previous EOF. This is to be used when writes are - // not backed by OS buffers and hence has to always start from the start of - // the sector. The implementation thus needs to also rewrite the last - // partial sector. - // Note: PositionAppend does not guarantee moving the file offset after the - // write. A WritableFile object must support either Append or - // PositionedAppend, so the users cannot mix the two. - // - // PositionedAppend() can only happen on the page/sector boundaries. For that - // reason, if the last write was an incomplete sector we still need to rewind - // back to the nearest sector/page and rewrite the portion of it with whatever - // we need to add. We need to keep where we stop writing. - // - // PositionedAppend() can only write whole sectors. For that reason we have to - // pad with zeros for the last write and trim the file when closing according - // to the position we keep in the previous step. - // - // PositionedAppend() requires aligned buffer to be passed in. The alignment - // required is queried via GetRequiredBufferAlignment() - virtual Status PositionedAppend(const Slice& /* data */, - uint64_t /* offset */) { - return Status::NotSupported( - "WritableFile::PositionedAppend() not supported."); - } - - // Truncate is necessary to trim the file to the correct size - // before closing. It is not always possible to keep track of the file - // size due to whole pages writes. The behavior is undefined if called - // with other writes to follow. - virtual Status Truncate(uint64_t /*size*/) { return Status::OK(); } - virtual Status Close() = 0; - virtual Status Flush() = 0; - virtual Status Sync() = 0; // sync data - - /* - * Sync data and/or metadata as well. - * By default, sync only data. - * Override this method for environments where we need to sync - * metadata as well. 
- */ - virtual Status Fsync() { return Sync(); } - - // true if Sync() and Fsync() are safe to call concurrently with Append() - // and Flush(). - virtual bool IsSyncThreadSafe() const { return false; } - - // Indicates the upper layers if the current WritableFile implementation - // uses direct IO. - virtual bool use_direct_io() const { return false; } - - // Use the returned alignment value to allocate - // aligned buffer for Direct I/O - virtual size_t GetRequiredBufferAlignment() const { return kDefaultPageSize; } - /* - * Change the priority in rate limiter if rate limiting is enabled. - * If rate limiting is not enabled, this call has no effect. - */ - virtual void SetIOPriority(Env::IOPriority pri) { io_priority_ = pri; } - - virtual Env::IOPriority GetIOPriority() { return io_priority_; } - - virtual void SetWriteLifeTimeHint(Env::WriteLifeTimeHint hint) { - write_hint_ = hint; - } - - virtual Env::WriteLifeTimeHint GetWriteLifeTimeHint() { return write_hint_; } - /* - * Get the size of valid data in the file. - */ - virtual uint64_t GetFileSize() { return 0; } - - /* - * Get and set the default pre-allocation block size for writes to - * this file. If non-zero, then Allocate will be used to extend the - * underlying storage of a file (generally via fallocate) if the Env - * instance supports it. - */ - virtual void SetPreallocationBlockSize(size_t size) { - preallocation_block_size_ = size; - } - - virtual void GetPreallocationStatus(size_t* block_size, - size_t* last_allocated_block) { - *last_allocated_block = last_preallocated_block_; - *block_size = preallocation_block_size_; - } - - // For documentation, refer to RandomAccessFile::GetUniqueId() - virtual size_t GetUniqueId(char* /*id*/, size_t /*max_size*/) const { - return 0; // Default implementation to prevent issues with backwards - } - - // Remove any kind of caching of data from the offset to offset+length - // of this file. If the length is 0, then it refers to the end of file. 
- // If the system is not caching the file contents, then this is a noop. - // This call has no effect on dirty pages in the cache. - virtual Status InvalidateCache(size_t /*offset*/, size_t /*length*/) { - return Status::NotSupported("WritableFile::InvalidateCache not supported."); - } - - // Sync a file range with disk. - // offset is the starting byte of the file range to be synchronized. - // nbytes specifies the length of the range to be synchronized. - // This asks the OS to initiate flushing the cached data to disk, - // without waiting for completion. - // Default implementation does nothing. - virtual Status RangeSync(uint64_t /*offset*/, uint64_t /*nbytes*/) { - if (strict_bytes_per_sync_) { - return Sync(); - } - return Status::OK(); - } - - // PrepareWrite performs any necessary preparation for a write - // before the write actually occurs. This allows for pre-allocation - // of space on devices where it can result in less file - // fragmentation and/or less waste from over-zealous filesystem - // pre-allocation. - virtual void PrepareWrite(size_t offset, size_t len) { - if (preallocation_block_size_ == 0) { - return; - } - // If this write would cross one or more preallocation blocks, - // determine what the last preallocation block necessary to - // cover this write would be and Allocate to that point. - const auto block_size = preallocation_block_size_; - size_t new_last_preallocated_block = - (offset + len + block_size - 1) / block_size; - if (new_last_preallocated_block > last_preallocated_block_) { - size_t num_spanned_blocks = - new_last_preallocated_block - last_preallocated_block_; - // TODO: Don't ignore errors from allocate - Allocate(block_size * last_preallocated_block_, - block_size * num_spanned_blocks) - .PermitUncheckedError(); - last_preallocated_block_ = new_last_preallocated_block; - } - } - - // Pre-allocates space for a file. 
- virtual Status Allocate(uint64_t /*offset*/, uint64_t /*len*/) { - return Status::OK(); - } - - // If you're adding methods here, remember to add them to - // WritableFileWrapper too. - - protected: - size_t preallocation_block_size() { return preallocation_block_size_; } - - private: - size_t last_preallocated_block_; - size_t preallocation_block_size_; - - protected: - Env::IOPriority io_priority_; - Env::WriteLifeTimeHint write_hint_; - const bool strict_bytes_per_sync_; -}; - -// A file abstraction for random reading and writing. -class RandomRWFile { - public: - RandomRWFile() {} - // No copying allowed - RandomRWFile(const RandomRWFile&) = delete; - RandomRWFile& operator=(const RandomRWFile&) = delete; - - virtual ~RandomRWFile() {} - - // Indicates if the class makes use of direct I/O - // If false you must pass aligned buffer to Write() - virtual bool use_direct_io() const { return false; } - - // Use the returned alignment value to allocate - // aligned buffer for Direct I/O - virtual size_t GetRequiredBufferAlignment() const { return kDefaultPageSize; } - - // Write bytes in `data` at offset `offset`, Returns Status::OK() on success. - // Pass aligned buffer when use_direct_io() returns true. - virtual Status Write(uint64_t offset, const Slice& data) = 0; - - // Read up to `n` bytes starting from offset `offset` and store them in - // result, provided `scratch` size should be at least `n`. - // Returns Status::OK() on success. - virtual Status Read(uint64_t offset, size_t n, Slice* result, - char* scratch) const = 0; - - virtual Status Flush() = 0; - - virtual Status Sync() = 0; - - virtual Status Fsync() { return Sync(); } - - virtual Status Close() = 0; - - // If you're adding methods here, remember to add them to - // RandomRWFileWrapper too. -}; - -// MemoryMappedFileBuffer object represents a memory-mapped file's raw buffer. -// Subclasses should release the mapping upon destruction. 
-class MemoryMappedFileBuffer { - public: - MemoryMappedFileBuffer(void* _base, size_t _length) - : base_(_base), length_(_length) {} - - virtual ~MemoryMappedFileBuffer() = 0; - - // We do not want to unmap this twice. We can make this class - // movable if desired, however, since - MemoryMappedFileBuffer(const MemoryMappedFileBuffer&) = delete; - MemoryMappedFileBuffer& operator=(const MemoryMappedFileBuffer&) = delete; - - void* GetBase() const { return base_; } - size_t GetLen() const { return length_; } - - protected: - void* base_; - const size_t length_; -}; - -// Directory object represents collection of files and implements -// filesystem operations that can be executed on directories. -class Directory { - public: - virtual ~Directory() {} - // Fsync directory. Can be called concurrently from multiple threads. - virtual Status Fsync() = 0; - - virtual size_t GetUniqueId(char* /*id*/, size_t /*max_size*/) const { - return 0; - } - - // If you're adding methods here, remember to add them to - // DirectoryWrapper too. -}; - -enum InfoLogLevel : unsigned char { - DEBUG_LEVEL = 0, - INFO_LEVEL, - WARN_LEVEL, - ERROR_LEVEL, - FATAL_LEVEL, - HEADER_LEVEL, - NUM_INFO_LOG_LEVELS, -}; - -// An interface for writing log messages. -class Logger { - public: - size_t kDoNotSupportGetLogFileSize = (std::numeric_limits::max)(); - - explicit Logger(const InfoLogLevel log_level = InfoLogLevel::INFO_LEVEL) - : closed_(false), log_level_(log_level) {} - // No copying allowed - Logger(const Logger&) = delete; - void operator=(const Logger&) = delete; - - virtual ~Logger(); - - // Close the log file. Must be called before destructor. If the return - // status is NotSupported(), it means the implementation does cleanup in - // the destructor - virtual Status Close(); - - // Write a header to the log file with the specified format - // It is recommended that you log all header information at the start of the - // application. But it is not enforced. 
- virtual void LogHeader(const char* format, va_list ap) { - // Default implementation does a simple INFO level log write. - // Please override as per the logger class requirement. - Logv(format, ap); - } - - // Write an entry to the log file with the specified format. - virtual void Logv(const char* format, va_list ap) = 0; - - // Write an entry to the log file with the specified log level - // and format. Any log with level under the internal log level - // of *this (see @SetInfoLogLevel and @GetInfoLogLevel) will not be - // printed. - virtual void Logv(const InfoLogLevel log_level, const char* format, - va_list ap); - - virtual size_t GetLogFileSize() const { return kDoNotSupportGetLogFileSize; } - // Flush to the OS buffers - virtual void Flush() {} - virtual InfoLogLevel GetInfoLogLevel() const { return log_level_; } - virtual void SetInfoLogLevel(const InfoLogLevel log_level) { - log_level_ = log_level; - } - - // If you're adding methods here, remember to add them to LoggerWrapper too. - - protected: - virtual Status CloseImpl(); - bool closed_; - - private: - InfoLogLevel log_level_; -}; - -// Identifies a locked file. -class FileLock { - public: - FileLock() {} - virtual ~FileLock(); - - private: - // No copying allowed - FileLock(const FileLock&) = delete; - void operator=(const FileLock&) = delete; -}; - -class DynamicLibrary { - public: - virtual ~DynamicLibrary() {} - - // Returns the name of the dynamic library. - virtual const char* Name() const = 0; - - // Loads the symbol for sym_name from the library and updates the input - // function. Returns the loaded symbol. - template - Status LoadFunction(const std::string& sym_name, std::function* function) { - assert(nullptr != function); - void* ptr = nullptr; - Status s = LoadSymbol(sym_name, &ptr); - *function = reinterpret_cast(ptr); - return s; - } - // Loads and returns the symbol for sym_name from the library. 
- virtual Status LoadSymbol(const std::string& sym_name, void** func) = 0; -}; - -extern void LogFlush(const std::shared_ptr& info_log); - -extern void Log(const InfoLogLevel log_level, - const std::shared_ptr& info_log, const char* format, - ...) ROCKSDB_PRINTF_FORMAT_ATTR(3, 4); - -// a set of log functions with different log levels. -extern void Header(const std::shared_ptr& info_log, const char* format, - ...) ROCKSDB_PRINTF_FORMAT_ATTR(2, 3); -extern void Debug(const std::shared_ptr& info_log, const char* format, - ...) ROCKSDB_PRINTF_FORMAT_ATTR(2, 3); -extern void Info(const std::shared_ptr& info_log, const char* format, - ...) ROCKSDB_PRINTF_FORMAT_ATTR(2, 3); -extern void Warn(const std::shared_ptr& info_log, const char* format, - ...) ROCKSDB_PRINTF_FORMAT_ATTR(2, 3); -extern void Error(const std::shared_ptr& info_log, const char* format, - ...) ROCKSDB_PRINTF_FORMAT_ATTR(2, 3); -extern void Fatal(const std::shared_ptr& info_log, const char* format, - ...) ROCKSDB_PRINTF_FORMAT_ATTR(2, 3); - -// Log the specified data to *info_log if info_log is non-nullptr. -// The default info log level is InfoLogLevel::INFO_LEVEL. -extern void Log(const std::shared_ptr& info_log, const char* format, - ...) ROCKSDB_PRINTF_FORMAT_ATTR(2, 3); - -extern void LogFlush(Logger* info_log); - -extern void Log(const InfoLogLevel log_level, Logger* info_log, - const char* format, ...) ROCKSDB_PRINTF_FORMAT_ATTR(3, 4); - -// The default info log level is InfoLogLevel::INFO_LEVEL. -extern void Log(Logger* info_log, const char* format, ...) - ROCKSDB_PRINTF_FORMAT_ATTR(2, 3); - -// a set of log functions with different log levels. -extern void Header(Logger* info_log, const char* format, ...) - ROCKSDB_PRINTF_FORMAT_ATTR(2, 3); -extern void Debug(Logger* info_log, const char* format, ...) - ROCKSDB_PRINTF_FORMAT_ATTR(2, 3); -extern void Info(Logger* info_log, const char* format, ...) - ROCKSDB_PRINTF_FORMAT_ATTR(2, 3); -extern void Warn(Logger* info_log, const char* format, ...) 
- ROCKSDB_PRINTF_FORMAT_ATTR(2, 3); -extern void Error(Logger* info_log, const char* format, ...) - ROCKSDB_PRINTF_FORMAT_ATTR(2, 3); -extern void Fatal(Logger* info_log, const char* format, ...) - ROCKSDB_PRINTF_FORMAT_ATTR(2, 3); - -// A utility routine: write "data" to the named file. -extern Status WriteStringToFile(Env* env, const Slice& data, - const std::string& fname, - bool should_sync = false); - -// A utility routine: read contents of named file into *data -extern Status ReadFileToString(Env* env, const std::string& fname, - std::string* data); - -// Below are helpers for wrapping most of the classes in this file. -// They forward all calls to another instance of the class. -// Useful when wrapping the default implementations. -// Typical usage is to inherit your wrapper from *Wrapper, e.g.: -// -// class MySequentialFileWrapper : public -// ROCKSDB_NAMESPACE::SequentialFileWrapper { -// public: -// MySequentialFileWrapper(ROCKSDB_NAMESPACE::SequentialFile* target): -// ROCKSDB_NAMESPACE::SequentialFileWrapper(target) {} -// Status Read(size_t n, Slice* result, char* scratch) override { -// cout << "Doing a read of size " << n << "!" << endl; -// return ROCKSDB_NAMESPACE::SequentialFileWrapper::Read(n, result, -// scratch); -// } -// // All other methods are forwarded to target_ automatically. -// }; -// -// This is often more convenient than inheriting the class directly because -// (a) Don't have to override and forward all methods - the Wrapper will -// forward everything you're not explicitly overriding. -// (b) Don't need to update the wrapper when more methods are added to the -// rocksdb class. Unless you actually want to override the behavior. -// (And unless rocksdb people forgot to update the *Wrapper class.) - -// An implementation of Env that forwards all calls to another Env. -// May be useful to clients who wish to override just part of the -// functionality of another Env. 
-class EnvWrapper : public Env { - public: - // Initialize an EnvWrapper that delegates all calls to *t - explicit EnvWrapper(Env* t) : target_(t) {} - ~EnvWrapper() override; - - // Return the target to which this Env forwards all calls - Env* target() const { return target_; } - - // The following text is boilerplate that forwards all methods to target() - Status RegisterDbPaths(const std::vector& paths) override { - return target_->RegisterDbPaths(paths); - } - - Status UnregisterDbPaths(const std::vector& paths) override { - return target_->UnregisterDbPaths(paths); - } - - Status NewSequentialFile(const std::string& f, - std::unique_ptr* r, - const EnvOptions& options) override { - return target_->NewSequentialFile(f, r, options); - } - Status NewRandomAccessFile(const std::string& f, - std::unique_ptr* r, - const EnvOptions& options) override { - return target_->NewRandomAccessFile(f, r, options); - } - Status NewWritableFile(const std::string& f, std::unique_ptr* r, - const EnvOptions& options) override { - return target_->NewWritableFile(f, r, options); - } - Status ReopenWritableFile(const std::string& fname, - std::unique_ptr* result, - const EnvOptions& options) override { - return target_->ReopenWritableFile(fname, result, options); - } - Status ReuseWritableFile(const std::string& fname, - const std::string& old_fname, - std::unique_ptr* r, - const EnvOptions& options) override { - return target_->ReuseWritableFile(fname, old_fname, r, options); - } - Status NewRandomRWFile(const std::string& fname, - std::unique_ptr* result, - const EnvOptions& options) override { - return target_->NewRandomRWFile(fname, result, options); - } - Status NewMemoryMappedFileBuffer( - const std::string& fname, - std::unique_ptr* result) override { - return target_->NewMemoryMappedFileBuffer(fname, result); - } - Status NewDirectory(const std::string& name, - std::unique_ptr* result) override { - return target_->NewDirectory(name, result); - } - Status FileExists(const 
std::string& f) override { - return target_->FileExists(f); - } - Status GetChildren(const std::string& dir, - std::vector* r) override { - return target_->GetChildren(dir, r); - } - Status GetChildrenFileAttributes( - const std::string& dir, std::vector* result) override { - return target_->GetChildrenFileAttributes(dir, result); - } - Status DeleteFile(const std::string& f) override { - return target_->DeleteFile(f); - } - Status Truncate(const std::string& fname, size_t size) override { - return target_->Truncate(fname, size); - } - Status CreateDir(const std::string& d) override { - return target_->CreateDir(d); - } - Status CreateDirIfMissing(const std::string& d) override { - return target_->CreateDirIfMissing(d); - } - Status DeleteDir(const std::string& d) override { - return target_->DeleteDir(d); - } - Status GetFileSize(const std::string& f, uint64_t* s) override { - return target_->GetFileSize(f, s); - } - - Status GetFileModificationTime(const std::string& fname, - uint64_t* file_mtime) override { - return target_->GetFileModificationTime(fname, file_mtime); - } - - Status RenameFile(const std::string& s, const std::string& t) override { - return target_->RenameFile(s, t); - } - - Status LinkFile(const std::string& s, const std::string& t) override { - return target_->LinkFile(s, t); - } - - Status NumFileLinks(const std::string& fname, uint64_t* count) override { - return target_->NumFileLinks(fname, count); - } - - Status AreFilesSame(const std::string& first, const std::string& second, - bool* res) override { - return target_->AreFilesSame(first, second, res); - } - - Status LockFile(const std::string& f, FileLock** l) override { - return target_->LockFile(f, l); - } - - Status UnlockFile(FileLock* l) override { return target_->UnlockFile(l); } - - Status IsDirectory(const std::string& path, bool* is_dir) override { - return target_->IsDirectory(path, is_dir); - } - - Status LoadLibrary(const std::string& lib_name, - const std::string& search_path, 
- std::shared_ptr* result) override { - return target_->LoadLibrary(lib_name, search_path, result); - } - - void Schedule(void (*f)(void* arg), void* a, Priority pri, - void* tag = nullptr, void (*u)(void* arg) = nullptr) override { - return target_->Schedule(f, a, pri, tag, u); - } - - int UnSchedule(void* tag, Priority pri) override { - return target_->UnSchedule(tag, pri); - } - - void StartThread(void (*f)(void*), void* a) override { - return target_->StartThread(f, a); - } - void WaitForJoin() override { return target_->WaitForJoin(); } - unsigned int GetThreadPoolQueueLen(Priority pri = LOW) const override { - return target_->GetThreadPoolQueueLen(pri); - } - Status GetTestDirectory(std::string* path) override { - return target_->GetTestDirectory(path); - } - Status NewLogger(const std::string& fname, - std::shared_ptr* result) override { - return target_->NewLogger(fname, result); - } - uint64_t NowMicros() override { return target_->NowMicros(); } - uint64_t NowNanos() override { return target_->NowNanos(); } - uint64_t NowCPUNanos() override { return target_->NowCPUNanos(); } - - void SleepForMicroseconds(int micros) override { - target_->SleepForMicroseconds(micros); - } - Status GetHostName(char* name, uint64_t len) override { - return target_->GetHostName(name, len); - } - Status GetCurrentTime(int64_t* unix_time) override { - return target_->GetCurrentTime(unix_time); - } - Status GetAbsolutePath(const std::string& db_path, - std::string* output_path) override { - return target_->GetAbsolutePath(db_path, output_path); - } - void SetBackgroundThreads(int num, Priority pri) override { - return target_->SetBackgroundThreads(num, pri); - } - int GetBackgroundThreads(Priority pri) override { - return target_->GetBackgroundThreads(pri); - } - - Status SetAllowNonOwnerAccess(bool allow_non_owner_access) override { - return target_->SetAllowNonOwnerAccess(allow_non_owner_access); - } - - void IncBackgroundThreadsIfNeeded(int num, Priority pri) override { - 
return target_->IncBackgroundThreadsIfNeeded(num, pri); - } - - void LowerThreadPoolIOPriority(Priority pool) override { - target_->LowerThreadPoolIOPriority(pool); - } - - void LowerThreadPoolCPUPriority(Priority pool) override { - target_->LowerThreadPoolCPUPriority(pool); - } - - Status LowerThreadPoolCPUPriority(Priority pool, CpuPriority pri) override { - return target_->LowerThreadPoolCPUPriority(pool, pri); - } - - std::string TimeToString(uint64_t time) override { - return target_->TimeToString(time); - } - - Status GetThreadList(std::vector* thread_list) override { - return target_->GetThreadList(thread_list); - } - - ThreadStatusUpdater* GetThreadStatusUpdater() const override { - return target_->GetThreadStatusUpdater(); - } - - uint64_t GetThreadID() const override { return target_->GetThreadID(); } - - std::string GenerateUniqueId() override { - return target_->GenerateUniqueId(); - } - - EnvOptions OptimizeForLogRead(const EnvOptions& env_options) const override { - return target_->OptimizeForLogRead(env_options); - } - EnvOptions OptimizeForManifestRead( - const EnvOptions& env_options) const override { - return target_->OptimizeForManifestRead(env_options); - } - EnvOptions OptimizeForLogWrite(const EnvOptions& env_options, - const DBOptions& db_options) const override { - return target_->OptimizeForLogWrite(env_options, db_options); - } - EnvOptions OptimizeForManifestWrite( - const EnvOptions& env_options) const override { - return target_->OptimizeForManifestWrite(env_options); - } - EnvOptions OptimizeForCompactionTableWrite( - const EnvOptions& env_options, - const ImmutableDBOptions& immutable_ops) const override { - return target_->OptimizeForCompactionTableWrite(env_options, immutable_ops); - } - EnvOptions OptimizeForCompactionTableRead( - const EnvOptions& env_options, - const ImmutableDBOptions& db_options) const override { - return target_->OptimizeForCompactionTableRead(env_options, db_options); - } - Status GetFreeSpace(const 
std::string& path, uint64_t* diskfree) override { - return target_->GetFreeSpace(path, diskfree); - } - void SanitizeEnvOptions(EnvOptions* env_opts) const override { - target_->SanitizeEnvOptions(env_opts); - } - - private: - Env* target_; -}; - -class SequentialFileWrapper : public SequentialFile { - public: - explicit SequentialFileWrapper(SequentialFile* target) : target_(target) {} - - Status Read(size_t n, Slice* result, char* scratch) override { - return target_->Read(n, result, scratch); - } - Status Skip(uint64_t n) override { return target_->Skip(n); } - bool use_direct_io() const override { return target_->use_direct_io(); } - size_t GetRequiredBufferAlignment() const override { - return target_->GetRequiredBufferAlignment(); - } - Status InvalidateCache(size_t offset, size_t length) override { - return target_->InvalidateCache(offset, length); - } - Status PositionedRead(uint64_t offset, size_t n, Slice* result, - char* scratch) override { - return target_->PositionedRead(offset, n, result, scratch); - } - - private: - SequentialFile* target_; -}; - -class RandomAccessFileWrapper : public RandomAccessFile { - public: - explicit RandomAccessFileWrapper(RandomAccessFile* target) - : target_(target) {} - - Status Read(uint64_t offset, size_t n, Slice* result, - char* scratch) const override { - return target_->Read(offset, n, result, scratch); - } - Status MultiRead(ReadRequest* reqs, size_t num_reqs) override { - return target_->MultiRead(reqs, num_reqs); - } - Status Prefetch(uint64_t offset, size_t n) override { - return target_->Prefetch(offset, n); - } - size_t GetUniqueId(char* id, size_t max_size) const override { - return target_->GetUniqueId(id, max_size); - } - void Hint(AccessPattern pattern) override { target_->Hint(pattern); } - bool use_direct_io() const override { return target_->use_direct_io(); } - size_t GetRequiredBufferAlignment() const override { - return target_->GetRequiredBufferAlignment(); - } - Status InvalidateCache(size_t 
offset, size_t length) override { - return target_->InvalidateCache(offset, length); - } - - private: - RandomAccessFile* target_; -}; - -class WritableFileWrapper : public WritableFile { - public: - explicit WritableFileWrapper(WritableFile* t) : target_(t) {} - - Status Append(const Slice& data) override { return target_->Append(data); } - Status PositionedAppend(const Slice& data, uint64_t offset) override { - return target_->PositionedAppend(data, offset); - } - Status Truncate(uint64_t size) override { return target_->Truncate(size); } - Status Close() override { return target_->Close(); } - Status Flush() override { return target_->Flush(); } - Status Sync() override { return target_->Sync(); } - Status Fsync() override { return target_->Fsync(); } - bool IsSyncThreadSafe() const override { return target_->IsSyncThreadSafe(); } - - bool use_direct_io() const override { return target_->use_direct_io(); } - - size_t GetRequiredBufferAlignment() const override { - return target_->GetRequiredBufferAlignment(); - } - - void SetIOPriority(Env::IOPriority pri) override { - target_->SetIOPriority(pri); - } - - Env::IOPriority GetIOPriority() override { return target_->GetIOPriority(); } - - void SetWriteLifeTimeHint(Env::WriteLifeTimeHint hint) override { - target_->SetWriteLifeTimeHint(hint); - } - - Env::WriteLifeTimeHint GetWriteLifeTimeHint() override { - return target_->GetWriteLifeTimeHint(); - } - - uint64_t GetFileSize() override { return target_->GetFileSize(); } - - void SetPreallocationBlockSize(size_t size) override { - target_->SetPreallocationBlockSize(size); - } - - void GetPreallocationStatus(size_t* block_size, - size_t* last_allocated_block) override { - target_->GetPreallocationStatus(block_size, last_allocated_block); - } - - size_t GetUniqueId(char* id, size_t max_size) const override { - return target_->GetUniqueId(id, max_size); - } - - Status InvalidateCache(size_t offset, size_t length) override { - return target_->InvalidateCache(offset, 
length); - } - - Status RangeSync(uint64_t offset, uint64_t nbytes) override { - return target_->RangeSync(offset, nbytes); - } - - void PrepareWrite(size_t offset, size_t len) override { - target_->PrepareWrite(offset, len); - } - - Status Allocate(uint64_t offset, uint64_t len) override { - return target_->Allocate(offset, len); - } - - private: - WritableFile* target_; -}; - -class RandomRWFileWrapper : public RandomRWFile { - public: - explicit RandomRWFileWrapper(RandomRWFile* target) : target_(target) {} - - bool use_direct_io() const override { return target_->use_direct_io(); } - size_t GetRequiredBufferAlignment() const override { - return target_->GetRequiredBufferAlignment(); - } - Status Write(uint64_t offset, const Slice& data) override { - return target_->Write(offset, data); - } - Status Read(uint64_t offset, size_t n, Slice* result, - char* scratch) const override { - return target_->Read(offset, n, result, scratch); - } - Status Flush() override { return target_->Flush(); } - Status Sync() override { return target_->Sync(); } - Status Fsync() override { return target_->Fsync(); } - Status Close() override { return target_->Close(); } - - private: - RandomRWFile* target_; -}; - -class DirectoryWrapper : public Directory { - public: - explicit DirectoryWrapper(Directory* target) : target_(target) {} - - Status Fsync() override { return target_->Fsync(); } - size_t GetUniqueId(char* id, size_t max_size) const override { - return target_->GetUniqueId(id, max_size); - } - - private: - Directory* target_; -}; - -class LoggerWrapper : public Logger { - public: - explicit LoggerWrapper(Logger* target) : target_(target) {} - - Status Close() override { return target_->Close(); } - void LogHeader(const char* format, va_list ap) override { - return target_->LogHeader(format, ap); - } - void Logv(const char* format, va_list ap) override { - return target_->Logv(format, ap); - } - void Logv(const InfoLogLevel log_level, const char* format, - va_list ap) 
override { - return target_->Logv(log_level, format, ap); - } - size_t GetLogFileSize() const override { return target_->GetLogFileSize(); } - void Flush() override { return target_->Flush(); } - InfoLogLevel GetInfoLogLevel() const override { - return target_->GetInfoLogLevel(); - } - void SetInfoLogLevel(const InfoLogLevel log_level) override { - return target_->SetInfoLogLevel(log_level); - } - - private: - Logger* target_; -}; - -// Returns a new environment that stores its data in memory and delegates -// all non-file-storage tasks to base_env. The caller must delete the result -// when it is no longer needed. -// *base_env must remain live while the result is in use. -Env* NewMemEnv(Env* base_env); - -// Returns a new environment that is used for HDFS environment. -// This is a factory method for HdfsEnv declared in hdfs/env_hdfs.h -Status NewHdfsEnv(Env** hdfs_env, const std::string& fsname); - -// Returns a new environment that measures function call times for filesystem -// operations, reporting results to variables in PerfContext. -// This is a factory method for TimedEnv defined in utilities/env_timed.cc. -Env* NewTimedEnv(Env* base_env); - -// Returns an instance of logger that can be used for storing informational -// messages. -// This is a factory method for EnvLogger declared in logging/env_logging.h -Status NewEnvLogger(const std::string& fname, Env* env, - std::shared_ptr* result); - -std::unique_ptr NewCompositeEnv(std::shared_ptr fs); - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/env_encryption.h b/dist/darwin_amd64/include/rocksdb/env_encryption.h deleted file mode 100644 index 6c29dc9..0000000 --- a/dist/darwin_amd64/include/rocksdb/env_encryption.h +++ /dev/null @@ -1,405 +0,0 @@ -// Copyright (c) 2016-present, Facebook, Inc. All rights reserved. 
-// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#if !defined(ROCKSDB_LITE) - -#include - -#include "rocksdb/env.h" -#include "rocksdb/rocksdb_namespace.h" - -namespace ROCKSDB_NAMESPACE { - -class EncryptionProvider; - -struct ConfigOptions; - -// Returns an Env that encrypts data when stored on disk and decrypts data when -// read from disk. -Env* NewEncryptedEnv(Env* base_env, - const std::shared_ptr& provider); - -// BlockAccessCipherStream is the base class for any cipher stream that -// supports random access at block level (without requiring data from other -// blocks). E.g. CTR (Counter operation mode) supports this requirement. -class BlockAccessCipherStream { - public: - virtual ~BlockAccessCipherStream(){}; - - // BlockSize returns the size of each block supported by this cipher stream. - virtual size_t BlockSize() = 0; - - // Encrypt one or more (partial) blocks of data at the file offset. - // Length of data is given in dataSize. - virtual Status Encrypt(uint64_t fileOffset, char* data, size_t dataSize); - - // Decrypt one or more (partial) blocks of data at the file offset. - // Length of data is given in dataSize. - virtual Status Decrypt(uint64_t fileOffset, char* data, size_t dataSize); - - protected: - // Allocate scratch space which is passed to EncryptBlock/DecryptBlock. - virtual void AllocateScratch(std::string&) = 0; - - // Encrypt a block of data at the given block index. - // Length of data is equal to BlockSize(); - virtual Status EncryptBlock(uint64_t blockIndex, char* data, - char* scratch) = 0; - - // Decrypt a block of data at the given block index. 
- // Length of data is equal to BlockSize(); - virtual Status DecryptBlock(uint64_t blockIndex, char* data, - char* scratch) = 0; -}; - -// BlockCipher -class BlockCipher { - public: - virtual ~BlockCipher(){}; - - // Creates a new BlockCipher from the input config_options and value - // The value describes the type of provider (and potentially optional - // configuration parameters) used to create this provider. - // For example, if the value is "ROT13", a ROT13BlockCipher is created. - // - // @param config_options Options to control how this cipher is created - // and initialized. - // @param value The value might be: - // - ROT13 Create a ROT13 Cipher - // - ROT13:nn Create a ROT13 Cipher with block size of nn - // @param result The new cipher object - // @return OK if the cipher was sucessfully created - // @return NotFound if an invalid name was specified in the value - // @return InvalidArgument if either the options were not valid - static Status CreateFromString(const ConfigOptions& config_options, - const std::string& value, - std::shared_ptr* result); - - // Short-cut method to create a ROT13 BlockCipher. - // This cipher is only suitable for test purposes and should not be used in - // production!!! - static std::shared_ptr NewROT13Cipher(size_t block_size); - - virtual const char* Name() const = 0; - // BlockSize returns the size of each block supported by this cipher stream. - virtual size_t BlockSize() = 0; - - // Encrypt a block of data. - // Length of data is equal to BlockSize(). - virtual Status Encrypt(char* data) = 0; - - // Decrypt a block of data. - // Length of data is equal to BlockSize(). - virtual Status Decrypt(char* data) = 0; -}; - -// The encryption provider is used to create a cipher stream for a specific -// file. The returned cipher stream will be used for actual -// encryption/decryption actions. 
-class EncryptionProvider { - public: - virtual ~EncryptionProvider(){}; - - // Creates a new EncryptionProvider from the input config_options and value - // The value describes the type of provider (and potentially optional - // configuration parameters) used to create this provider. - // For example, if the value is "CTR", a CTREncryptionProvider will be - // created. If the value is preceded by "test://" (e.g test://CTR"), the - // TEST_Initialize method will be invoked prior to returning the provider. - // - // @param config_options Options to control how this provider is created - // and initialized. - // @param value The value might be: - // - CTR Create a CTR provider - // - test://CTR Create a CTR provider and initialize it for tests. - // @param result The new provider object - // @return OK if the provider was sucessfully created - // @return NotFound if an invalid name was specified in the value - // @return InvalidArgument if either the options were not valid - static Status CreateFromString(const ConfigOptions& config_options, - const std::string& value, - std::shared_ptr* result); - - // Short-cut method to create a CTR-provider - static std::shared_ptr NewCTRProvider( - const std::shared_ptr& cipher); - - // Returns the name of this EncryptionProvider - virtual const char* Name() const = 0; - - // GetPrefixLength returns the length of the prefix that is added to every - // file and used for storing encryption options. For optimal performance, the - // prefix length should be a multiple of the page size. - virtual size_t GetPrefixLength() const = 0; - - // CreateNewPrefix initialized an allocated block of prefix memory - // for a new file. - virtual Status CreateNewPrefix(const std::string& fname, char* prefix, - size_t prefixLength) const = 0; - - // Method to add a new cipher key for use by the EncryptionProvider. - // @param description Descriptor for this key. 
- // @param cipher The cryptographic key to use - // @param len The length of the cipher key - // @param for_write If true, this cipher should be used for writing files. - // If false, this cipher should only be used for reading - // files - // @return OK if the cipher was successfully added to the provider, non-OK - // otherwise - virtual Status AddCipher(const std::string& descriptor, const char* cipher, - size_t len, bool for_write) = 0; - - // CreateCipherStream creates a block access cipher stream for a file given - // given name and options. - virtual Status CreateCipherStream( - const std::string& fname, const EnvOptions& options, Slice& prefix, - std::unique_ptr* result) = 0; - - // Returns a string representing an encryption marker prefix for this - // provider. If a marker is provided, this marker can be used to tell whether - // or not a file is encrypted by this provider. The maker will also be part - // of any encryption prefix for this provider. - virtual std::string GetMarker() const { return ""; } - - protected: - // Optional method to initialize an EncryptionProvider in the TEST - // environment. - virtual Status TEST_Initialize() { return Status::OK(); } -}; - -class EncryptedSequentialFile : public SequentialFile { - protected: - std::unique_ptr file_; - std::unique_ptr stream_; - uint64_t offset_; - size_t prefixLength_; - - public: - // Default ctor. Given underlying sequential file is supposed to be at - // offset == prefixLength. - EncryptedSequentialFile(std::unique_ptr&& f, - std::unique_ptr&& s, - size_t prefixLength) - : file_(std::move(f)), - stream_(std::move(s)), - offset_(prefixLength), - prefixLength_(prefixLength) {} - - // Read up to "n" bytes from the file. "scratch[0..n-1]" may be - // written by this routine. Sets "*result" to the data that was - // read (including if fewer than "n" bytes were successfully read). 
- // May set "*result" to point at data in "scratch[0..n-1]", so - // "scratch[0..n-1]" must be live when "*result" is used. - // If an error was encountered, returns a non-OK status. - // - // REQUIRES: External synchronization - virtual Status Read(size_t n, Slice* result, char* scratch) override; - - // Skip "n" bytes from the file. This is guaranteed to be no - // slower that reading the same data, but may be faster. - // - // If end of file is reached, skipping will stop at the end of the - // file, and Skip will return OK. - // - // REQUIRES: External synchronization - virtual Status Skip(uint64_t n) override; - - // Indicates the upper layers if the current SequentialFile implementation - // uses direct IO. - virtual bool use_direct_io() const override; - - // Use the returned alignment value to allocate - // aligned buffer for Direct I/O - virtual size_t GetRequiredBufferAlignment() const override; - - // Remove any kind of caching of data from the offset to offset+length - // of this file. If the length is 0, then it refers to the end of file. - // If the system is not caching the file contents, then this is a noop. - virtual Status InvalidateCache(size_t offset, size_t length) override; - - // Positioned Read for direct I/O - // If Direct I/O enabled, offset, n, and scratch should be properly aligned - virtual Status PositionedRead(uint64_t offset, size_t n, Slice* result, - char* scratch) override; -}; - -// A file abstraction for randomly reading the contents of a file. -class EncryptedRandomAccessFile : public RandomAccessFile { - protected: - std::unique_ptr file_; - std::unique_ptr stream_; - size_t prefixLength_; - - public: - EncryptedRandomAccessFile(std::unique_ptr&& f, - std::unique_ptr&& s, - size_t prefixLength) - : file_(std::move(f)), - stream_(std::move(s)), - prefixLength_(prefixLength) {} - - // Read up to "n" bytes from the file starting at "offset". - // "scratch[0..n-1]" may be written by this routine. 
Sets "*result" - // to the data that was read (including if fewer than "n" bytes were - // successfully read). May set "*result" to point at data in - // "scratch[0..n-1]", so "scratch[0..n-1]" must be live when - // "*result" is used. If an error was encountered, returns a non-OK - // status. - // - // Safe for concurrent use by multiple threads. - // If Direct I/O enabled, offset, n, and scratch should be aligned properly. - virtual Status Read(uint64_t offset, size_t n, Slice* result, - char* scratch) const override; - - // Readahead the file starting from offset by n bytes for caching. - virtual Status Prefetch(uint64_t offset, size_t n) override; - - // Tries to get an unique ID for this file that will be the same each time - // the file is opened (and will stay the same while the file is open). - // Furthermore, it tries to make this ID at most "max_size" bytes. If such an - // ID can be created this function returns the length of the ID and places it - // in "id"; otherwise, this function returns 0, in which case "id" - // may not have been modified. - // - // This function guarantees, for IDs from a given environment, two unique ids - // cannot be made equal to each other by adding arbitrary bytes to one of - // them. That is, no unique ID is the prefix of another. - // - // This function guarantees that the returned ID will not be interpretable as - // a single varint. - // - // Note: these IDs are only valid for the duration of the process. - virtual size_t GetUniqueId(char* id, size_t max_size) const override; - - virtual void Hint(AccessPattern pattern) override; - - // Indicates the upper layers if the current RandomAccessFile implementation - // uses direct IO. - virtual bool use_direct_io() const override; - - // Use the returned alignment value to allocate - // aligned buffer for Direct I/O - virtual size_t GetRequiredBufferAlignment() const override; - - // Remove any kind of caching of data from the offset to offset+length - // of this file. 
If the length is 0, then it refers to the end of file. - // If the system is not caching the file contents, then this is a noop. - virtual Status InvalidateCache(size_t offset, size_t length) override; -}; - -// A file abstraction for sequential writing. The implementation -// must provide buffering since callers may append small fragments -// at a time to the file. -class EncryptedWritableFile : public WritableFileWrapper { - protected: - std::unique_ptr file_; - std::unique_ptr stream_; - size_t prefixLength_; - - public: - // Default ctor. Prefix is assumed to be written already. - EncryptedWritableFile(std::unique_ptr&& f, - std::unique_ptr&& s, - size_t prefixLength) - : WritableFileWrapper(f.get()), - file_(std::move(f)), - stream_(std::move(s)), - prefixLength_(prefixLength) {} - - Status Append(const Slice& data) override; - - Status PositionedAppend(const Slice& data, uint64_t offset) override; - - // Indicates the upper layers if the current WritableFile implementation - // uses direct IO. - virtual bool use_direct_io() const override; - - // Use the returned alignment value to allocate - // aligned buffer for Direct I/O - virtual size_t GetRequiredBufferAlignment() const override; - - /* - * Get the size of valid data in the file. - */ - virtual uint64_t GetFileSize() override; - - // Truncate is necessary to trim the file to the correct size - // before closing. It is not always possible to keep track of the file - // size due to whole pages writes. The behavior is undefined if called - // with other writes to follow. - virtual Status Truncate(uint64_t size) override; - - // Remove any kind of caching of data from the offset to offset+length - // of this file. If the length is 0, then it refers to the end of file. - // If the system is not caching the file contents, then this is a noop. - // This call has no effect on dirty pages in the cache. - virtual Status InvalidateCache(size_t offset, size_t length) override; - - // Sync a file range with disk. 
- // offset is the starting byte of the file range to be synchronized. - // nbytes specifies the length of the range to be synchronized. - // This asks the OS to initiate flushing the cached data to disk, - // without waiting for completion. - // Default implementation does nothing. - virtual Status RangeSync(uint64_t offset, uint64_t nbytes) override; - - // PrepareWrite performs any necessary preparation for a write - // before the write actually occurs. This allows for pre-allocation - // of space on devices where it can result in less file - // fragmentation and/or less waste from over-zealous filesystem - // pre-allocation. - virtual void PrepareWrite(size_t offset, size_t len) override; - - // Pre-allocates space for a file. - virtual Status Allocate(uint64_t offset, uint64_t len) override; -}; - -// A file abstraction for random reading and writing. -class EncryptedRandomRWFile : public RandomRWFile { - protected: - std::unique_ptr file_; - std::unique_ptr stream_; - size_t prefixLength_; - - public: - EncryptedRandomRWFile(std::unique_ptr&& f, - std::unique_ptr&& s, - size_t prefixLength) - : file_(std::move(f)), - stream_(std::move(s)), - prefixLength_(prefixLength) {} - - // Indicates if the class makes use of direct I/O - // If false you must pass aligned buffer to Write() - virtual bool use_direct_io() const override; - - // Use the returned alignment value to allocate - // aligned buffer for Direct I/O - virtual size_t GetRequiredBufferAlignment() const override; - - // Write bytes in `data` at offset `offset`, Returns Status::OK() on success. - // Pass aligned buffer when use_direct_io() returns true. - virtual Status Write(uint64_t offset, const Slice& data) override; - - // Read up to `n` bytes starting from offset `offset` and store them in - // result, provided `scratch` size should be at least `n`. - // Returns Status::OK() on success. 
- virtual Status Read(uint64_t offset, size_t n, Slice* result, - char* scratch) const override; - - virtual Status Flush() override; - - virtual Status Sync() override; - - virtual Status Fsync() override; - - virtual Status Close() override; -}; - -} // namespace ROCKSDB_NAMESPACE - -#endif // !defined(ROCKSDB_LITE) diff --git a/dist/darwin_amd64/include/rocksdb/experimental.h b/dist/darwin_amd64/include/rocksdb/experimental.h deleted file mode 100644 index f26d637..0000000 --- a/dist/darwin_amd64/include/rocksdb/experimental.h +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#include "rocksdb/db.h" -#include "rocksdb/status.h" - -namespace ROCKSDB_NAMESPACE { -namespace experimental { - -// Supported only for Leveled compaction -Status SuggestCompactRange(DB* db, ColumnFamilyHandle* column_family, - const Slice* begin, const Slice* end); -Status SuggestCompactRange(DB* db, const Slice* begin, const Slice* end); - -// Move all L0 files to target_level skipping compaction. -// This operation succeeds only if the files in L0 have disjoint ranges; this -// is guaranteed to happen, for instance, if keys are inserted in sorted -// order. Furthermore, all levels between 1 and target_level must be empty. -// If any of the above condition is violated, InvalidArgument will be -// returned. 
-Status PromoteL0(DB* db, ColumnFamilyHandle* column_family, - int target_level = 1); - -} // namespace experimental -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/file_checksum.h b/dist/darwin_amd64/include/rocksdb/file_checksum.h deleted file mode 100644 index 37b1744..0000000 --- a/dist/darwin_amd64/include/rocksdb/file_checksum.h +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// Copyright (c) 2013 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. - -#pragma once - -#include -#include -#include -#include -#include - -#include "rocksdb/status.h" - -namespace ROCKSDB_NAMESPACE { - -// The unknown file checksum. -constexpr char kUnknownFileChecksum[] = ""; -// The unknown sst file checksum function name. -constexpr char kUnknownFileChecksumFuncName[] = "Unknown"; -// The standard DB file checksum function name. -// This is the name of the checksum function returned by -// GetFileChecksumGenCrc32cFactory(); -constexpr char kStandardDbFileChecksumFuncName[] = "FileChecksumCrc32c"; - -struct FileChecksumGenContext { - std::string file_name; - // The name of the requested checksum generator. - // Checksum factories may use or ignore requested_checksum_func_name, - // and checksum factories written before this field was available are still - // compatible. - std::string requested_checksum_func_name; -}; - -// FileChecksumGenerator is the class to generates the checksum value -// for each file when the file is written to the file system. 
-// Implementations may assume that -// * Finalize is called at most once during the life of the object -// * All calls to Update come before Finalize -// * All calls to GetChecksum come after Finalize -class FileChecksumGenerator { - public: - virtual ~FileChecksumGenerator() {} - - // Update the current result after process the data. For different checksum - // functions, the temporal results may be stored and used in Update to - // include the new data. - virtual void Update(const char* data, size_t n) = 0; - - // Generate the final results if no further new data will be updated. - virtual void Finalize() = 0; - - // Get the checksum. The result should not be the empty string and may - // include arbitrary bytes, including non-printable characters. - virtual std::string GetChecksum() const = 0; - - // Returns a name that identifies the current file checksum function. - virtual const char* Name() const = 0; -}; - -// Create the FileChecksumGenerator object for each SST file. -class FileChecksumGenFactory { - public: - virtual ~FileChecksumGenFactory() {} - - // Create a new FileChecksumGenerator. - virtual std::unique_ptr CreateFileChecksumGenerator( - const FileChecksumGenContext& context) = 0; - - // Return the name of this FileChecksumGenFactory. - virtual const char* Name() const = 0; -}; - -// FileChecksumList stores the checksum information of a list of files (e.g., -// SST files). The FileChecksumLIst can be used to store the checksum -// information of all SST file getting from the MANIFEST, which are -// the checksum information of all valid SST file of a DB instance. It can -// also be used to store the checksum information of a list of SST files to -// be ingested. -class FileChecksumList { - public: - virtual ~FileChecksumList() {} - - // Clean the previously stored file checksum information. 
- virtual void reset() = 0; - - // Get the number of checksums in the checksum list - virtual size_t size() const = 0; - - // Return all the file checksum information being stored in a unordered_map. - // File_number is the key, the first part of the value is checksum value, - // and the second part of the value is checksum function name. - virtual Status GetAllFileChecksums( - std::vector* file_numbers, std::vector* checksums, - std::vector* checksum_func_names) = 0; - - // Given the file_number, it searches if the file checksum information is - // stored. - virtual Status SearchOneFileChecksum(uint64_t file_number, - std::string* checksum, - std::string* checksum_func_name) = 0; - - // Insert the checksum information of one file to the FileChecksumList. - virtual Status InsertOneFileChecksum( - uint64_t file_number, const std::string& checksum, - const std::string& checksum_func_name) = 0; - - // Remove the checksum information of one SST file. - virtual Status RemoveOneFileChecksum(uint64_t file_number) = 0; -}; - -// Create a new file checksum list. -extern FileChecksumList* NewFileChecksumList(); - -// Return a shared_ptr of the builtin Crc32c based file checksum generatory -// factory object, which can be shared to create the Crc32c based checksum -// generator object. -// Note: this implementation is compatible with many other crc32c checksum -// implementations and uses big-endian encoding of the result, unlike most -// other crc32c checksums in RocksDB, which alter the result with -// crc32c::Mask and use little-endian encoding. -extern std::shared_ptr -GetFileChecksumGenCrc32cFactory(); - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/file_system.h b/dist/darwin_amd64/include/rocksdb/file_system.h deleted file mode 100644 index 3683491..0000000 --- a/dist/darwin_amd64/include/rocksdb/file_system.h +++ /dev/null @@ -1,1467 +0,0 @@ -// Copyright (c) 2019-present, Facebook, Inc. All rights reserved. 
-// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// -// A FileSystem is an interface used by the rocksdb implementation to access -// storage functionality like the filesystem etc. Callers -// may wish to provide a custom FileSystem object when opening a database to -// get fine gain control; e.g., to rate limit file system operations. -// -// All FileSystem implementations are safe for concurrent access from -// multiple threads without any external synchronization. -// -// WARNING: Since this is a new interface, it is expected that there will be -// some changes as storage systems are ported over. - -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "rocksdb/env.h" -#include "rocksdb/io_status.h" -#include "rocksdb/options.h" -#include "rocksdb/thread_status.h" - -namespace ROCKSDB_NAMESPACE { - -class FileLock; -class FSDirectory; -class FSRandomAccessFile; -class FSRandomRWFile; -class FSSequentialFile; -class FSWritableFile; -class Logger; -class Slice; -struct ImmutableDBOptions; -struct MutableDBOptions; -class RateLimiter; - -using AccessPattern = RandomAccessFile::AccessPattern; -using FileAttributes = Env::FileAttributes; - -// Priority of an IO request. This is a hint and does not guarantee any -// particular QoS. -// IO_LOW - Typically background reads/writes such as compaction/flush -// IO_HIGH - Typically user reads/synchronous WAL writes -enum class IOPriority : uint8_t { - kIOLow, - kIOHigh, - kIOTotal, -}; - -// Type of the data begin read/written. 
It can be passed down as a flag -// for the FileSystem implementation to optionally handle different types in -// different ways -enum class IOType : uint8_t { - kData, - kFilter, - kIndex, - kMetadata, - kWAL, - kManifest, - kLog, - kUnknown, - kInvalid, -}; - -// Per-request options that can be passed down to the FileSystem -// implementation. These are hints and are not necessarily guaranteed to be -// honored. More hints can be added here in the future to indicate things like -// storage media (HDD/SSD) to be used, replication level etc. -struct IOOptions { - // Timeout for the operation in microseconds - std::chrono::microseconds timeout; - - // Priority - high or low - IOPriority prio; - - // Type of data being read/written - IOType type; - - IOOptions() : timeout(0), prio(IOPriority::kIOLow), type(IOType::kUnknown) {} -}; - -// File scope options that control how a file is opened/created and accessed -// while its open. We may add more options here in the future such as -// redundancy level, media to use etc. 
-struct FileOptions : EnvOptions { - // Embedded IOOptions to control the parameters for any IOs that need - // to be issued for the file open/creation - IOOptions io_options; - - FileOptions() : EnvOptions() {} - - FileOptions(const DBOptions& opts) - : EnvOptions(opts) {} - - FileOptions(const EnvOptions& opts) - : EnvOptions(opts) {} - - FileOptions(const FileOptions& opts) - : EnvOptions(opts), io_options(opts.io_options) {} - - FileOptions& operator=(const FileOptions& opts) = default; -}; - -// A structure to pass back some debugging information from the FileSystem -// implementation to RocksDB in case of an IO error -struct IODebugContext { - // file_path to be filled in by RocksDB in case of an error - std::string file_path; - - // A map of counter names to values - set by the FileSystem implementation - std::map counters; - - // To be set by the FileSystem implementation - std::string msg; - - IODebugContext() {} - - void AddCounter(std::string& name, uint64_t value) { - counters.emplace(name, value); - } - - std::string ToString() { - std::ostringstream ss; - ss << file_path << ", "; - for (auto counter : counters) { - ss << counter.first << " = " << counter.second << ","; - } - ss << msg; - return ss.str(); - } -}; - -// The FileSystem, FSSequentialFile, FSRandomAccessFile, FSWritableFile, -// FSRandomRWFileclass, and FSDIrectory classes define the interface between -// RocksDB and storage systems, such as Posix filesystems, -// remote filesystems etc. -// The interface allows for fine grained control of individual IO operations, -// such as setting a timeout, prioritization, hints on data placement, -// different handling based on type of IO etc. -// This is accomplished by passing an instance of IOOptions to every -// API call that can potentially perform IO. Additionally, each such API is -// passed a pointer to a IODebugContext structure that can be used by the -// storage system to include troubleshooting information. 
The return values -// of the APIs is of type IOStatus, which can indicate an error code/sub-code, -// as well as metadata about the error such as its scope and whether its -// retryable. -class FileSystem { - public: - FileSystem(); - - // No copying allowed - FileSystem(const FileSystem&) = delete; - - virtual ~FileSystem(); - - virtual const char* Name() const = 0; - - static const char* Type() { return "FileSystem"; } - - // Loads the FileSystem specified by the input value into the result - static Status Load(const std::string& value, - std::shared_ptr* result); - - // Return a default fie_system suitable for the current operating - // system. Sophisticated users may wish to provide their own Env - // implementation instead of relying on this default file_system - // - // The result of Default() belongs to rocksdb and must never be deleted. - static std::shared_ptr Default(); - - // Handles the event when a new DB or a new ColumnFamily starts using the - // specified data paths. - // - // The data paths might be shared by different DBs or ColumnFamilies, - // so RegisterDbPaths might be called with the same data paths. - // For example, when CreateColumnFamily is called multiple times with the same - // data path, RegisterDbPaths will also be called with the same data path. - // - // If the return status is ok, then the paths must be correspondingly - // called in UnregisterDbPaths; - // otherwise this method should have no side effect, and UnregisterDbPaths - // do not need to be called for the paths. - // - // Different implementations may take different actions. - // By default, it's a no-op and returns Status::OK. - virtual Status RegisterDbPaths(const std::vector& /*paths*/) { - return Status::OK(); - } - // Handles the event a DB or a ColumnFamily stops using the specified data - // paths. - // - // It should be called corresponding to each successful RegisterDbPaths. - // - // Different implementations may take different actions. 
- // By default, it's a no-op and returns Status::OK. - virtual Status UnregisterDbPaths(const std::vector& /*paths*/) { - return Status::OK(); - } - - // Create a brand new sequentially-readable file with the specified name. - // On success, stores a pointer to the new file in *result and returns OK. - // On failure stores nullptr in *result and returns non-OK. If the file does - // not exist, returns a non-OK status. - // - // The returned file will only be accessed by one thread at a time. - virtual IOStatus NewSequentialFile(const std::string& fname, - const FileOptions& file_opts, - std::unique_ptr* result, - IODebugContext* dbg) = 0; - - // Create a brand new random access read-only file with the - // specified name. On success, stores a pointer to the new file in - // *result and returns OK. On failure stores nullptr in *result and - // returns non-OK. If the file does not exist, returns a non-OK - // status. - // - // The returned file may be concurrently accessed by multiple threads. - virtual IOStatus NewRandomAccessFile( - const std::string& fname, const FileOptions& file_opts, - std::unique_ptr* result, - IODebugContext* dbg) = 0; - // These values match Linux definition - // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/include/uapi/linux/fcntl.h#n56 - enum WriteLifeTimeHint { - kWLTHNotSet = 0, // No hint information set - kWLTHNone, // No hints about write life time - kWLTHShort, // Data written has a short life time - kWLTHMedium, // Data written has a medium life time - kWLTHLong, // Data written has a long life time - kWLTHExtreme, // Data written has an extremely long life time - }; - - // Create an object that writes to a new file with the specified - // name. Deletes any existing file with the same name and creates a - // new file. On success, stores a pointer to the new file in - // *result and returns OK. On failure stores nullptr in *result and - // returns non-OK. 
- // - // The returned file will only be accessed by one thread at a time. - virtual IOStatus NewWritableFile(const std::string& fname, - const FileOptions& file_opts, - std::unique_ptr* result, - IODebugContext* dbg) = 0; - - // Create an object that writes to a new file with the specified - // name. Deletes any existing file with the same name and creates a - // new file. On success, stores a pointer to the new file in - // *result and returns OK. On failure stores nullptr in *result and - // returns non-OK. - // - // The returned file will only be accessed by one thread at a time. - virtual IOStatus ReopenWritableFile( - const std::string& /*fname*/, const FileOptions& /*options*/, - std::unique_ptr* /*result*/, IODebugContext* /*dbg*/) { - return IOStatus::NotSupported(); - } - - // Reuse an existing file by renaming it and opening it as writable. - virtual IOStatus ReuseWritableFile(const std::string& fname, - const std::string& old_fname, - const FileOptions& file_opts, - std::unique_ptr* result, - IODebugContext* dbg); - - // Open `fname` for random read and write, if file doesn't exist the file - // will be created. On success, stores a pointer to the new file in - // *result and returns OK. On failure returns non-OK. - // - // The returned file will only be accessed by one thread at a time. - virtual IOStatus NewRandomRWFile(const std::string& /*fname*/, - const FileOptions& /*options*/, - std::unique_ptr* /*result*/, - IODebugContext* /*dbg*/) { - return IOStatus::NotSupported( - "RandomRWFile is not implemented in this FileSystem"); - } - - // Opens `fname` as a memory-mapped file for read and write (in-place updates - // only, i.e., no appends). On success, stores a raw buffer covering the whole - // file in `*result`. The file must exist prior to this call. 
- virtual IOStatus NewMemoryMappedFileBuffer( - const std::string& /*fname*/, - std::unique_ptr* /*result*/) { - return IOStatus::NotSupported( - "MemoryMappedFileBuffer is not implemented in this FileSystem"); - } - - // Create an object that represents a directory. Will fail if directory - // doesn't exist. If the directory exists, it will open the directory - // and create a new Directory object. - // - // On success, stores a pointer to the new Directory in - // *result and returns OK. On failure stores nullptr in *result and - // returns non-OK. - virtual IOStatus NewDirectory(const std::string& name, - const IOOptions& io_opts, - std::unique_ptr* result, - IODebugContext* dbg) = 0; - - // Returns OK if the named file exists. - // NotFound if the named file does not exist, - // the calling process does not have permission to determine - // whether this file exists, or if the path is invalid. - // IOError if an IO Error was encountered - virtual IOStatus FileExists(const std::string& fname, - const IOOptions& options, - IODebugContext* dbg) = 0; - - // Store in *result the names of the children of the specified directory. - // The names are relative to "dir". - // Original contents of *results are dropped. - // Returns OK if "dir" exists and "*result" contains its children. - // NotFound if "dir" does not exist, the calling process does not have - // permission to access "dir", or if "dir" is invalid. - // IOError if an IO Error was encountered - virtual IOStatus GetChildren(const std::string& dir, const IOOptions& options, - std::vector* result, - IODebugContext* dbg) = 0; - - // Store in *result the attributes of the children of the specified directory. - // In case the implementation lists the directory prior to iterating the files - // and files are concurrently deleted, the deleted files will be omitted from - // result. - // The name attributes are relative to "dir". - // Original contents of *results are dropped. 
- // Returns OK if "dir" exists and "*result" contains its children. - // NotFound if "dir" does not exist, the calling process does not have - // permission to access "dir", or if "dir" is invalid. - // IOError if an IO Error was encountered - virtual IOStatus GetChildrenFileAttributes( - const std::string& dir, const IOOptions& options, - std::vector* result, IODebugContext* dbg) { - assert(result != nullptr); - std::vector child_fnames; - IOStatus s = GetChildren(dir, options, &child_fnames, dbg); - if (!s.ok()) { - return s; - } - result->resize(child_fnames.size()); - size_t result_size = 0; - for (size_t i = 0; i < child_fnames.size(); ++i) { - const std::string path = dir + "/" + child_fnames[i]; - if (!(s = GetFileSize(path, options, &(*result)[result_size].size_bytes, - dbg)) - .ok()) { - if (FileExists(path, options, dbg).IsNotFound()) { - // The file may have been deleted since we listed the directory - continue; - } - return s; - } - (*result)[result_size].name = std::move(child_fnames[i]); - result_size++; - } - result->resize(result_size); - return IOStatus::OK(); - } - - // Delete the named file. - virtual IOStatus DeleteFile(const std::string& fname, - const IOOptions& options, - IODebugContext* dbg) = 0; - - // Truncate the named file to the specified size. - virtual IOStatus Truncate(const std::string& /*fname*/, size_t /*size*/, - const IOOptions& /*options*/, - IODebugContext* /*dbg*/) { - return IOStatus::NotSupported("Truncate is not supported for this FileSystem"); - } - - // Create the specified directory. Returns error if directory exists. - virtual IOStatus CreateDir(const std::string& dirname, - const IOOptions& options, IODebugContext* dbg) = 0; - - // Creates directory if missing. Return Ok if it exists, or successful in - // Creating. - virtual IOStatus CreateDirIfMissing(const std::string& dirname, - const IOOptions& options, - IODebugContext* dbg) = 0; - - // Delete the specified directory. 
- virtual IOStatus DeleteDir(const std::string& dirname, - const IOOptions& options, IODebugContext* dbg) = 0; - - // Store the size of fname in *file_size. - virtual IOStatus GetFileSize(const std::string& fname, - const IOOptions& options, uint64_t* file_size, - IODebugContext* dbg) = 0; - - // Store the last modification time of fname in *file_mtime. - virtual IOStatus GetFileModificationTime(const std::string& fname, - const IOOptions& options, - uint64_t* file_mtime, - IODebugContext* dbg) = 0; - // Rename file src to target. - virtual IOStatus RenameFile(const std::string& src, const std::string& target, - const IOOptions& options, - IODebugContext* dbg) = 0; - - // Hard Link file src to target. - virtual IOStatus LinkFile(const std::string& /*src*/, - const std::string& /*target*/, - const IOOptions& /*options*/, - IODebugContext* /*dbg*/) { - return IOStatus::NotSupported("LinkFile is not supported for this FileSystem"); - } - - virtual IOStatus NumFileLinks(const std::string& /*fname*/, - const IOOptions& /*options*/, - uint64_t* /*count*/, IODebugContext* /*dbg*/) { - return IOStatus::NotSupported( - "Getting number of file links is not supported for this FileSystem"); - } - - virtual IOStatus AreFilesSame(const std::string& /*first*/, - const std::string& /*second*/, - const IOOptions& /*options*/, bool* /*res*/, - IODebugContext* /*dbg*/) { - return IOStatus::NotSupported("AreFilesSame is not supported for this FileSystem"); - } - - // Lock the specified file. Used to prevent concurrent access to - // the same db by multiple processes. On failure, stores nullptr in - // *lock and returns non-OK. - // - // On success, stores a pointer to the object that represents the - // acquired lock in *lock and returns OK. The caller should call - // UnlockFile(*lock) to release the lock. If the process exits, - // the lock will be automatically released. - // - // If somebody else already holds the lock, finishes immediately - // with a failure. 
I.e., this call does not wait for existing locks - // to go away. - // - // May create the named file if it does not already exist. - virtual IOStatus LockFile(const std::string& fname, const IOOptions& options, - FileLock** lock, IODebugContext* dbg) = 0; - - // Release the lock acquired by a previous successful call to LockFile. - // REQUIRES: lock was returned by a successful LockFile() call - // REQUIRES: lock has not already been unlocked. - virtual IOStatus UnlockFile(FileLock* lock, const IOOptions& options, - IODebugContext* dbg) = 0; - - // *path is set to a temporary directory that can be used for testing. It may - // or many not have just been created. The directory may or may not differ - // between runs of the same process, but subsequent calls will return the - // same directory. - virtual IOStatus GetTestDirectory(const IOOptions& options, std::string* path, - IODebugContext* dbg) = 0; - - // Create and returns a default logger (an instance of EnvLogger) for storing - // informational messages. Derived classes can overide to provide custom - // logger. - virtual IOStatus NewLogger(const std::string& fname, const IOOptions& io_opts, - std::shared_ptr* result, - IODebugContext* dbg) = 0; - - // Get full directory name for this db. - virtual IOStatus GetAbsolutePath(const std::string& db_path, - const IOOptions& options, - std::string* output_path, - IODebugContext* dbg) = 0; - - // Sanitize the FileOptions. Typically called by a FileOptions/EnvOptions - // copy constructor - virtual void SanitizeFileOptions(FileOptions* /*opts*/) const {} - - // OptimizeForLogRead will create a new FileOptions object that is a copy of - // the FileOptions in the parameters, but is optimized for reading log files. - virtual FileOptions OptimizeForLogRead(const FileOptions& file_options) const; - - // OptimizeForManifestRead will create a new FileOptions object that is a copy - // of the FileOptions in the parameters, but is optimized for reading manifest - // files. 
- virtual FileOptions OptimizeForManifestRead( - const FileOptions& file_options) const; - - // OptimizeForLogWrite will create a new FileOptions object that is a copy of - // the FileOptions in the parameters, but is optimized for writing log files. - // Default implementation returns the copy of the same object. - virtual FileOptions OptimizeForLogWrite(const FileOptions& file_options, - const DBOptions& db_options) const; - - // OptimizeForManifestWrite will create a new FileOptions object that is a - // copy of the FileOptions in the parameters, but is optimized for writing - // manifest files. Default implementation returns the copy of the same - // object. - virtual FileOptions OptimizeForManifestWrite( - const FileOptions& file_options) const; - - // OptimizeForCompactionTableWrite will create a new FileOptions object that - // is a copy of the FileOptions in the parameters, but is optimized for - // writing table files. - virtual FileOptions OptimizeForCompactionTableWrite( - const FileOptions& file_options, - const ImmutableDBOptions& immutable_ops) const; - - // OptimizeForCompactionTableRead will create a new FileOptions object that - // is a copy of the FileOptions in the parameters, but is optimized for - // reading table files. - virtual FileOptions OptimizeForCompactionTableRead( - const FileOptions& file_options, - const ImmutableDBOptions& db_options) const; - -// This seems to clash with a macro on Windows, so #undef it here -#ifdef GetFreeSpace -#undef GetFreeSpace -#endif - - // Get the amount of free disk space - virtual IOStatus GetFreeSpace(const std::string& /*path*/, - const IOOptions& /*options*/, - uint64_t* /*diskfree*/, - IODebugContext* /*dbg*/) { - return IOStatus::NotSupported(); - } - - virtual IOStatus IsDirectory(const std::string& /*path*/, - const IOOptions& options, bool* is_dir, - IODebugContext* /*dgb*/) = 0; - - // If you're adding methods here, remember to add them to EnvWrapper too. 
- - private: - void operator=(const FileSystem&); -}; - -// A file abstraction for reading sequentially through a file -class FSSequentialFile { - public: - FSSequentialFile() {} - - virtual ~FSSequentialFile() {} - - // Read up to "n" bytes from the file. "scratch[0..n-1]" may be - // written by this routine. Sets "*result" to the data that was - // read (including if fewer than "n" bytes were successfully read). - // May set "*result" to point at data in "scratch[0..n-1]", so - // "scratch[0..n-1]" must be live when "*result" is used. - // If an error was encountered, returns a non-OK status. - // - // REQUIRES: External synchronization - virtual IOStatus Read(size_t n, const IOOptions& options, Slice* result, - char* scratch, IODebugContext* dbg) = 0; - - // Skip "n" bytes from the file. This is guaranteed to be no - // slower that reading the same data, but may be faster. - // - // If end of file is reached, skipping will stop at the end of the - // file, and Skip will return OK. - // - // REQUIRES: External synchronization - virtual IOStatus Skip(uint64_t n) = 0; - - // Indicates the upper layers if the current SequentialFile implementation - // uses direct IO. - virtual bool use_direct_io() const { return false; } - - // Use the returned alignment value to allocate - // aligned buffer for Direct I/O - virtual size_t GetRequiredBufferAlignment() const { return kDefaultPageSize; } - - // Remove any kind of caching of data from the offset to offset+length - // of this file. If the length is 0, then it refers to the end of file. - // If the system is not caching the file contents, then this is a noop. 
- virtual IOStatus InvalidateCache(size_t /*offset*/, size_t /*length*/) { - return IOStatus::NotSupported("InvalidateCache not supported."); - } - - // Positioned Read for direct I/O - // If Direct I/O enabled, offset, n, and scratch should be properly aligned - virtual IOStatus PositionedRead(uint64_t /*offset*/, size_t /*n*/, - const IOOptions& /*options*/, - Slice* /*result*/, char* /*scratch*/, - IODebugContext* /*dbg*/) { - return IOStatus::NotSupported(); - } - - // If you're adding methods here, remember to add them to - // SequentialFileWrapper too. -}; - -// A read IO request structure for use in MultiRead -struct FSReadRequest { - // File offset in bytes - uint64_t offset; - - // Length to read in bytes - size_t len; - - // A buffer that MultiRead() can optionally place data in. It can - // ignore this and allocate its own buffer - char* scratch; - - // Output parameter set by MultiRead() to point to the data buffer, and - // the number of valid bytes - Slice result; - - // Status of read - IOStatus status; -}; - -// A file abstraction for randomly reading the contents of a file. -class FSRandomAccessFile { - public: - FSRandomAccessFile() {} - - virtual ~FSRandomAccessFile() {} - - // Read up to "n" bytes from the file starting at "offset". - // "scratch[0..n-1]" may be written by this routine. Sets "*result" - // to the data that was read (including if fewer than "n" bytes were - // successfully read). May set "*result" to point at data in - // "scratch[0..n-1]", so "scratch[0..n-1]" must be live when - // "*result" is used. If an error was encountered, returns a non-OK - // status. - // - // Safe for concurrent use by multiple threads. - // If Direct I/O enabled, offset, n, and scratch should be aligned properly. - virtual IOStatus Read(uint64_t offset, size_t n, const IOOptions& options, - Slice* result, char* scratch, - IODebugContext* dbg) const = 0; - - // Readahead the file starting from offset by n bytes for caching. 
- // If it's not implemented (default: `NotSupported`), RocksDB will create - // internal prefetch buffer to improve read performance. - virtual IOStatus Prefetch(uint64_t /*offset*/, size_t /*n*/, - const IOOptions& /*options*/, - IODebugContext* /*dbg*/) { - return IOStatus::NotSupported(); - } - - // Read a bunch of blocks as described by reqs. The blocks can - // optionally be read in parallel. This is a synchronous call, i.e it - // should return after all reads have completed. The reads will be - // non-overlapping. If the function return Status is not ok, status of - // individual requests will be ignored and return status will be assumed - // for all read requests. The function return status is only meant for any - // any errors that occur before even processing specific read requests - virtual IOStatus MultiRead(FSReadRequest* reqs, size_t num_reqs, - const IOOptions& options, IODebugContext* dbg) { - assert(reqs != nullptr); - for (size_t i = 0; i < num_reqs; ++i) { - FSReadRequest& req = reqs[i]; - req.status = - Read(req.offset, req.len, options, &req.result, req.scratch, dbg); - } - return IOStatus::OK(); - } - - // Tries to get an unique ID for this file that will be the same each time - // the file is opened (and will stay the same while the file is open). - // Furthermore, it tries to make this ID at most "max_size" bytes. If such an - // ID can be created this function returns the length of the ID and places it - // in "id"; otherwise, this function returns 0, in which case "id" - // may not have been modified. - // - // This function guarantees, for IDs from a given environment, two unique ids - // cannot be made equal to each other by adding arbitrary bytes to one of - // them. That is, no unique ID is the prefix of another. - // - // This function guarantees that the returned ID will not be interpretable as - // a single varint. - // - // Note: these IDs are only valid for the duration of the process. 
- virtual size_t GetUniqueId(char* /*id*/, size_t /*max_size*/) const { - return 0; // Default implementation to prevent issues with backwards - // compatibility. - }; - - enum AccessPattern { kNormal, kRandom, kSequential, kWillNeed, kWontNeed }; - - virtual void Hint(AccessPattern /*pattern*/) {} - - // Indicates the upper layers if the current RandomAccessFile implementation - // uses direct IO. - virtual bool use_direct_io() const { return false; } - - // Use the returned alignment value to allocate - // aligned buffer for Direct I/O - virtual size_t GetRequiredBufferAlignment() const { return kDefaultPageSize; } - - // Remove any kind of caching of data from the offset to offset+length - // of this file. If the length is 0, then it refers to the end of file. - // If the system is not caching the file contents, then this is a noop. - virtual IOStatus InvalidateCache(size_t /*offset*/, size_t /*length*/) { - return IOStatus::NotSupported("InvalidateCache not supported."); - } - - // If you're adding methods here, remember to add them to - // RandomAccessFileWrapper too. -}; - -// A data structure brings the data verification information, which is -// used togther with data being written to a file. -struct DataVerificationInfo { - // checksum of the data being written. - Slice checksum; -}; - -// A file abstraction for sequential writing. The implementation -// must provide buffering since callers may append small fragments -// at a time to the file. 
-class FSWritableFile { - public: - FSWritableFile() - : last_preallocated_block_(0), - preallocation_block_size_(0), - io_priority_(Env::IO_TOTAL), - write_hint_(Env::WLTH_NOT_SET), - strict_bytes_per_sync_(false) {} - - explicit FSWritableFile(const FileOptions& options) - : last_preallocated_block_(0), - preallocation_block_size_(0), - io_priority_(Env::IO_TOTAL), - write_hint_(Env::WLTH_NOT_SET), - strict_bytes_per_sync_(options.strict_bytes_per_sync) {} - - virtual ~FSWritableFile() {} - - // Append data to the end of the file - // Note: A WriteabelFile object must support either Append or - // PositionedAppend, so the users cannot mix the two. - virtual IOStatus Append(const Slice& data, const IOOptions& options, - IODebugContext* dbg) = 0; - - // EXPERIMENTAL / CURRENTLY UNUSED - // Append data with verification information - // Note that this API change is experimental and it might be changed in - // the future. Currently, RocksDB does not use this API. - virtual IOStatus Append(const Slice& data, const IOOptions& options, - const DataVerificationInfo& /* verification_info */, - IODebugContext* dbg) { - return Append(data, options, dbg); - } - - // PositionedAppend data to the specified offset. The new EOF after append - // must be larger than the previous EOF. This is to be used when writes are - // not backed by OS buffers and hence has to always start from the start of - // the sector. The implementation thus needs to also rewrite the last - // partial sector. - // Note: PositionAppend does not guarantee moving the file offset after the - // write. A WritableFile object must support either Append or - // PositionedAppend, so the users cannot mix the two. - // - // PositionedAppend() can only happen on the page/sector boundaries. For that - // reason, if the last write was an incomplete sector we still need to rewind - // back to the nearest sector/page and rewrite the portion of it with whatever - // we need to add. We need to keep where we stop writing. 
- // - // PositionedAppend() can only write whole sectors. For that reason we have to - // pad with zeros for the last write and trim the file when closing according - // to the position we keep in the previous step. - // - // PositionedAppend() requires aligned buffer to be passed in. The alignment - // required is queried via GetRequiredBufferAlignment() - virtual IOStatus PositionedAppend(const Slice& /* data */, - uint64_t /* offset */, - const IOOptions& /*options*/, - IODebugContext* /*dbg*/) { - return IOStatus::NotSupported(); - } - - // EXPERIMENTAL / CURRENTLY UNUSED - // PositionedAppend data with verification information. - // Note that this API change is experimental and it might be changed in - // the future. Currently, RocksDB does not use this API. - virtual IOStatus PositionedAppend( - const Slice& /* data */, uint64_t /* offset */, - const IOOptions& /*options*/, - const DataVerificationInfo& /* verification_info */, - IODebugContext* /*dbg*/) { - return IOStatus::NotSupported(); - } - - // Truncate is necessary to trim the file to the correct size - // before closing. It is not always possible to keep track of the file - // size due to whole pages writes. The behavior is undefined if called - // with other writes to follow. - virtual IOStatus Truncate(uint64_t /*size*/, const IOOptions& /*options*/, - IODebugContext* /*dbg*/) { - return IOStatus::OK(); - } - virtual IOStatus Close(const IOOptions& options, IODebugContext* dbg) = 0; - virtual IOStatus Flush(const IOOptions& options, IODebugContext* dbg) = 0; - virtual IOStatus Sync(const IOOptions& options, - IODebugContext* dbg) = 0; // sync data - - /* - * Sync data and/or metadata as well. - * By default, sync only data. - * Override this method for environments where we need to sync - * metadata as well. 
- */ - virtual IOStatus Fsync(const IOOptions& options, IODebugContext* dbg) { - return Sync(options, dbg); - } - - // true if Sync() and Fsync() are safe to call concurrently with Append() - // and Flush(). - virtual bool IsSyncThreadSafe() const { return false; } - - // Indicates the upper layers if the current WritableFile implementation - // uses direct IO. - virtual bool use_direct_io() const { return false; } - - // Use the returned alignment value to allocate - // aligned buffer for Direct I/O - virtual size_t GetRequiredBufferAlignment() const { return kDefaultPageSize; } - - virtual void SetWriteLifeTimeHint(Env::WriteLifeTimeHint hint) { - write_hint_ = hint; - } - - virtual void SetIOPriority(Env::IOPriority pri) { io_priority_ = pri; } - - virtual Env::IOPriority GetIOPriority() { return io_priority_; } - - virtual Env::WriteLifeTimeHint GetWriteLifeTimeHint() { return write_hint_; } - /* - * Get the size of valid data in the file. - */ - virtual uint64_t GetFileSize(const IOOptions& /*options*/, - IODebugContext* /*dbg*/) { - return 0; - } - - /* - * Get and set the default pre-allocation block size for writes to - * this file. If non-zero, then Allocate will be used to extend the - * underlying storage of a file (generally via fallocate) if the Env - * instance supports it. - */ - virtual void SetPreallocationBlockSize(size_t size) { - preallocation_block_size_ = size; - } - - virtual void GetPreallocationStatus(size_t* block_size, - size_t* last_allocated_block) { - *last_allocated_block = last_preallocated_block_; - *block_size = preallocation_block_size_; - } - - // For documentation, refer to RandomAccessFile::GetUniqueId() - virtual size_t GetUniqueId(char* /*id*/, size_t /*max_size*/) const { - return 0; // Default implementation to prevent issues with backwards - } - - // Remove any kind of caching of data from the offset to offset+length - // of this file. If the length is 0, then it refers to the end of file. 
- // If the system is not caching the file contents, then this is a noop. - // This call has no effect on dirty pages in the cache. - virtual IOStatus InvalidateCache(size_t /*offset*/, size_t /*length*/) { - return IOStatus::NotSupported("InvalidateCache not supported."); - } - - // Sync a file range with disk. - // offset is the starting byte of the file range to be synchronized. - // nbytes specifies the length of the range to be synchronized. - // This asks the OS to initiate flushing the cached data to disk, - // without waiting for completion. - // Default implementation does nothing. - virtual IOStatus RangeSync(uint64_t /*offset*/, uint64_t /*nbytes*/, - const IOOptions& options, IODebugContext* dbg) { - if (strict_bytes_per_sync_) { - return Sync(options, dbg); - } - return IOStatus::OK(); - } - - // PrepareWrite performs any necessary preparation for a write - // before the write actually occurs. This allows for pre-allocation - // of space on devices where it can result in less file - // fragmentation and/or less waste from over-zealous filesystem - // pre-allocation. - virtual void PrepareWrite(size_t offset, size_t len, const IOOptions& options, - IODebugContext* dbg) { - if (preallocation_block_size_ == 0) { - return; - } - // If this write would cross one or more preallocation blocks, - // determine what the last preallocation block necessary to - // cover this write would be and Allocate to that point. - const auto block_size = preallocation_block_size_; - size_t new_last_preallocated_block = - (offset + len + block_size - 1) / block_size; - if (new_last_preallocated_block > last_preallocated_block_) { - size_t num_spanned_blocks = - new_last_preallocated_block - last_preallocated_block_; - Allocate(block_size * last_preallocated_block_, - block_size * num_spanned_blocks, options, dbg) - .PermitUncheckedError(); - last_preallocated_block_ = new_last_preallocated_block; - } - } - - // Pre-allocates space for a file. 
- virtual IOStatus Allocate(uint64_t /*offset*/, uint64_t /*len*/, - const IOOptions& /*options*/, - IODebugContext* /*dbg*/) { - return IOStatus::OK(); - } - - // If you're adding methods here, remember to add them to - // WritableFileWrapper too. - - protected: - size_t preallocation_block_size() { return preallocation_block_size_; } - - private: - size_t last_preallocated_block_; - size_t preallocation_block_size_; - // No copying allowed - FSWritableFile(const FSWritableFile&); - void operator=(const FSWritableFile&); - - protected: - Env::IOPriority io_priority_; - Env::WriteLifeTimeHint write_hint_; - const bool strict_bytes_per_sync_; -}; - -// A file abstraction for random reading and writing. -class FSRandomRWFile { - public: - FSRandomRWFile() {} - - virtual ~FSRandomRWFile() {} - - // Indicates if the class makes use of direct I/O - // If false you must pass aligned buffer to Write() - virtual bool use_direct_io() const { return false; } - - // Use the returned alignment value to allocate - // aligned buffer for Direct I/O - virtual size_t GetRequiredBufferAlignment() const { return kDefaultPageSize; } - - // Write bytes in `data` at offset `offset`, Returns Status::OK() on success. - // Pass aligned buffer when use_direct_io() returns true. - virtual IOStatus Write(uint64_t offset, const Slice& data, - const IOOptions& options, IODebugContext* dbg) = 0; - - // Read up to `n` bytes starting from offset `offset` and store them in - // result, provided `scratch` size should be at least `n`. - // Returns Status::OK() on success. 
- virtual IOStatus Read(uint64_t offset, size_t n, const IOOptions& options, - Slice* result, char* scratch, - IODebugContext* dbg) const = 0; - - virtual IOStatus Flush(const IOOptions& options, IODebugContext* dbg) = 0; - - virtual IOStatus Sync(const IOOptions& options, IODebugContext* dbg) = 0; - - virtual IOStatus Fsync(const IOOptions& options, IODebugContext* dbg) { - return Sync(options, dbg); - } - - virtual IOStatus Close(const IOOptions& options, IODebugContext* dbg) = 0; - - // If you're adding methods here, remember to add them to - // RandomRWFileWrapper too. - - // No copying allowed - FSRandomRWFile(const RandomRWFile&) = delete; - FSRandomRWFile& operator=(const RandomRWFile&) = delete; -}; - -// MemoryMappedFileBuffer object represents a memory-mapped file's raw buffer. -// Subclasses should release the mapping upon destruction. -class FSMemoryMappedFileBuffer { - public: - FSMemoryMappedFileBuffer(void* _base, size_t _length) - : base_(_base), length_(_length) {} - - virtual ~FSMemoryMappedFileBuffer() = 0; - - // We do not want to unmap this twice. We can make this class - // movable if desired, however, since - FSMemoryMappedFileBuffer(const FSMemoryMappedFileBuffer&) = delete; - FSMemoryMappedFileBuffer& operator=(const FSMemoryMappedFileBuffer&) = delete; - - void* GetBase() const { return base_; } - size_t GetLen() const { return length_; } - - protected: - void* base_; - const size_t length_; -}; - -// Directory object represents collection of files and implements -// filesystem operations that can be executed on directories. -class FSDirectory { - public: - virtual ~FSDirectory() {} - // Fsync directory. Can be called concurrently from multiple threads. - virtual IOStatus Fsync(const IOOptions& options, IODebugContext* dbg) = 0; - - virtual size_t GetUniqueId(char* /*id*/, size_t /*max_size*/) const { - return 0; - } - - // If you're adding methods here, remember to add them to - // DirectoryWrapper too. 
-}; - -// Below are helpers for wrapping most of the classes in this file. -// They forward all calls to another instance of the class. -// Useful when wrapping the default implementations. -// Typical usage is to inherit your wrapper from *Wrapper, e.g.: -// -// class MySequentialFileWrapper : public -// ROCKSDB_NAMESPACE::FSSequentialFileWrapper { -// public: -// MySequentialFileWrapper(ROCKSDB_NAMESPACE::FSSequentialFile* target): -// ROCKSDB_NAMESPACE::FSSequentialFileWrapper(target) {} -// Status Read(size_t n, FileSystem::IOOptions& options, Slice* result, -// char* scratch, FileSystem::IODebugContext* dbg) override { -// cout << "Doing a read of size " << n << "!" << endl; -// return ROCKSDB_NAMESPACE::FSSequentialFileWrapper::Read(n, options, -// result, -// scratch, dbg); -// } -// // All other methods are forwarded to target_ automatically. -// }; -// -// This is often more convenient than inheriting the class directly because -// (a) Don't have to override and forward all methods - the Wrapper will -// forward everything you're not explicitly overriding. -// (b) Don't need to update the wrapper when more methods are added to the -// rocksdb class. Unless you actually want to override the behavior. -// (And unless rocksdb people forgot to update the *Wrapper class.) - -// An implementation of Env that forwards all calls to another Env. -// May be useful to clients who wish to override just part of the -// functionality of another Env. 
-class FileSystemWrapper : public FileSystem { - public: - // Initialize an EnvWrapper that delegates all calls to *t - explicit FileSystemWrapper(std::shared_ptr t) : target_(t) {} - ~FileSystemWrapper() override {} - - const char* Name() const override { return target_->Name(); } - - // Return the target to which this Env forwards all calls - FileSystem* target() const { return target_.get(); } - - // The following text is boilerplate that forwards all methods to target() - IOStatus NewSequentialFile(const std::string& f, - const FileOptions& file_opts, - std::unique_ptr* r, - IODebugContext* dbg) override { - return target_->NewSequentialFile(f, file_opts, r, dbg); - } - IOStatus NewRandomAccessFile(const std::string& f, - const FileOptions& file_opts, - std::unique_ptr* r, - IODebugContext* dbg) override { - return target_->NewRandomAccessFile(f, file_opts, r, dbg); - } - IOStatus NewWritableFile(const std::string& f, const FileOptions& file_opts, - std::unique_ptr* r, - IODebugContext* dbg) override { - return target_->NewWritableFile(f, file_opts, r, dbg); - } - IOStatus ReopenWritableFile(const std::string& fname, - const FileOptions& file_opts, - std::unique_ptr* result, - IODebugContext* dbg) override { - return target_->ReopenWritableFile(fname, file_opts, result, dbg); - } - IOStatus ReuseWritableFile(const std::string& fname, - const std::string& old_fname, - const FileOptions& file_opts, - std::unique_ptr* r, - IODebugContext* dbg) override { - return target_->ReuseWritableFile(fname, old_fname, file_opts, r, - dbg); - } - IOStatus NewRandomRWFile(const std::string& fname, - const FileOptions& file_opts, - std::unique_ptr* result, - IODebugContext* dbg) override { - return target_->NewRandomRWFile(fname, file_opts, result, dbg); - } - IOStatus NewMemoryMappedFileBuffer( - const std::string& fname, - std::unique_ptr* result) override { - return target_->NewMemoryMappedFileBuffer(fname, result); - } - IOStatus NewDirectory(const std::string& name, const 
IOOptions& io_opts, - std::unique_ptr* result, - IODebugContext* dbg) override { - return target_->NewDirectory(name, io_opts, result, dbg); - } - IOStatus FileExists(const std::string& f, const IOOptions& io_opts, - IODebugContext* dbg) override { - return target_->FileExists(f, io_opts, dbg); - } - IOStatus GetChildren(const std::string& dir, const IOOptions& io_opts, - std::vector* r, - IODebugContext* dbg) override { - return target_->GetChildren(dir, io_opts, r, dbg); - } - IOStatus GetChildrenFileAttributes(const std::string& dir, - const IOOptions& options, - std::vector* result, - IODebugContext* dbg) override { - return target_->GetChildrenFileAttributes(dir, options, result, dbg); - } - IOStatus DeleteFile(const std::string& f, const IOOptions& options, - IODebugContext* dbg) override { - return target_->DeleteFile(f, options, dbg); - } - IOStatus Truncate(const std::string& fname, size_t size, - const IOOptions& options, IODebugContext* dbg) override { - return target_->Truncate(fname, size, options, dbg); - } - IOStatus CreateDir(const std::string& d, const IOOptions& options, - IODebugContext* dbg) override { - return target_->CreateDir(d, options, dbg); - } - IOStatus CreateDirIfMissing(const std::string& d, const IOOptions& options, - IODebugContext* dbg) override { - return target_->CreateDirIfMissing(d, options, dbg); - } - IOStatus DeleteDir(const std::string& d, const IOOptions& options, - IODebugContext* dbg) override { - return target_->DeleteDir(d, options, dbg); - } - IOStatus GetFileSize(const std::string& f, const IOOptions& options, - uint64_t* s, IODebugContext* dbg) override { - return target_->GetFileSize(f, options, s, dbg); - } - - IOStatus GetFileModificationTime(const std::string& fname, - const IOOptions& options, - uint64_t* file_mtime, - IODebugContext* dbg) override { - return target_->GetFileModificationTime(fname, options, file_mtime, dbg); - } - - IOStatus GetAbsolutePath(const std::string& db_path, const IOOptions& options, 
- std::string* output_path, - IODebugContext* dbg) override { - return target_->GetAbsolutePath(db_path, options, output_path, dbg); - } - - IOStatus RenameFile(const std::string& s, const std::string& t, - const IOOptions& options, IODebugContext* dbg) override { - return target_->RenameFile(s, t, options, dbg); - } - - IOStatus LinkFile(const std::string& s, const std::string& t, - const IOOptions& options, IODebugContext* dbg) override { - return target_->LinkFile(s, t, options, dbg); - } - - IOStatus NumFileLinks(const std::string& fname, const IOOptions& options, - uint64_t* count, IODebugContext* dbg) override { - return target_->NumFileLinks(fname, options, count, dbg); - } - - IOStatus AreFilesSame(const std::string& first, const std::string& second, - const IOOptions& options, bool* res, - IODebugContext* dbg) override { - return target_->AreFilesSame(first, second, options, res, dbg); - } - - IOStatus LockFile(const std::string& f, const IOOptions& options, - FileLock** l, IODebugContext* dbg) override { - return target_->LockFile(f, options, l, dbg); - } - - IOStatus UnlockFile(FileLock* l, const IOOptions& options, - IODebugContext* dbg) override { - return target_->UnlockFile(l, options, dbg); - } - - IOStatus GetTestDirectory(const IOOptions& options, std::string* path, - IODebugContext* dbg) override { - return target_->GetTestDirectory(options, path, dbg); - } - IOStatus NewLogger(const std::string& fname, const IOOptions& options, - std::shared_ptr* result, - IODebugContext* dbg) override { - return target_->NewLogger(fname, options, result, dbg); - } - - void SanitizeFileOptions(FileOptions* opts) const override { - target_->SanitizeFileOptions(opts); - } - - FileOptions OptimizeForLogRead( - const FileOptions& file_options) const override { - return target_->OptimizeForLogRead(file_options); - } - FileOptions OptimizeForManifestRead( - const FileOptions& file_options) const override { - return target_->OptimizeForManifestRead(file_options); - } - 
FileOptions OptimizeForLogWrite(const FileOptions& file_options, - const DBOptions& db_options) const override { - return target_->OptimizeForLogWrite(file_options, db_options); - } - FileOptions OptimizeForManifestWrite( - const FileOptions& file_options) const override { - return target_->OptimizeForManifestWrite(file_options); - } - FileOptions OptimizeForCompactionTableWrite( - const FileOptions& file_options, - const ImmutableDBOptions& immutable_ops) const override { - return target_->OptimizeForCompactionTableWrite(file_options, - immutable_ops); - } - FileOptions OptimizeForCompactionTableRead( - const FileOptions& file_options, - const ImmutableDBOptions& db_options) const override { - return target_->OptimizeForCompactionTableRead(file_options, db_options); - } - IOStatus GetFreeSpace(const std::string& path, const IOOptions& options, - uint64_t* diskfree, IODebugContext* dbg) override { - return target_->GetFreeSpace(path, options, diskfree, dbg); - } - IOStatus IsDirectory(const std::string& path, const IOOptions& options, - bool* is_dir, IODebugContext* dbg) override { - return target_->IsDirectory(path, options, is_dir, dbg); - } - - private: - std::shared_ptr target_; -}; - -class FSSequentialFileWrapper : public FSSequentialFile { - public: - explicit FSSequentialFileWrapper(FSSequentialFile* t) : target_(t) {} - - FSSequentialFile* target() const { return target_; } - - IOStatus Read(size_t n, const IOOptions& options, Slice* result, - char* scratch, IODebugContext* dbg) override { - return target_->Read(n, options, result, scratch, dbg); - } - IOStatus Skip(uint64_t n) override { return target_->Skip(n); } - bool use_direct_io() const override { return target_->use_direct_io(); } - size_t GetRequiredBufferAlignment() const override { - return target_->GetRequiredBufferAlignment(); - } - IOStatus InvalidateCache(size_t offset, size_t length) override { - return target_->InvalidateCache(offset, length); - } - IOStatus PositionedRead(uint64_t offset, 
size_t n, const IOOptions& options, - Slice* result, char* scratch, - IODebugContext* dbg) override { - return target_->PositionedRead(offset, n, options, result, scratch, dbg); - } - - private: - FSSequentialFile* target_; -}; - -class FSRandomAccessFileWrapper : public FSRandomAccessFile { - public: - explicit FSRandomAccessFileWrapper(FSRandomAccessFile* t) : target_(t) {} - - FSRandomAccessFile* target() const { return target_; } - - IOStatus Read(uint64_t offset, size_t n, const IOOptions& options, - Slice* result, char* scratch, - IODebugContext* dbg) const override { - return target_->Read(offset, n, options, result, scratch, dbg); - } - IOStatus MultiRead(FSReadRequest* reqs, size_t num_reqs, - const IOOptions& options, IODebugContext* dbg) override { - return target_->MultiRead(reqs, num_reqs, options, dbg); - } - IOStatus Prefetch(uint64_t offset, size_t n, const IOOptions& options, - IODebugContext* dbg) override { - return target_->Prefetch(offset, n, options, dbg); - } - size_t GetUniqueId(char* id, size_t max_size) const override { - return target_->GetUniqueId(id, max_size); - }; - void Hint(AccessPattern pattern) override { target_->Hint(pattern); } - bool use_direct_io() const override { return target_->use_direct_io(); } - size_t GetRequiredBufferAlignment() const override { - return target_->GetRequiredBufferAlignment(); - } - IOStatus InvalidateCache(size_t offset, size_t length) override { - return target_->InvalidateCache(offset, length); - } - - private: - FSRandomAccessFile* target_; -}; - -class FSWritableFileWrapper : public FSWritableFile { - public: - explicit FSWritableFileWrapper(FSWritableFile* t) : target_(t) {} - - FSWritableFile* target() const { return target_; } - - IOStatus Append(const Slice& data, const IOOptions& options, - IODebugContext* dbg) override { - return target_->Append(data, options, dbg); - } - IOStatus Append(const Slice& data, const IOOptions& options, - const DataVerificationInfo& verification_info, - 
IODebugContext* dbg) override { - return target_->Append(data, options, verification_info, dbg); - } - IOStatus PositionedAppend(const Slice& data, uint64_t offset, - const IOOptions& options, - IODebugContext* dbg) override { - return target_->PositionedAppend(data, offset, options, dbg); - } - IOStatus PositionedAppend(const Slice& data, uint64_t offset, - const IOOptions& options, - const DataVerificationInfo& verification_info, - IODebugContext* dbg) override { - return target_->PositionedAppend(data, offset, options, verification_info, - dbg); - } - IOStatus Truncate(uint64_t size, const IOOptions& options, - IODebugContext* dbg) override { - return target_->Truncate(size, options, dbg); - } - IOStatus Close(const IOOptions& options, IODebugContext* dbg) override { - return target_->Close(options, dbg); - } - IOStatus Flush(const IOOptions& options, IODebugContext* dbg) override { - return target_->Flush(options, dbg); - } - IOStatus Sync(const IOOptions& options, IODebugContext* dbg) override { - return target_->Sync(options, dbg); - } - IOStatus Fsync(const IOOptions& options, IODebugContext* dbg) override { - return target_->Fsync(options, dbg); - } - bool IsSyncThreadSafe() const override { return target_->IsSyncThreadSafe(); } - - bool use_direct_io() const override { return target_->use_direct_io(); } - - size_t GetRequiredBufferAlignment() const override { - return target_->GetRequiredBufferAlignment(); - } - - void SetWriteLifeTimeHint(Env::WriteLifeTimeHint hint) override { - target_->SetWriteLifeTimeHint(hint); - } - - Env::WriteLifeTimeHint GetWriteLifeTimeHint() override { - return target_->GetWriteLifeTimeHint(); - } - - uint64_t GetFileSize(const IOOptions& options, IODebugContext* dbg) override { - return target_->GetFileSize(options, dbg); - } - - void SetPreallocationBlockSize(size_t size) override { - target_->SetPreallocationBlockSize(size); - } - - void GetPreallocationStatus(size_t* block_size, - size_t* last_allocated_block) override { - 
target_->GetPreallocationStatus(block_size, last_allocated_block); - } - - size_t GetUniqueId(char* id, size_t max_size) const override { - return target_->GetUniqueId(id, max_size); - } - - IOStatus InvalidateCache(size_t offset, size_t length) override { - return target_->InvalidateCache(offset, length); - } - - IOStatus RangeSync(uint64_t offset, uint64_t nbytes, const IOOptions& options, - IODebugContext* dbg) override { - return target_->RangeSync(offset, nbytes, options, dbg); - } - - void PrepareWrite(size_t offset, size_t len, const IOOptions& options, - IODebugContext* dbg) override { - target_->PrepareWrite(offset, len, options, dbg); - } - - IOStatus Allocate(uint64_t offset, uint64_t len, const IOOptions& options, - IODebugContext* dbg) override { - return target_->Allocate(offset, len, options, dbg); - } - - private: - FSWritableFile* target_; -}; - -class FSRandomRWFileWrapper : public FSRandomRWFile { - public: - explicit FSRandomRWFileWrapper(FSRandomRWFile* t) : target_(t) {} - - FSRandomRWFile* target() const { return target_; } - - bool use_direct_io() const override { return target_->use_direct_io(); } - size_t GetRequiredBufferAlignment() const override { - return target_->GetRequiredBufferAlignment(); - } - IOStatus Write(uint64_t offset, const Slice& data, const IOOptions& options, - IODebugContext* dbg) override { - return target_->Write(offset, data, options, dbg); - } - IOStatus Read(uint64_t offset, size_t n, const IOOptions& options, - Slice* result, char* scratch, - IODebugContext* dbg) const override { - return target_->Read(offset, n, options, result, scratch, dbg); - } - IOStatus Flush(const IOOptions& options, IODebugContext* dbg) override { - return target_->Flush(options, dbg); - } - IOStatus Sync(const IOOptions& options, IODebugContext* dbg) override { - return target_->Sync(options, dbg); - } - IOStatus Fsync(const IOOptions& options, IODebugContext* dbg) override { - return target_->Fsync(options, dbg); - } - IOStatus 
Close(const IOOptions& options, IODebugContext* dbg) override { - return target_->Close(options, dbg); - } - - private: - FSRandomRWFile* target_; -}; - -class FSDirectoryWrapper : public FSDirectory { - public: - explicit FSDirectoryWrapper(FSDirectory* t) : target_(t) {} - - IOStatus Fsync(const IOOptions& options, IODebugContext* dbg) override { - return target_->Fsync(options, dbg); - } - size_t GetUniqueId(char* id, size_t max_size) const override { - return target_->GetUniqueId(id, max_size); - } - - private: - FSDirectory* target_; -}; - -// A utility routine: write "data" to the named file. -extern IOStatus WriteStringToFile(FileSystem* fs, const Slice& data, - const std::string& fname, - bool should_sync = false); - -// A utility routine: read contents of named file into *data -extern IOStatus ReadFileToString(FileSystem* fs, const std::string& fname, - std::string* data); - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/filter_policy.h b/dist/darwin_amd64/include/rocksdb/filter_policy.h deleted file mode 100644 index 3cd85a2..0000000 --- a/dist/darwin_amd64/include/rocksdb/filter_policy.h +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// Copyright (c) 2012 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. -// -// A database can be configured with a custom FilterPolicy object. -// This object is responsible for creating a small filter from a set -// of keys. These filters are stored in rocksdb and are consulted -// automatically by rocksdb to decide whether or not to read some -// information from disk. 
In many cases, a filter can cut down the -// number of disk seeks form a handful to a single disk seek per -// DB::Get() call. -// -// Most people will want to use the builtin bloom filter support (see -// NewBloomFilterPolicy() below). - -#pragma once - -#include - -#include -#include -#include -#include - -#include "rocksdb/advanced_options.h" -#include "rocksdb/status.h" - -namespace ROCKSDB_NAMESPACE { - -class Slice; -struct BlockBasedTableOptions; -struct ConfigOptions; - -// A class that takes a bunch of keys, then generates filter -class FilterBitsBuilder { - public: - virtual ~FilterBitsBuilder() {} - - // Add Key to filter, you could use any way to store the key. - // Such as: storing hashes or original keys - // Keys are in sorted order and duplicated keys are possible. - virtual void AddKey(const Slice& key) = 0; - - // Generate the filter using the keys that are added - // The return value of this function would be the filter bits, - // The ownership of actual data is set to buf - virtual Slice Finish(std::unique_ptr* buf) = 0; - - // Calculate num of keys that can be added and generate a filter - // <= the specified number of bytes. 
-#if defined(_MSC_VER) -#pragma warning(push) -#pragma warning(disable : 4702) // unreachable code -#endif - virtual int CalculateNumEntry(const uint32_t /*bytes*/) { -#ifndef ROCKSDB_LITE - throw std::runtime_error("CalculateNumEntry not Implemented"); -#else - abort(); -#endif - return 0; - } -#if defined(_MSC_VER) -#pragma warning(pop) -#endif -}; - -// A class that checks if a key can be in filter -// It should be initialized by Slice generated by BitsBuilder -class FilterBitsReader { - public: - virtual ~FilterBitsReader() {} - - // Check if the entry match the bits in filter - virtual bool MayMatch(const Slice& entry) = 0; - - // Check if an array of entries match the bits in filter - virtual void MayMatch(int num_keys, Slice** keys, bool* may_match) { - for (int i = 0; i < num_keys; ++i) { - may_match[i] = MayMatch(*keys[i]); - } - } -}; - -// Contextual information passed to BloomFilterPolicy at filter building time. -// Used in overriding FilterPolicy::GetBuilderWithContext(). References other -// structs because this is expected to be a temporary, stack-allocated object. -struct FilterBuildingContext { - // This constructor is for internal use only and subject to change. - FilterBuildingContext(const BlockBasedTableOptions& table_options); - - // Options for the table being built - const BlockBasedTableOptions& table_options; - - // Name of the column family for the table (or empty string if unknown) - std::string column_family_name; - - // The compactions style in effect for the table - CompactionStyle compaction_style = kCompactionStyleLevel; - - // The table level at time of constructing the SST file, or -1 if unknown. - // (The table file could later be used at a different level.) - int level_at_creation = -1; - - // An optional logger for reporting errors, warnings, etc. 
- Logger* info_log = nullptr; -}; - -// We add a new format of filter block called full filter block -// This new interface gives you more space of customization -// -// For the full filter block, you can plug in your version by implement -// the FilterBitsBuilder and FilterBitsReader -// -// There are two sets of interface in FilterPolicy -// Set 1: CreateFilter, KeyMayMatch: used for blockbased filter -// Set 2: GetFilterBitsBuilder, GetFilterBitsReader, they are used for -// full filter. -// Set 1 MUST be implemented correctly, Set 2 is optional -// RocksDB would first try using functions in Set 2. if they return nullptr, -// it would use Set 1 instead. -// You can choose filter type in NewBloomFilterPolicy -class FilterPolicy { - public: - virtual ~FilterPolicy(); - - // Creates a new FilterPolicy based on the input value string and returns the - // result The value might be an ID, and ID with properties, or an old-style - // policy string. - // The value describes the FilterPolicy being created. - // For BloomFilters, value may be a ":"-delimited value of the form: - // "bloomfilter:[bits_per_key]:[use_block_based_builder]", - // e.g. ""bloomfilter:4:true" - // The above string is equivalent to calling NewBloomFilterPolicy(4, true). - static Status CreateFromString(const ConfigOptions& config_options, - const std::string& value, - std::shared_ptr* result); - - // Return the name of this policy. Note that if the filter encoding - // changes in an incompatible way, the name returned by this method - // must be changed. Otherwise, old incompatible filters may be - // passed to methods of this type. - virtual const char* Name() const = 0; - - // keys[0,n-1] contains a list of keys (potentially with duplicates) - // that are ordered according to the user supplied comparator. - // Append a filter that summarizes keys[0,n-1] to *dst. - // - // Warning: do not change the initial contents of *dst. Instead, - // append the newly constructed filter to *dst. 
- virtual void CreateFilter(const Slice* keys, int n, - std::string* dst) const = 0; - - // "filter" contains the data appended by a preceding call to - // CreateFilter() on this class. This method must return true if - // the key was in the list of keys passed to CreateFilter(). - // This method may return true or false if the key was not on the - // list, but it should aim to return false with a high probability. - virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const = 0; - - // Return a new FilterBitsBuilder for full or partitioned filter blocks, or - // nullptr if using block-based filter. - // NOTE: This function is only called by GetBuilderWithContext() below for - // custom FilterPolicy implementations. Thus, it is not necessary to - // override this function if overriding GetBuilderWithContext(). - virtual FilterBitsBuilder* GetFilterBitsBuilder() const { return nullptr; } - - // A newer variant of GetFilterBitsBuilder that allows a FilterPolicy - // to customize the builder for contextual constraints and hints. - // (Name changed to avoid triggering -Werror=overloaded-virtual.) - // If overriding GetFilterBitsBuilder() suffices, it is not necessary to - // override this function. - virtual FilterBitsBuilder* GetBuilderWithContext( - const FilterBuildingContext&) const { - return GetFilterBitsBuilder(); - } - - // Return a new FilterBitsReader for full or partitioned filter blocks, or - // nullptr if using block-based filter. - // As here, the input slice should NOT be deleted by FilterPolicy. - virtual FilterBitsReader* GetFilterBitsReader( - const Slice& /*contents*/) const { - return nullptr; - } -}; - -// Return a new filter policy that uses a bloom filter with approximately -// the specified number of bits per key. -// -// bits_per_key: average bits allocated per key in bloom filter. A good -// choice is 9.9, which yields a filter with ~ 1% false positive rate. 
-// When format_version < 5, the value will be rounded to the nearest -// integer. Recommend using no more than three decimal digits after the -// decimal point, as in 6.667. -// -// use_block_based_builder: use deprecated block based filter (true) rather -// than full or partitioned filter (false). -// -// Callers must delete the result after any database that is using the -// result has been closed. -// -// Note: if you are using a custom comparator that ignores some parts -// of the keys being compared, you must not use NewBloomFilterPolicy() -// and must provide your own FilterPolicy that also ignores the -// corresponding parts of the keys. For example, if the comparator -// ignores trailing spaces, it would be incorrect to use a -// FilterPolicy (like NewBloomFilterPolicy) that does not ignore -// trailing spaces in keys. -extern const FilterPolicy* NewBloomFilterPolicy( - double bits_per_key, bool use_block_based_builder = false); -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/flush_block_policy.h b/dist/darwin_amd64/include/rocksdb/flush_block_policy.h deleted file mode 100644 index badc080..0000000 --- a/dist/darwin_amd64/include/rocksdb/flush_block_policy.h +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#include -#include "rocksdb/table.h" - -namespace ROCKSDB_NAMESPACE { - -class Slice; -class BlockBuilder; -struct Options; - -// FlushBlockPolicy provides a configurable way to determine when to flush a -// block in the block based tables, -class FlushBlockPolicy { - public: - // Keep track of the key/value sequences and return the boolean value to - // determine if table builder should flush current data block. 
- virtual bool Update(const Slice& key, const Slice& value) = 0; - - virtual ~FlushBlockPolicy() {} -}; - -class FlushBlockPolicyFactory { - public: - // Return the name of the flush block policy. - virtual const char* Name() const = 0; - - // Return a new block flush policy that flushes data blocks by data size. - // FlushBlockPolicy may need to access the metadata of the data block - // builder to determine when to flush the blocks. - // - // Callers must delete the result after any database that is using the - // result has been closed. - virtual FlushBlockPolicy* NewFlushBlockPolicy( - const BlockBasedTableOptions& table_options, - const BlockBuilder& data_block_builder) const = 0; - - virtual ~FlushBlockPolicyFactory() {} -}; - -class FlushBlockBySizePolicyFactory : public FlushBlockPolicyFactory { - public: - FlushBlockBySizePolicyFactory() {} - - const char* Name() const override { return "FlushBlockBySizePolicyFactory"; } - - FlushBlockPolicy* NewFlushBlockPolicy( - const BlockBasedTableOptions& table_options, - const BlockBuilder& data_block_builder) const override; - - static FlushBlockPolicy* NewFlushBlockPolicy( - const uint64_t size, const int deviation, - const BlockBuilder& data_block_builder); -}; - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/io_status.h b/dist/darwin_amd64/include/rocksdb/io_status.h deleted file mode 100644 index ea13d3b..0000000 --- a/dist/darwin_amd64/include/rocksdb/io_status.h +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright (c) 2019-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// -// An IOStatus encapsulates the result of an operation. It may indicate -// success, or it may indicate an error with an associated error message. 
-// -// Multiple threads can invoke const methods on an IOStatus without -// external synchronization, but if any of the threads may call a -// non-const method, all threads accessing the same IOStatus must use -// external synchronization. - -#pragma once - -#include -#include "rocksdb/slice.h" -#ifdef OS_WIN -#include -#endif -#include -#include "status.h" - -namespace ROCKSDB_NAMESPACE { - -class IOStatus : public Status { - public: - using Code = Status::Code; - using SubCode = Status::SubCode; - - enum IOErrorScope { - kIOErrorScopeFileSystem, - kIOErrorScopeFile, - kIOErrorScopeRange, - kIOErrorScopeMax, - }; - - // Create a success status. - IOStatus() : IOStatus(kOk, kNone) {} - ~IOStatus() {} - - // Copy the specified status. - IOStatus(const IOStatus& s); - IOStatus& operator=(const IOStatus& s); - IOStatus(IOStatus&& s) -#if !(defined _MSC_VER) || ((defined _MSC_VER) && (_MSC_VER >= 1900)) - noexcept -#endif - ; - IOStatus& operator=(IOStatus&& s) -#if !(defined _MSC_VER) || ((defined _MSC_VER) && (_MSC_VER >= 1900)) - noexcept -#endif - ; - bool operator==(const IOStatus& rhs) const; - bool operator!=(const IOStatus& rhs) const; - - void SetRetryable(bool retryable) { retryable_ = retryable; } - void SetDataLoss(bool data_loss) { data_loss_ = data_loss; } - void SetScope(IOErrorScope scope) { scope_ = scope; } - - bool GetRetryable() const { return retryable_; } - bool GetDataLoss() const { return data_loss_; } - IOErrorScope GetScope() const { return scope_; } - - // Return a success status. - static IOStatus OK() { return IOStatus(); } - - static IOStatus NotSupported(const Slice& msg, const Slice& msg2 = Slice()) { - return IOStatus(kNotSupported, msg, msg2); - } - static IOStatus NotSupported(SubCode msg = kNone) { - return IOStatus(kNotSupported, msg); - } - - // Return error status of an appropriate type. 
- static IOStatus NotFound(const Slice& msg, const Slice& msg2 = Slice()) { - return IOStatus(kNotFound, msg, msg2); - } - // Fast path for not found without malloc; - static IOStatus NotFound(SubCode msg = kNone) { - return IOStatus(kNotFound, msg); - } - - static IOStatus Corruption(const Slice& msg, const Slice& msg2 = Slice()) { - return IOStatus(kCorruption, msg, msg2); - } - static IOStatus Corruption(SubCode msg = kNone) { - return IOStatus(kCorruption, msg); - } - - static IOStatus InvalidArgument(const Slice& msg, - const Slice& msg2 = Slice()) { - return IOStatus(kInvalidArgument, msg, msg2); - } - static IOStatus InvalidArgument(SubCode msg = kNone) { - return IOStatus(kInvalidArgument, msg); - } - - static IOStatus IOError(const Slice& msg, const Slice& msg2 = Slice()) { - return IOStatus(kIOError, msg, msg2); - } - static IOStatus IOError(SubCode msg = kNone) { - return IOStatus(kIOError, msg); - } - - static IOStatus Busy(SubCode msg = kNone) { return IOStatus(kBusy, msg); } - static IOStatus Busy(const Slice& msg, const Slice& msg2 = Slice()) { - return IOStatus(kBusy, msg, msg2); - } - - static IOStatus TimedOut(SubCode msg = kNone) { - return IOStatus(kTimedOut, msg); - } - static IOStatus TimedOut(const Slice& msg, const Slice& msg2 = Slice()) { - return IOStatus(kTimedOut, msg, msg2); - } - - static IOStatus NoSpace() { return IOStatus(kIOError, kNoSpace); } - static IOStatus NoSpace(const Slice& msg, const Slice& msg2 = Slice()) { - return IOStatus(kIOError, kNoSpace, msg, msg2); - } - - static IOStatus PathNotFound() { return IOStatus(kIOError, kPathNotFound); } - static IOStatus PathNotFound(const Slice& msg, const Slice& msg2 = Slice()) { - return IOStatus(kIOError, kPathNotFound, msg, msg2); - } - - static IOStatus IOFenced() { return IOStatus(kIOError, kIOFenced); } - static IOStatus IOFenced(const Slice& msg, const Slice& msg2 = Slice()) { - return IOStatus(kIOError, kIOFenced, msg, msg2); - } - - // Return a string representation of this 
status suitable for printing. - // Returns the string "OK" for success. - // std::string ToString() const; - - private: - friend IOStatus status_to_io_status(Status&&); - bool retryable_; - bool data_loss_; - IOErrorScope scope_; - - explicit IOStatus(Code _code, SubCode _subcode = kNone) - : Status(_code, _subcode), - retryable_(false), - data_loss_(false), - scope_(kIOErrorScopeFileSystem) {} - - IOStatus(Code _code, SubCode _subcode, const Slice& msg, const Slice& msg2); - IOStatus(Code _code, const Slice& msg, const Slice& msg2) - : IOStatus(_code, kNone, msg, msg2) {} -}; - -inline IOStatus::IOStatus(Code _code, SubCode _subcode, const Slice& msg, - const Slice& msg2) - : Status(_code, _subcode), - retryable_(false), - data_loss_(false), - scope_(kIOErrorScopeFileSystem) { - assert(code_ != kOk); - assert(subcode_ != kMaxSubCode); - const size_t len1 = msg.size(); - const size_t len2 = msg2.size(); - const size_t size = len1 + (len2 ? (2 + len2) : 0); - char* const result = new char[size + 1]; // +1 for null terminator - memcpy(result, msg.data(), len1); - if (len2) { - result[len1] = ':'; - result[len1 + 1] = ' '; - memcpy(result + len1 + 2, msg2.data(), len2); - } - result[size] = '\0'; // null terminator for C style string - state_ = result; -} - -inline IOStatus::IOStatus(const IOStatus& s) : Status(s.code_, s.subcode_) { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - s.checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - retryable_ = s.retryable_; - data_loss_ = s.data_loss_; - scope_ = s.scope_; - state_ = (s.state_ == nullptr) ? nullptr : CopyState(s.state_); -} -inline IOStatus& IOStatus::operator=(const IOStatus& s) { - // The following condition catches both aliasing (when this == &s), - // and the common case where both s and *this are ok. 
- if (this != &s) { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - s.checked_ = true; - checked_ = false; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - code_ = s.code_; - subcode_ = s.subcode_; - retryable_ = s.retryable_; - data_loss_ = s.data_loss_; - scope_ = s.scope_; - delete[] state_; - state_ = (s.state_ == nullptr) ? nullptr : CopyState(s.state_); - } - return *this; -} - -inline IOStatus::IOStatus(IOStatus&& s) -#if !(defined _MSC_VER) || ((defined _MSC_VER) && (_MSC_VER >= 1900)) - noexcept -#endif - : IOStatus() { - *this = std::move(s); -} - -inline IOStatus& IOStatus::operator=(IOStatus&& s) -#if !(defined _MSC_VER) || ((defined _MSC_VER) && (_MSC_VER >= 1900)) - noexcept -#endif -{ - if (this != &s) { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - s.checked_ = true; - checked_ = false; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - code_ = std::move(s.code_); - s.code_ = kOk; - subcode_ = std::move(s.subcode_); - s.subcode_ = kNone; - retryable_ = s.retryable_; - data_loss_ = s.data_loss_; - scope_ = s.scope_; - s.scope_ = kIOErrorScopeFileSystem; - delete[] state_; - state_ = nullptr; - std::swap(state_, s.state_); - } - return *this; -} - -inline bool IOStatus::operator==(const IOStatus& rhs) const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; - rhs.checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return (code_ == rhs.code_); -} - -inline bool IOStatus::operator!=(const IOStatus& rhs) const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; - rhs.checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return !(*this == rhs); -} - -inline IOStatus status_to_io_status(Status&& status) { - if (status.ok()) { - // Fast path - return IOStatus::OK(); - } else { - const char* state = status.getState(); - if (state) { - return IOStatus(status.code(), status.subcode(), - Slice(state, strlen(status.getState()) + 1), Slice()); - } else { - return IOStatus(status.code(), status.subcode()); - } - } -} - -} // namespace ROCKSDB_NAMESPACE diff --git 
a/dist/darwin_amd64/include/rocksdb/iostats_context.h b/dist/darwin_amd64/include/rocksdb/iostats_context.h deleted file mode 100644 index b31b6d7..0000000 --- a/dist/darwin_amd64/include/rocksdb/iostats_context.h +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -#pragma once - -#include -#include - -#include "rocksdb/perf_level.h" - -// A thread local context for gathering io-stats efficiently and transparently. -// Use SetPerfLevel(PerfLevel::kEnableTime) to enable time stats. - -namespace ROCKSDB_NAMESPACE { - -struct IOStatsContext { - // reset all io-stats counter to zero - void Reset(); - - std::string ToString(bool exclude_zero_counters = false) const; - - // the thread pool id - uint64_t thread_pool_id; - - // number of bytes that has been written. - uint64_t bytes_written; - // number of bytes that has been read. - uint64_t bytes_read; - - // time spent in open() and fopen(). - uint64_t open_nanos; - // time spent in fallocate(). - uint64_t allocate_nanos; - // time spent in write() and pwrite(). - uint64_t write_nanos; - // time spent in read() and pread() - uint64_t read_nanos; - // time spent in sync_file_range(). - uint64_t range_sync_nanos; - // time spent in fsync - uint64_t fsync_nanos; - // time spent in preparing write (fallocate etc). - uint64_t prepare_write_nanos; - // time spent in Logger::Logv(). 
- uint64_t logger_nanos; - // CPU time spent in write() and pwrite() - uint64_t cpu_write_nanos; - // CPU time spent in read() and pread() - uint64_t cpu_read_nanos; -}; - -// Get Thread-local IOStatsContext object pointer -IOStatsContext* get_iostats_context(); - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/iterator.h b/dist/darwin_amd64/include/rocksdb/iterator.h deleted file mode 100644 index aa2f2a3..0000000 --- a/dist/darwin_amd64/include/rocksdb/iterator.h +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. -// -// An iterator yields a sequence of key/value pairs from a source. -// The following class defines the interface. Multiple implementations -// are provided by this library. In particular, iterators are provided -// to access the contents of a Table or a DB. -// -// Multiple threads can invoke const methods on an Iterator without -// external synchronization, but if any of the threads may call a -// non-const method, all threads accessing the same Iterator must use -// external synchronization. - -#pragma once - -#include -#include "rocksdb/cleanable.h" -#include "rocksdb/slice.h" -#include "rocksdb/status.h" - -namespace ROCKSDB_NAMESPACE { - -class Iterator : public Cleanable { - public: - Iterator() {} - // No copying allowed - Iterator(const Iterator&) = delete; - void operator=(const Iterator&) = delete; - - virtual ~Iterator() {} - - // An iterator is either positioned at a key/value pair, or - // not valid. This method returns true iff the iterator is valid. 
- // Always returns false if !status().ok(). - virtual bool Valid() const = 0; - - // Position at the first key in the source. The iterator is Valid() - // after this call iff the source is not empty. - virtual void SeekToFirst() = 0; - - // Position at the last key in the source. The iterator is - // Valid() after this call iff the source is not empty. - // Currently incompatible with user timestamp. - virtual void SeekToLast() = 0; - - // Position at the first key in the source that at or past target. - // The iterator is Valid() after this call iff the source contains - // an entry that comes at or past target. - // All Seek*() methods clear any error status() that the iterator had prior to - // the call; after the seek, status() indicates only the error (if any) that - // happened during the seek, not any past errors. - // Target does not contain timestamp. - virtual void Seek(const Slice& target) = 0; - - // Position at the last key in the source that at or before target. - // The iterator is Valid() after this call iff the source contains - // an entry that comes at or before target. - // Currently incompatible with user timestamp. - virtual void SeekForPrev(const Slice& target) = 0; - - // Moves to the next entry in the source. After this call, Valid() is - // true iff the iterator was not positioned at the last entry in the source. - // REQUIRES: Valid() - virtual void Next() = 0; - - // Moves to the previous entry in the source. After this call, Valid() is - // true iff the iterator was not positioned at the first entry in source. - // Currently incompatible with user timestamp. - // REQUIRES: Valid() - virtual void Prev() = 0; - - // Return the key for the current entry. The underlying storage for - // the returned slice is valid only until the next modification of - // the iterator. - // REQUIRES: Valid() - virtual Slice key() const = 0; - - // Return the value for the current entry. 
The underlying storage for - // the returned slice is valid only until the next modification of - // the iterator. - // REQUIRES: Valid() - virtual Slice value() const = 0; - - // If an error has occurred, return it. Else return an ok status. - // If non-blocking IO is requested and this operation cannot be - // satisfied without doing some IO, then this returns Status::Incomplete(). - virtual Status status() const = 0; - - // If supported, renew the iterator to represent the latest state. The - // iterator will be invalidated after the call. Not supported if - // ReadOptions.snapshot is given when creating the iterator. - virtual Status Refresh() { - return Status::NotSupported("Refresh() is not supported"); - } - - // Property "rocksdb.iterator.is-key-pinned": - // If returning "1", this means that the Slice returned by key() is valid - // as long as the iterator is not deleted. - // It is guaranteed to always return "1" if - // - Iterator created with ReadOptions::pin_data = true - // - DB tables were created with - // BlockBasedTableOptions::use_delta_encoding = false. - // Property "rocksdb.iterator.super-version-number": - // LSM version used by the iterator. The same format as DB Property - // kCurrentSuperVersionNumber. See its comment for more information. - // Property "rocksdb.iterator.internal-key": - // Get the user-key portion of the internal key at which the iteration - // stopped. - virtual Status GetProperty(std::string prop_name, std::string* prop); - - virtual Slice timestamp() const { - assert(false); - return Slice(); - } -}; - -// Return an empty iterator (yields nothing). -extern Iterator* NewEmptyIterator(); - -// Return an empty iterator with the specified status. 
-extern Iterator* NewErrorIterator(const Status& status); - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/ldb_tool.h b/dist/darwin_amd64/include/rocksdb/ldb_tool.h deleted file mode 100644 index 22ea773..0000000 --- a/dist/darwin_amd64/include/rocksdb/ldb_tool.h +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -#pragma once -#ifndef ROCKSDB_LITE -#include -#include -#include "rocksdb/db.h" -#include "rocksdb/options.h" - -namespace ROCKSDB_NAMESPACE { - -// An interface for converting a slice to a readable string -class SliceFormatter { - public: - virtual ~SliceFormatter() {} - virtual std::string Format(const Slice& s) const = 0; -}; - -// Options for customizing ldb tool (beyond the DB Options) -struct LDBOptions { - // Create LDBOptions with default values for all fields - LDBOptions(); - - // Key formatter that converts a slice to a readable string. - // Default: Slice::ToString() - std::shared_ptr key_formatter; - - std::string print_help_header = "ldb - RocksDB Tool"; -}; - -class LDBTool { - public: - void Run( - int argc, char** argv, Options db_options = Options(), - const LDBOptions& ldb_options = LDBOptions(), - const std::vector* column_families = nullptr); -}; - -} // namespace ROCKSDB_NAMESPACE - -#endif // ROCKSDB_LITE diff --git a/dist/darwin_amd64/include/rocksdb/listener.h b/dist/darwin_amd64/include/rocksdb/listener.h deleted file mode 100644 index e90a870..0000000 --- a/dist/darwin_amd64/include/rocksdb/listener.h +++ /dev/null @@ -1,551 +0,0 @@ -// Copyright (c) 2014 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. 
-// -// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. - -#pragma once - -#include -#include -#include -#include -#include - -#include "rocksdb/compaction_job_stats.h" -#include "rocksdb/compression_type.h" -#include "rocksdb/status.h" -#include "rocksdb/table_properties.h" - -namespace ROCKSDB_NAMESPACE { - -typedef std::unordered_map> - TablePropertiesCollection; - -class DB; -class ColumnFamilyHandle; -class Status; -struct CompactionJobStats; - -enum class TableFileCreationReason { - kFlush, - kCompaction, - kRecovery, - kMisc, -}; - -struct TableFileCreationBriefInfo { - // the name of the database where the file was created - std::string db_name; - // the name of the column family where the file was created. - std::string cf_name; - // the path to the created file. - std::string file_path; - // the id of the job (which could be flush or compaction) that - // created the file. - int job_id; - // reason of creating the table. - TableFileCreationReason reason; -}; - -struct TableFileCreationInfo : public TableFileCreationBriefInfo { - TableFileCreationInfo() = default; - explicit TableFileCreationInfo(TableProperties&& prop) - : table_properties(prop) {} - // the size of the file. - uint64_t file_size; - // Detailed properties of the created file. - TableProperties table_properties; - // The status indicating whether the creation was successful or not. 
- Status status; - // The checksum of the table file being created - std::string file_checksum; - // The checksum function name of checksum generator used for this table file - std::string file_checksum_func_name; -}; - -enum class CompactionReason : int { - kUnknown = 0, - // [Level] number of L0 files > level0_file_num_compaction_trigger - kLevelL0FilesNum, - // [Level] total size of level > MaxBytesForLevel() - kLevelMaxLevelSize, - // [Universal] Compacting for size amplification - kUniversalSizeAmplification, - // [Universal] Compacting for size ratio - kUniversalSizeRatio, - // [Universal] number of sorted runs > level0_file_num_compaction_trigger - kUniversalSortedRunNum, - // [FIFO] total size > max_table_files_size - kFIFOMaxSize, - // [FIFO] reduce number of files. - kFIFOReduceNumFiles, - // [FIFO] files with creation time < (current_time - interval) - kFIFOTtl, - // Manual compaction - kManualCompaction, - // DB::SuggestCompactRange() marked files for compaction - kFilesMarkedForCompaction, - // [Level] Automatic compaction within bottommost level to cleanup duplicate - // versions of same user key, usually due to a released snapshot. - kBottommostFiles, - // Compaction based on TTL - kTtl, - // According to the comments in flush_job.cc, RocksDB treats flush as - // a level 0 compaction in internal stats. - kFlush, - // Compaction caused by external sst file ingestion - kExternalSstIngestion, - // Compaction due to SST file being too old - kPeriodicCompaction, - // total number of compaction reasons, new reasons must be added above this. 
- kNumOfReasons, -}; - -enum class FlushReason : int { - kOthers = 0x00, - kGetLiveFiles = 0x01, - kShutDown = 0x02, - kExternalFileIngestion = 0x03, - kManualCompaction = 0x04, - kWriteBufferManager = 0x05, - kWriteBufferFull = 0x06, - kTest = 0x07, - kDeleteFiles = 0x08, - kAutoCompaction = 0x09, - kManualFlush = 0x0a, - kErrorRecovery = 0xb, - // When set the flush reason to kErrorRecoveryRetryFlush, SwitchMemtable - // will not be called to avoid many small immutable memtables. - kErrorRecoveryRetryFlush = 0xc, -}; - -enum class BackgroundErrorReason { - kFlush, - kCompaction, - kWriteCallback, - kMemTable, - kManifestWrite, - kFlushNoWAL, -}; - -enum class WriteStallCondition { - kNormal, - kDelayed, - kStopped, -}; - -struct WriteStallInfo { - // the name of the column family - std::string cf_name; - // state of the write controller - struct { - WriteStallCondition cur; - WriteStallCondition prev; - } condition; -}; - -#ifndef ROCKSDB_LITE - -struct TableFileDeletionInfo { - // The name of the database where the file was deleted. - std::string db_name; - // The path to the deleted file. - std::string file_path; - // The id of the job which deleted the file. - int job_id; - // The status indicating whether the deletion was successful or not. 
- Status status; -}; - -enum class FileOperationType { - kRead, - kWrite, - kTruncate, - kClose, - kFlush, - kSync, - kFsync, - kRangeSync -}; - -struct FileOperationInfo { - using Duration = std::chrono::nanoseconds; - using SteadyTimePoint = - std::chrono::time_point; - using SystemTimePoint = - std::chrono::time_point; - using StartTimePoint = std::pair; - using FinishTimePoint = SteadyTimePoint; - - FileOperationType type; - const std::string& path; - uint64_t offset; - size_t length; - const Duration duration; - const SystemTimePoint& start_ts; - Status status; - FileOperationInfo(const FileOperationType _type, const std::string& _path, - const StartTimePoint& _start_ts, - const FinishTimePoint& _finish_ts, const Status& _status) - : type(_type), - path(_path), - duration(std::chrono::duration_cast( - _finish_ts - _start_ts.second)), - start_ts(_start_ts.first), - status(_status) {} - static StartTimePoint StartNow() { - return std::make_pair( - std::chrono::system_clock::now(), std::chrono::steady_clock::now()); - } - static FinishTimePoint FinishNow() { - return std::chrono::steady_clock::now(); - } -}; - -struct FlushJobInfo { - // the id of the column family - uint32_t cf_id; - // the name of the column family - std::string cf_name; - // the path to the newly created file - std::string file_path; - // the file number of the newly created file - uint64_t file_number; - // the oldest blob file referenced by the newly created file - uint64_t oldest_blob_file_number; - // the id of the thread that completed this flush job. - uint64_t thread_id; - // the job id, which is unique in the same thread. - int job_id; - // If true, then rocksdb is currently slowing-down all writes to prevent - // creating too many Level 0 files as compaction seems not able to - // catch up the write request speed. This indicates that there are - // too many files in Level 0. 
- bool triggered_writes_slowdown; - // If true, then rocksdb is currently blocking any writes to prevent - // creating more L0 files. This indicates that there are too many - // files in level 0. Compactions should try to compact L0 files down - // to lower levels as soon as possible. - bool triggered_writes_stop; - // The smallest sequence number in the newly created file - SequenceNumber smallest_seqno; - // The largest sequence number in the newly created file - SequenceNumber largest_seqno; - // Table properties of the table being flushed - TableProperties table_properties; - - FlushReason flush_reason; -}; - -struct CompactionFileInfo { - // The level of the file. - int level; - - // The file number of the file. - uint64_t file_number; - - // The file number of the oldest blob file this SST file references. - uint64_t oldest_blob_file_number; -}; - -struct CompactionJobInfo { - ~CompactionJobInfo() { status.PermitUncheckedError(); } - // the id of the column family where the compaction happened. - uint32_t cf_id; - // the name of the column family where the compaction happened. - std::string cf_name; - // the status indicating whether the compaction was successful or not. - Status status; - // the id of the thread that completed this compaction job. - uint64_t thread_id; - // the job id, which is unique in the same thread. - int job_id; - // the smallest input level of the compaction. - int base_input_level; - // the output level of the compaction. - int output_level; - - // The following variables contain information about compaction inputs - // and outputs. A file may appear in both the input and output lists - // if it was simply moved to a different level. The order of elements - // is the same across input_files and input_file_infos; similarly, it is - // the same across output_files and output_file_infos. - - // The names of the compaction input files. - std::vector input_files; - - // Additional information about the compaction input files. 
- std::vector input_file_infos; - - // The names of the compaction output files. - std::vector output_files; - - // Additional information about the compaction output files. - std::vector output_file_infos; - - // Table properties for input and output tables. - // The map is keyed by values from input_files and output_files. - TablePropertiesCollection table_properties; - - // Reason to run the compaction - CompactionReason compaction_reason; - - // Compression algorithm used for output files - CompressionType compression; - - // Statistics and other additional details on the compaction - CompactionJobStats stats; -}; - -struct MemTableInfo { - // the name of the column family to which memtable belongs - std::string cf_name; - // Sequence number of the first element that was inserted - // into the memtable. - SequenceNumber first_seqno; - // Sequence number that is guaranteed to be smaller than or equal - // to the sequence number of any key that could be inserted into this - // memtable. It can then be assumed that any write with a larger(or equal) - // sequence number will be present in this memtable or a later memtable. - SequenceNumber earliest_seqno; - // Total number of entries in memtable - uint64_t num_entries; - // Total number of deletes in memtable - uint64_t num_deletes; -}; - -struct ExternalFileIngestionInfo { - // the name of the column family - std::string cf_name; - // Path of the file outside the DB - std::string external_file_path; - // Path of the file inside the DB - std::string internal_file_path; - // The global sequence number assigned to keys in this file - SequenceNumber global_seqno; - // Table properties of the table being flushed - TableProperties table_properties; -}; - -// EventListener class contains a set of callback functions that will -// be called when specific RocksDB event happens such as flush. It can -// be used as a building block for developing custom features such as -// stats-collector or external compaction algorithm. 
-// -// Note that callback functions should not run for an extended period of -// time before the function returns, otherwise RocksDB may be blocked. -// For example, it is not suggested to do DB::CompactFiles() (as it may -// run for a long while) or issue many of DB::Put() (as Put may be blocked -// in certain cases) in the same thread in the EventListener callback. -// However, doing DB::CompactFiles() and DB::Put() in another thread is -// considered safe. -// -// [Threading] All EventListener callback will be called using the -// actual thread that involves in that specific event. For example, it -// is the RocksDB background flush thread that does the actual flush to -// call EventListener::OnFlushCompleted(). -// -// [Locking] All EventListener callbacks are designed to be called without -// the current thread holding any DB mutex. This is to prevent potential -// deadlock and performance issue when using EventListener callback -// in a complex way. -class EventListener { - public: - // A callback function to RocksDB which will be called whenever a - // registered RocksDB flushes a file. The default implementation is - // no-op. - // - // Note that the this function must be implemented in a way such that - // it should not run for an extended period of time before the function - // returns. Otherwise, RocksDB may be blocked. - virtual void OnFlushCompleted(DB* /*db*/, - const FlushJobInfo& /*flush_job_info*/) {} - - // A callback function to RocksDB which will be called before a - // RocksDB starts to flush memtables. The default implementation is - // no-op. - // - // Note that the this function must be implemented in a way such that - // it should not run for an extended period of time before the function - // returns. Otherwise, RocksDB may be blocked. - virtual void OnFlushBegin(DB* /*db*/, - const FlushJobInfo& /*flush_job_info*/) {} - - // A callback function for RocksDB which will be called whenever - // a SST file is deleted. 
Different from OnCompactionCompleted and - // OnFlushCompleted, this callback is designed for external logging - // service and thus only provide string parameters instead - // of a pointer to DB. Applications that build logic basic based - // on file creations and deletions is suggested to implement - // OnFlushCompleted and OnCompactionCompleted. - // - // Note that if applications would like to use the passed reference - // outside this function call, they should make copies from the - // returned value. - virtual void OnTableFileDeleted(const TableFileDeletionInfo& /*info*/) {} - - // A callback function to RocksDB which will be called before a - // RocksDB starts to compact. The default implementation is - // no-op. - // - // Note that the this function must be implemented in a way such that - // it should not run for an extended period of time before the function - // returns. Otherwise, RocksDB may be blocked. - virtual void OnCompactionBegin(DB* /*db*/, const CompactionJobInfo& /*ci*/) {} - - // A callback function for RocksDB which will be called whenever - // a registered RocksDB compacts a file. The default implementation - // is a no-op. - // - // Note that this function must be implemented in a way such that - // it should not run for an extended period of time before the function - // returns. Otherwise, RocksDB may be blocked. - // - // @param db a pointer to the rocksdb instance which just compacted - // a file. - // @param ci a reference to a CompactionJobInfo struct. 'ci' is released - // after this function is returned, and must be copied if it is needed - // outside of this function. - virtual void OnCompactionCompleted(DB* /*db*/, - const CompactionJobInfo& /*ci*/) {} - - // A callback function for RocksDB which will be called whenever - // a SST file is created. 
Different from OnCompactionCompleted and - // OnFlushCompleted, this callback is designed for external logging - // service and thus only provide string parameters instead - // of a pointer to DB. Applications that build logic basic based - // on file creations and deletions is suggested to implement - // OnFlushCompleted and OnCompactionCompleted. - // - // Historically it will only be called if the file is successfully created. - // Now it will also be called on failure case. User can check info.status - // to see if it succeeded or not. - // - // Note that if applications would like to use the passed reference - // outside this function call, they should make copies from these - // returned value. - virtual void OnTableFileCreated(const TableFileCreationInfo& /*info*/) {} - - // A callback function for RocksDB which will be called before - // a SST file is being created. It will follow by OnTableFileCreated after - // the creation finishes. - // - // Note that if applications would like to use the passed reference - // outside this function call, they should make copies from these - // returned value. - virtual void OnTableFileCreationStarted( - const TableFileCreationBriefInfo& /*info*/) {} - - // A callback function for RocksDB which will be called before - // a memtable is made immutable. - // - // Note that the this function must be implemented in a way such that - // it should not run for an extended period of time before the function - // returns. Otherwise, RocksDB may be blocked. - // - // Note that if applications would like to use the passed reference - // outside this function call, they should make copies from these - // returned value. - virtual void OnMemTableSealed(const MemTableInfo& /*info*/) {} - - // A callback function for RocksDB which will be called before - // a column family handle is deleted. 
- // - // Note that the this function must be implemented in a way such that - // it should not run for an extended period of time before the function - // returns. Otherwise, RocksDB may be blocked. - // @param handle is a pointer to the column family handle to be deleted - // which will become a dangling pointer after the deletion. - virtual void OnColumnFamilyHandleDeletionStarted( - ColumnFamilyHandle* /*handle*/) {} - - // A callback function for RocksDB which will be called after an external - // file is ingested using IngestExternalFile. - // - // Note that the this function will run on the same thread as - // IngestExternalFile(), if this function is blocked, IngestExternalFile() - // will be blocked from finishing. - virtual void OnExternalFileIngested( - DB* /*db*/, const ExternalFileIngestionInfo& /*info*/) {} - - // A callback function for RocksDB which will be called before setting the - // background error status to a non-OK value. The new background error status - // is provided in `bg_error` and can be modified by the callback. E.g., a - // callback can suppress errors by resetting it to Status::OK(), thus - // preventing the database from entering read-only mode. We do not provide any - // guarantee when failed flushes/compactions will be rescheduled if the user - // suppresses an error. - // - // Note that this function can run on the same threads as flush, compaction, - // and user writes. So, it is extremely important not to perform heavy - // computations or blocking calls in this function. - virtual void OnBackgroundError(BackgroundErrorReason /* reason */, - Status* /* bg_error */) {} - - // A callback function for RocksDB which will be called whenever a change - // of superversion triggers a change of the stall conditions. - // - // Note that the this function must be implemented in a way such that - // it should not run for an extended period of time before the function - // returns. Otherwise, RocksDB may be blocked. 
- virtual void OnStallConditionsChanged(const WriteStallInfo& /*info*/) {} - - // A callback function for RocksDB which will be called whenever a file read - // operation finishes. - virtual void OnFileReadFinish(const FileOperationInfo& /* info */) {} - - // A callback function for RocksDB which will be called whenever a file write - // operation finishes. - virtual void OnFileWriteFinish(const FileOperationInfo& /* info */) {} - - // A callback function for RocksDB which will be called whenever a file flush - // operation finishes. - virtual void OnFileFlushFinish(const FileOperationInfo& /* info */) {} - - // A callback function for RocksDB which will be called whenever a file sync - // operation finishes. - virtual void OnFileSyncFinish(const FileOperationInfo& /* info */) {} - - // A callback function for RocksDB which will be called whenever a file - // rangeSync operation finishes. - virtual void OnFileRangeSyncFinish(const FileOperationInfo& /* info */) {} - - // A callback function for RocksDB which will be called whenever a file - // truncate operation finishes. - virtual void OnFileTruncateFinish(const FileOperationInfo& /* info */) {} - - // A callback function for RocksDB which will be called whenever a file close - // operation finishes. - virtual void OnFileCloseFinish(const FileOperationInfo& /* info */) {} - - // If true, the OnFile*Finish functions will be called. If - // false, then they won't be called. - virtual bool ShouldBeNotifiedOnFileIO() { return false; } - - // A callback function for RocksDB which will be called just before - // starting the automatic recovery process for recoverable background - // errors, such as NoSpace(). The callback can suppress the automatic - // recovery by setting *auto_recovery to false. 
The database will then - // have to be transitioned out of read-only mode by calling DB::Resume() - virtual void OnErrorRecoveryBegin(BackgroundErrorReason /* reason */, - Status /* bg_error */, - bool* /* auto_recovery */) {} - - // A callback function for RocksDB which will be called once the database - // is recovered from read-only mode after an error. When this is called, it - // means normal writes to the database can be issued and the user can - // initiate any further recovery actions needed - virtual void OnErrorRecoveryCompleted(Status /* old_bg_error */) {} - - virtual ~EventListener() {} -}; - -#else - -class EventListener {}; -struct FlushJobInfo {}; - -#endif // ROCKSDB_LITE - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/memory_allocator.h b/dist/darwin_amd64/include/rocksdb/memory_allocator.h deleted file mode 100644 index 60256a9..0000000 --- a/dist/darwin_amd64/include/rocksdb/memory_allocator.h +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#include "rocksdb/status.h" - -#include - -namespace ROCKSDB_NAMESPACE { - -// MemoryAllocator is an interface that a client can implement to supply custom -// memory allocation and deallocation methods. See rocksdb/cache.h for more -// information. -// All methods should be thread-safe. -class MemoryAllocator { - public: - virtual ~MemoryAllocator() = default; - - // Name of the cache allocator, printed in the log - virtual const char* Name() const = 0; - - // Allocate a block of at least size. Has to be thread-safe. - virtual void* Allocate(size_t size) = 0; - - // Deallocate previously allocated block. Has to be thread-safe. 
- virtual void Deallocate(void* p) = 0; - - // Returns the memory size of the block allocated at p. The default - // implementation that just returns the original allocation_size is fine. - virtual size_t UsableSize(void* /*p*/, size_t allocation_size) const { - // default implementation just returns the allocation size - return allocation_size; - } -}; - -struct JemallocAllocatorOptions { - // Jemalloc tcache cache allocations by size class. For each size class, - // it caches between 20 (for large size classes) to 200 (for small size - // classes). To reduce tcache memory usage in case the allocator is access - // by large number of threads, we can control whether to cache an allocation - // by its size. - bool limit_tcache_size = false; - - // Lower bound of allocation size to use tcache, if limit_tcache_size=true. - // When used with block cache, it is recommneded to set it to block_size/4. - size_t tcache_size_lower_bound = 1024; - - // Upper bound of allocation size to use tcache, if limit_tcache_size=true. - // When used with block cache, it is recommneded to set it to block_size. - size_t tcache_size_upper_bound = 16 * 1024; -}; - -// Generate memory allocators which allocates through Jemalloc and utilize -// MADV_DONTDUMP through madvice to exclude cache items from core dump. -// Applications can use the allocator with block cache to exclude block cache -// usage from core dump. -// -// Implementation details: -// The JemallocNodumpAllocator creates a delicated jemalloc arena, and all -// allocations of the JemallocNodumpAllocator is through the same arena. -// The memory allocator hooks memory allocation of the arena, and call -// madvice() with MADV_DONTDUMP flag to exclude the piece of memory from -// core dump. Side benefit of using single arena would be reduce of jemalloc -// metadata for some workload. 
-// -// To mitigate mutex contention for using one single arena, jemalloc tcache -// (thread-local cache) is enabled to cache unused allocations for future use. -// The tcache normally incur 0.5M extra memory usage per-thread. The usage -// can be reduce by limitting allocation sizes to cache. -extern Status NewJemallocNodumpAllocator( - JemallocAllocatorOptions& options, - std::shared_ptr* memory_allocator); - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/memtablerep.h b/dist/darwin_amd64/include/rocksdb/memtablerep.h deleted file mode 100644 index 4972326..0000000 --- a/dist/darwin_amd64/include/rocksdb/memtablerep.h +++ /dev/null @@ -1,385 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// -// This file contains the interface that must be implemented by any collection -// to be used as the backing store for a MemTable. Such a collection must -// satisfy the following properties: -// (1) It does not store duplicate items. -// (2) It uses MemTableRep::KeyComparator to compare items for iteration and -// equality. -// (3) It can be accessed concurrently by multiple readers and can support -// during reads. However, it needn't support multiple concurrent writes. -// (4) Items are never deleted. -// The liberal use of assertions is encouraged to enforce (1). -// -// The factory will be passed an MemTableAllocator object when a new MemTableRep -// is requested. -// -// Users can implement their own memtable representations. We include three -// types built in: -// - SkipListRep: This is the default; it is backed by a skip list. 
-// - HashSkipListRep: The memtable rep that is best used for keys that are -// structured like "prefix:suffix" where iteration within a prefix is -// common and iteration across different prefixes is rare. It is backed by -// a hash map where each bucket is a skip list. -// - VectorRep: This is backed by an unordered std::vector. On iteration, the -// vector is sorted. It is intelligent about sorting; once the MarkReadOnly() -// has been called, the vector will only be sorted once. It is optimized for -// random-write-heavy workloads. -// -// The last four implementations are designed for situations in which -// iteration over the entire collection is rare since doing so requires all the -// keys to be copied into a sorted data structure. - -#pragma once - -#include -#include -#include -#include -#include - -namespace ROCKSDB_NAMESPACE { - -class Arena; -class Allocator; -class LookupKey; -class SliceTransform; -class Logger; - -typedef void* KeyHandle; - -extern Slice GetLengthPrefixedSlice(const char* data); - -class MemTableRep { - public: - // KeyComparator provides a means to compare keys, which are internal keys - // concatenated with values. - class KeyComparator { - public: - typedef ROCKSDB_NAMESPACE::Slice DecodedType; - - virtual DecodedType decode_key(const char* key) const { - // The format of key is frozen and can be terated as a part of the API - // contract. Refer to MemTable::Add for details. - return GetLengthPrefixedSlice(key); - } - - // Compare a and b. Return a negative value if a is less than b, 0 if they - // are equal, and a positive value if a is greater than b - virtual int operator()(const char* prefix_len_key1, - const char* prefix_len_key2) const = 0; - - virtual int operator()(const char* prefix_len_key, - const Slice& key) const = 0; - - virtual ~KeyComparator() {} - }; - - explicit MemTableRep(Allocator* allocator) : allocator_(allocator) {} - - // Allocate a buf of len size for storing key. 
The idea is that a - // specific memtable representation knows its underlying data structure - // better. By allowing it to allocate memory, it can possibly put - // correlated stuff in consecutive memory area to make processor - // prefetching more efficient. - virtual KeyHandle Allocate(const size_t len, char** buf); - - // Insert key into the collection. (The caller will pack key and value into a - // single buffer and pass that in as the parameter to Insert). - // REQUIRES: nothing that compares equal to key is currently in the - // collection, and no concurrent modifications to the table in progress - virtual void Insert(KeyHandle handle) = 0; - - // Same as ::Insert - // Returns false if MemTableRepFactory::CanHandleDuplicatedKey() is true and - // the already exists. - virtual bool InsertKey(KeyHandle handle) { - Insert(handle); - return true; - } - - // Same as Insert(), but in additional pass a hint to insert location for - // the key. If hint points to nullptr, a new hint will be populated. - // otherwise the hint will be updated to reflect the last insert location. - // - // Currently only skip-list based memtable implement the interface. Other - // implementations will fallback to Insert() by default. - virtual void InsertWithHint(KeyHandle handle, void** /*hint*/) { - // Ignore the hint by default. - Insert(handle); - } - - // Same as ::InsertWithHint - // Returns false if MemTableRepFactory::CanHandleDuplicatedKey() is true and - // the already exists. - virtual bool InsertKeyWithHint(KeyHandle handle, void** hint) { - InsertWithHint(handle, hint); - return true; - } - - // Same as ::InsertWithHint, but allow concurrnet write - // - // If hint points to nullptr, a new hint will be allocated on heap, otherwise - // the hint will be updated to reflect the last insert location. The hint is - // owned by the caller and it is the caller's responsibility to delete the - // hint later. 
- // - // Currently only skip-list based memtable implement the interface. Other - // implementations will fallback to InsertConcurrently() by default. - virtual void InsertWithHintConcurrently(KeyHandle handle, void** /*hint*/) { - // Ignore the hint by default. - InsertConcurrently(handle); - } - - // Same as ::InsertWithHintConcurrently - // Returns false if MemTableRepFactory::CanHandleDuplicatedKey() is true and - // the already exists. - virtual bool InsertKeyWithHintConcurrently(KeyHandle handle, void** hint) { - InsertWithHintConcurrently(handle, hint); - return true; - } - - // Like Insert(handle), but may be called concurrent with other calls - // to InsertConcurrently for other handles. - // - // Returns false if MemTableRepFactory::CanHandleDuplicatedKey() is true and - // the already exists. - virtual void InsertConcurrently(KeyHandle handle); - - // Same as ::InsertConcurrently - // Returns false if MemTableRepFactory::CanHandleDuplicatedKey() is true and - // the already exists. - virtual bool InsertKeyConcurrently(KeyHandle handle) { - InsertConcurrently(handle); - return true; - } - - // Returns true iff an entry that compares equal to key is in the collection. - virtual bool Contains(const char* key) const = 0; - - // Notify this table rep that it will no longer be added to. By default, - // does nothing. After MarkReadOnly() is called, this table rep will - // not be written to (ie No more calls to Allocate(), Insert(), - // or any writes done directly to entries accessed through the iterator.) - virtual void MarkReadOnly() {} - - // Notify this table rep that it has been flushed to stable storage. - // By default, does nothing. - // - // Invariant: MarkReadOnly() is called, before MarkFlushed(). - // Note that this method if overridden, should not run for an extended period - // of time. Otherwise, RocksDB may be blocked. 
- virtual void MarkFlushed() {} - - // Look up key from the mem table, since the first key in the mem table whose - // user_key matches the one given k, call the function callback_func(), with - // callback_args directly forwarded as the first parameter, and the mem table - // key as the second parameter. If the return value is false, then terminates. - // Otherwise, go through the next key. - // - // It's safe for Get() to terminate after having finished all the potential - // key for the k.user_key(), or not. - // - // Default: - // Get() function with a default value of dynamically construct an iterator, - // seek and call the call back function. - virtual void Get(const LookupKey& k, void* callback_args, - bool (*callback_func)(void* arg, const char* entry)); - - virtual uint64_t ApproximateNumEntries(const Slice& /*start_ikey*/, - const Slice& /*end_key*/) { - return 0; - } - - // Report an approximation of how much memory has been used other than memory - // that was allocated through the allocator. Safe to call from any thread. - virtual size_t ApproximateMemoryUsage() = 0; - - virtual ~MemTableRep() {} - - // Iteration over the contents of a skip collection - class Iterator { - public: - // Initialize an iterator over the specified collection. - // The returned iterator is not valid. - // explicit Iterator(const MemTableRep* collection); - virtual ~Iterator() {} - - // Returns true iff the iterator is positioned at a valid node. - virtual bool Valid() const = 0; - - // Returns the key at the current position. - // REQUIRES: Valid() - virtual const char* key() const = 0; - - // Advances to the next position. - // REQUIRES: Valid() - virtual void Next() = 0; - - // Advances to the previous position. 
- // REQUIRES: Valid() - virtual void Prev() = 0; - - // Advance to the first entry with a key >= target - virtual void Seek(const Slice& internal_key, const char* memtable_key) = 0; - - // retreat to the first entry with a key <= target - virtual void SeekForPrev(const Slice& internal_key, - const char* memtable_key) = 0; - - // Position at the first entry in collection. - // Final state of iterator is Valid() iff collection is not empty. - virtual void SeekToFirst() = 0; - - // Position at the last entry in collection. - // Final state of iterator is Valid() iff collection is not empty. - virtual void SeekToLast() = 0; - }; - - // Return an iterator over the keys in this representation. - // arena: If not null, the arena needs to be used to allocate the Iterator. - // When destroying the iterator, the caller will not call "delete" - // but Iterator::~Iterator() directly. The destructor needs to destroy - // all the states but those allocated in arena. - virtual Iterator* GetIterator(Arena* arena = nullptr) = 0; - - // Return an iterator that has a special Seek semantics. The result of - // a Seek might only include keys with the same prefix as the target key. - // arena: If not null, the arena is used to allocate the Iterator. - // When destroying the iterator, the caller will not call "delete" - // but Iterator::~Iterator() directly. The destructor needs to destroy - // all the states but those allocated in arena. - virtual Iterator* GetDynamicPrefixIterator(Arena* arena = nullptr) { - return GetIterator(arena); - } - - // Return true if the current MemTableRep supports merge operator. - // Default: true - virtual bool IsMergeOperatorSupported() const { return true; } - - // Return true if the current MemTableRep supports snapshot - // Default: true - virtual bool IsSnapshotSupported() const { return true; } - - protected: - // When *key is an internal key concatenated with the value, returns the - // user key. 
- virtual Slice UserKey(const char* key) const; - - Allocator* allocator_; -}; - -// This is the base class for all factories that are used by RocksDB to create -// new MemTableRep objects -class MemTableRepFactory { - public: - virtual ~MemTableRepFactory() {} - - virtual MemTableRep* CreateMemTableRep(const MemTableRep::KeyComparator&, - Allocator*, const SliceTransform*, - Logger* logger) = 0; - virtual MemTableRep* CreateMemTableRep( - const MemTableRep::KeyComparator& key_cmp, Allocator* allocator, - const SliceTransform* slice_transform, Logger* logger, - uint32_t /* column_family_id */) { - return CreateMemTableRep(key_cmp, allocator, slice_transform, logger); - } - - virtual const char* Name() const = 0; - - // Return true if the current MemTableRep supports concurrent inserts - // Default: false - virtual bool IsInsertConcurrentlySupported() const { return false; } - - // Return true if the current MemTableRep supports detecting duplicate - // at insertion time. If true, then MemTableRep::Insert* returns - // false when if the already exists. - // Default: false - virtual bool CanHandleDuplicatedKey() const { return false; } -}; - -// This uses a skip list to store keys. It is the default. -// -// Parameters: -// lookahead: If non-zero, each iterator's seek operation will start the -// search from the previously visited record (doing at most 'lookahead' -// steps). This is an optimization for the access pattern including many -// seeks with consecutive keys. 
-class SkipListFactory : public MemTableRepFactory { - public: - explicit SkipListFactory(size_t lookahead = 0) : lookahead_(lookahead) {} - - using MemTableRepFactory::CreateMemTableRep; - virtual MemTableRep* CreateMemTableRep(const MemTableRep::KeyComparator&, - Allocator*, const SliceTransform*, - Logger* logger) override; - virtual const char* Name() const override { return "SkipListFactory"; } - - bool IsInsertConcurrentlySupported() const override { return true; } - - bool CanHandleDuplicatedKey() const override { return true; } - - private: - const size_t lookahead_; -}; - -#ifndef ROCKSDB_LITE -// This creates MemTableReps that are backed by an std::vector. On iteration, -// the vector is sorted. This is useful for workloads where iteration is very -// rare and writes are generally not issued after reads begin. -// -// Parameters: -// count: Passed to the constructor of the underlying std::vector of each -// VectorRep. On initialization, the underlying array will be at least count -// bytes reserved for usage. -class VectorRepFactory : public MemTableRepFactory { - const size_t count_; - - public: - explicit VectorRepFactory(size_t count = 0) : count_(count) {} - - using MemTableRepFactory::CreateMemTableRep; - virtual MemTableRep* CreateMemTableRep(const MemTableRep::KeyComparator&, - Allocator*, const SliceTransform*, - Logger* logger) override; - - virtual const char* Name() const override { return "VectorRepFactory"; } -}; - -// This class contains a fixed array of buckets, each -// pointing to a skiplist (null if the bucket is empty). 
-// bucket_count: number of fixed array buckets -// skiplist_height: the max height of the skiplist -// skiplist_branching_factor: probabilistic size ratio between adjacent -// link lists in the skiplist -extern MemTableRepFactory* NewHashSkipListRepFactory( - size_t bucket_count = 1000000, int32_t skiplist_height = 4, - int32_t skiplist_branching_factor = 4); - -// The factory is to create memtables based on a hash table: -// it contains a fixed array of buckets, each pointing to either a linked list -// or a skip list if number of entries inside the bucket exceeds -// threshold_use_skiplist. -// @bucket_count: number of fixed array buckets -// @huge_page_tlb_size: if <=0, allocate the hash table bytes from malloc. -// Otherwise from huge page TLB. The user needs to reserve -// huge pages for it to be allocated, like: -// sysctl -w vm.nr_hugepages=20 -// See linux doc Documentation/vm/hugetlbpage.txt -// @bucket_entries_logging_threshold: if number of entries in one bucket -// exceeds this number, log about it. -// @if_log_bucket_dist_when_flash: if true, log distribution of number of -// entries when flushing. -// @threshold_use_skiplist: a bucket switches to skip list if number of -// entries exceed this parameter. -extern MemTableRepFactory* NewHashLinkListRepFactory( - size_t bucket_count = 50000, size_t huge_page_tlb_size = 0, - int bucket_entries_logging_threshold = 4096, - bool if_log_bucket_dist_when_flash = true, - uint32_t threshold_use_skiplist = 256); - -#endif // ROCKSDB_LITE -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/merge_operator.h b/dist/darwin_amd64/include/rocksdb/merge_operator.h deleted file mode 100644 index a0a99fc..0000000 --- a/dist/darwin_amd64/include/rocksdb/merge_operator.h +++ /dev/null @@ -1,257 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
-// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#include -#include -#include -#include - -#include "rocksdb/slice.h" - -namespace ROCKSDB_NAMESPACE { - -class Slice; -class Logger; - -// The Merge Operator -// -// Essentially, a MergeOperator specifies the SEMANTICS of a merge, which only -// client knows. It could be numeric addition, list append, string -// concatenation, edit data structure, ... , anything. -// The library, on the other hand, is concerned with the exercise of this -// interface, at the right time (during get, iteration, compaction...) -// -// To use merge, the client needs to provide an object implementing one of -// the following interfaces: -// a) AssociativeMergeOperator - for most simple semantics (always take -// two values, and merge them into one value, which is then put back -// into rocksdb); numeric addition and string concatenation are examples; -// -// b) MergeOperator - the generic class for all the more abstract / complex -// operations; one method (FullMergeV2) to merge a Put/Delete value with a -// merge operand; and another method (PartialMerge) that merges multiple -// operands together. this is especially useful if your key values have -// complex structures but you would still like to support client-specific -// incremental updates. -// -// AssociativeMergeOperator is simpler to implement. MergeOperator is simply -// more powerful. -// -// Refer to rocksdb-merge wiki for more details and example implementations. -// -class MergeOperator { - public: - virtual ~MergeOperator() {} - static const char* Type() { return "MergeOperator"; } - - // Gives the client a way to express the read -> modify -> write semantics - // key: (IN) The key that's associated with this merge operation. 
- // Client could multiplex the merge operator based on it - // if the key space is partitioned and different subspaces - // refer to different types of data which have different - // merge operation semantics - // existing: (IN) null indicates that the key does not exist before this op - // operand_list:(IN) the sequence of merge operations to apply, front() first. - // new_value:(OUT) Client is responsible for filling the merge result here. - // The string that new_value is pointing to will be empty. - // logger: (IN) Client could use this to log errors during merge. - // - // Return true on success. - // All values passed in will be client-specific values. So if this method - // returns false, it is because client specified bad data or there was - // internal corruption. This will be treated as an error by the library. - // - // Also make use of the *logger for error messages. - virtual bool FullMerge(const Slice& /*key*/, const Slice* /*existing_value*/, - const std::deque& /*operand_list*/, - std::string* /*new_value*/, Logger* /*logger*/) const { - // deprecated, please use FullMergeV2() - assert(false); - return false; - } - - struct MergeOperationInput { - explicit MergeOperationInput(const Slice& _key, - const Slice* _existing_value, - const std::vector& _operand_list, - Logger* _logger) - : key(_key), - existing_value(_existing_value), - operand_list(_operand_list), - logger(_logger) {} - - // The key associated with the merge operation. - const Slice& key; - // The existing value of the current key, nullptr means that the - // value doesn't exist. - const Slice* existing_value; - // A list of operands to apply. - const std::vector& operand_list; - // Logger could be used by client to log any errors that happen during - // the merge operation. 
- Logger* logger; - }; - - struct MergeOperationOutput { - explicit MergeOperationOutput(std::string& _new_value, - Slice& _existing_operand) - : new_value(_new_value), existing_operand(_existing_operand) {} - - // Client is responsible for filling the merge result here. - std::string& new_value; - // If the merge result is one of the existing operands (or existing_value), - // client can set this field to the operand (or existing_value) instead of - // using new_value. - Slice& existing_operand; - }; - - // This function applies a stack of merge operands in chrionological order - // on top of an existing value. There are two ways in which this method is - // being used: - // a) During Get() operation, it used to calculate the final value of a key - // b) During compaction, in order to collapse some operands with the based - // value. - // - // Note: The name of the method is somewhat misleading, as both in the cases - // of Get() or compaction it may be called on a subset of operands: - // K: 0 +1 +2 +7 +4 +5 2 +1 +2 - // ^ - // | - // snapshot - // In the example above, Get(K) operation will call FullMerge with a base - // value of 2 and operands [+1, +2]. Compaction process might decide to - // collapse the beginning of the history up to the snapshot by performing - // full Merge with base value of 0 and operands [+1, +2, +7, +4]. - virtual bool FullMergeV2(const MergeOperationInput& merge_in, - MergeOperationOutput* merge_out) const; - - // This function performs merge(left_op, right_op) - // when both the operands are themselves merge operation types - // that you would have passed to a DB::Merge() call in the same order - // (i.e.: DB::Merge(key,left_op), followed by DB::Merge(key,right_op)). - // - // PartialMerge should combine them into a single merge operation that is - // saved into *new_value, and then it should return true. 
- // *new_value should be constructed such that a call to - // DB::Merge(key, *new_value) would yield the same result as a call - // to DB::Merge(key, left_op) followed by DB::Merge(key, right_op). - // - // The string that new_value is pointing to will be empty. - // - // The default implementation of PartialMergeMulti will use this function - // as a helper, for backward compatibility. Any successor class of - // MergeOperator should either implement PartialMerge or PartialMergeMulti, - // although implementing PartialMergeMulti is suggested as it is in general - // more effective to merge multiple operands at a time instead of two - // operands at a time. - // - // If it is impossible or infeasible to combine the two operations, - // leave new_value unchanged and return false. The library will - // internally keep track of the operations, and apply them in the - // correct order once a base-value (a Put/Delete/End-of-Database) is seen. - // - // TODO: Presently there is no way to differentiate between error/corruption - // and simply "return false". For now, the client should simply return - // false in any case it cannot perform partial-merge, regardless of reason. - // If there is corruption in the data, handle it in the FullMergeV2() function - // and return false there. The default implementation of PartialMerge will - // always return false. - virtual bool PartialMerge(const Slice& /*key*/, const Slice& /*left_operand*/, - const Slice& /*right_operand*/, - std::string* /*new_value*/, - Logger* /*logger*/) const { - return false; - } - - // This function performs merge when all the operands are themselves merge - // operation types that you would have passed to a DB::Merge() call in the - // same order (front() first) - // (i.e. DB::Merge(key, operand_list[0]), followed by - // DB::Merge(key, operand_list[1]), ...) - // - // PartialMergeMulti should combine them into a single merge operation that is - // saved into *new_value, and then it should return true. 
*new_value should - // be constructed such that a call to DB::Merge(key, *new_value) would yield - // the same result as subquential individual calls to DB::Merge(key, operand) - // for each operand in operand_list from front() to back(). - // - // The string that new_value is pointing to will be empty. - // - // The PartialMergeMulti function will be called when there are at least two - // operands. - // - // In the default implementation, PartialMergeMulti will invoke PartialMerge - // multiple times, where each time it only merges two operands. Developers - // should either implement PartialMergeMulti, or implement PartialMerge which - // is served as the helper function of the default PartialMergeMulti. - virtual bool PartialMergeMulti(const Slice& key, - const std::deque& operand_list, - std::string* new_value, Logger* logger) const; - - // The name of the MergeOperator. Used to check for MergeOperator - // mismatches (i.e., a DB created with one MergeOperator is - // accessed using a different MergeOperator) - // TODO: the name is currently not stored persistently and thus - // no checking is enforced. Client is responsible for providing - // consistent MergeOperator between DB opens. - virtual const char* Name() const = 0; - - // Determines whether the PartialMerge can be called with just a single - // merge operand. - // Override and return true for allowing a single operand. PartialMerge - // and PartialMergeMulti should be overridden and implemented - // correctly to properly handle a single operand. - virtual bool AllowSingleOperand() const { return false; } - - // Allows to control when to invoke a full merge during Get. - // This could be used to limit the number of merge operands that are looked at - // during a point lookup, thereby helping in limiting the number of levels to - // read from. - // Doesn't help with iterators. 
- // - // Note: the merge operands are passed to this function in the reversed order - // relative to how they were merged (passed to FullMerge or FullMergeV2) - // for performance reasons, see also: - // https://github.com/facebook/rocksdb/issues/3865 - virtual bool ShouldMerge(const std::vector& /*operands*/) const { - return false; - } -}; - -// The simpler, associative merge operator. -class AssociativeMergeOperator : public MergeOperator { - public: - ~AssociativeMergeOperator() override {} - - // Gives the client a way to express the read -> modify -> write semantics - // key: (IN) The key that's associated with this merge operation. - // existing_value:(IN) null indicates the key does not exist before this op - // value: (IN) the value to update/merge the existing_value with - // new_value: (OUT) Client is responsible for filling the merge result - // here. The string that new_value is pointing to will be empty. - // logger: (IN) Client could use this to log errors during merge. - // - // Return true on success. - // All values passed in will be client-specific values. So if this method - // returns false, it is because client specified bad data or there was - // internal corruption. The client should assume that this will be treated - // as an error by the library. 
- virtual bool Merge(const Slice& key, const Slice* existing_value, - const Slice& value, std::string* new_value, - Logger* logger) const = 0; - - private: - // Default implementations of the MergeOperator functions - bool FullMergeV2(const MergeOperationInput& merge_in, - MergeOperationOutput* merge_out) const override; - - bool PartialMerge(const Slice& key, const Slice& left_operand, - const Slice& right_operand, std::string* new_value, - Logger* logger) const override; -}; - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/metadata.h b/dist/darwin_amd64/include/rocksdb/metadata.h deleted file mode 100644 index 9a64a7a..0000000 --- a/dist/darwin_amd64/include/rocksdb/metadata.h +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#include - -#include -#include -#include - -#include "rocksdb/types.h" - -namespace ROCKSDB_NAMESPACE { -struct ColumnFamilyMetaData; -struct LevelMetaData; -struct SstFileMetaData; - -// The metadata that describes a column family. -struct ColumnFamilyMetaData { - ColumnFamilyMetaData() : size(0), file_count(0), name("") {} - ColumnFamilyMetaData(const std::string& _name, uint64_t _size, - const std::vector&& _levels) - : size(_size), name(_name), levels(_levels) {} - - // The size of this column family in bytes, which is equal to the sum of - // the file size of its "levels". - uint64_t size; - // The number of files in this column family. - size_t file_count; - // The name of the column family. - std::string name; - // The metadata of all levels in this column family. - std::vector levels; -}; - -// The metadata that describes a level. 
-struct LevelMetaData { - LevelMetaData(int _level, uint64_t _size, - const std::vector&& _files) - : level(_level), size(_size), files(_files) {} - - // The level which this meta data describes. - const int level; - // The size of this level in bytes, which is equal to the sum of - // the file size of its "files". - const uint64_t size; - // The metadata of all sst files in this level. - const std::vector files; -}; - -// The metadata that describes a SST file. -struct SstFileMetaData { - SstFileMetaData() - : size(0), - file_number(0), - smallest_seqno(0), - largest_seqno(0), - num_reads_sampled(0), - being_compacted(false), - num_entries(0), - num_deletions(0), - oldest_blob_file_number(0), - oldest_ancester_time(0), - file_creation_time(0) {} - - SstFileMetaData(const std::string& _file_name, uint64_t _file_number, - const std::string& _path, size_t _size, - SequenceNumber _smallest_seqno, SequenceNumber _largest_seqno, - const std::string& _smallestkey, - const std::string& _largestkey, uint64_t _num_reads_sampled, - bool _being_compacted, uint64_t _oldest_blob_file_number, - uint64_t _oldest_ancester_time, uint64_t _file_creation_time, - std::string& _file_checksum, - std::string& _file_checksum_func_name) - : size(_size), - name(_file_name), - file_number(_file_number), - db_path(_path), - smallest_seqno(_smallest_seqno), - largest_seqno(_largest_seqno), - smallestkey(_smallestkey), - largestkey(_largestkey), - num_reads_sampled(_num_reads_sampled), - being_compacted(_being_compacted), - num_entries(0), - num_deletions(0), - oldest_blob_file_number(_oldest_blob_file_number), - oldest_ancester_time(_oldest_ancester_time), - file_creation_time(_file_creation_time), - file_checksum(_file_checksum), - file_checksum_func_name(_file_checksum_func_name) {} - - // File size in bytes. - size_t size; - // The name of the file. - std::string name; - // The id of the file. - uint64_t file_number; - // The full path where the file locates. 
- std::string db_path; - - SequenceNumber smallest_seqno; // Smallest sequence number in file. - SequenceNumber largest_seqno; // Largest sequence number in file. - std::string smallestkey; // Smallest user defined key in the file. - std::string largestkey; // Largest user defined key in the file. - uint64_t num_reads_sampled; // How many times the file is read. - bool being_compacted; // true if the file is currently being compacted. - - uint64_t num_entries; - uint64_t num_deletions; - - uint64_t oldest_blob_file_number; // The id of the oldest blob file - // referenced by the file. - // An SST file may be generated by compactions whose input files may - // in turn be generated by earlier compactions. The creation time of the - // oldest SST file that is the compaction ancester of this file. - // The timestamp is provided Env::GetCurrentTime(). - // 0 if the information is not available. - // - // Note: for TTL blob files, it contains the start of the expiration range. - uint64_t oldest_ancester_time; - // Timestamp when the SST file is created, provided by Env::GetCurrentTime(). - // 0 if the information is not available. - uint64_t file_creation_time; - - // The checksum of a SST file, the value is decided by the file content and - // the checksum algorithm used for this SST file. The checksum function is - // identified by the file_checksum_func_name. If the checksum function is - // not specified, file_checksum is "0" by default. - std::string file_checksum; - - // The name of the checksum function used to generate the file checksum - // value. If file checksum is not enabled (e.g., sst_file_checksum_func is - // null), file_checksum_func_name is UnknownFileChecksumFuncName, which is - // "Unknown". - std::string file_checksum_func_name; -}; - -// The full set of metadata associated with each SST file. -struct LiveFileMetaData : SstFileMetaData { - std::string column_family_name; // Name of the column family - int level; // Level at which this file resides. 
- LiveFileMetaData() : column_family_name(), level(0) {} -}; - -// Metadata returned as output from ExportColumnFamily() and used as input to -// CreateColumnFamiliesWithImport(). -struct ExportImportFilesMetaData { - std::string db_comparator_name; // Used to safety check at import. - std::vector files; // Vector of file metadata. -}; -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/options.h b/dist/darwin_amd64/include/rocksdb/options.h deleted file mode 100644 index 53c444a..0000000 --- a/dist/darwin_amd64/include/rocksdb/options.h +++ /dev/null @@ -1,1676 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. 
- -#pragma once - -#include -#include - -#include -#include -#include -#include -#include - -#include "rocksdb/advanced_options.h" -#include "rocksdb/comparator.h" -#include "rocksdb/compression_type.h" -#include "rocksdb/env.h" -#include "rocksdb/file_checksum.h" -#include "rocksdb/listener.h" -#include "rocksdb/sst_partitioner.h" -#include "rocksdb/universal_compaction.h" -#include "rocksdb/version.h" -#include "rocksdb/write_buffer_manager.h" - -#ifdef max -#undef max -#endif - -namespace ROCKSDB_NAMESPACE { - -class Cache; -class CompactionFilter; -class CompactionFilterFactory; -class Comparator; -class ConcurrentTaskLimiter; -class Env; -enum InfoLogLevel : unsigned char; -class SstFileManager; -class FilterPolicy; -class Logger; -class MergeOperator; -class Snapshot; -class MemTableRepFactory; -class RateLimiter; -class Slice; -class Statistics; -class InternalKeyComparator; -class WalFilter; -class FileSystem; - -struct Options; -struct DbPath; - -struct ColumnFamilyOptions : public AdvancedColumnFamilyOptions { - // The function recovers options to a previous version. Only 4.6 or later - // versions are supported. - ColumnFamilyOptions* OldDefaults(int rocksdb_major_version = 4, - int rocksdb_minor_version = 6); - - // Some functions that make it easier to optimize RocksDB - // Use this if your DB is very small (like under 1GB) and you don't want to - // spend lots of memory for memtables. - // An optional cache object is passed in to be used as the block cache - ColumnFamilyOptions* OptimizeForSmallDb( - std::shared_ptr* cache = nullptr); - - // Use this if you don't need to keep the data sorted, i.e. 
you'll never use - // an iterator, only Put() and Get() API calls - // - // Not supported in ROCKSDB_LITE - ColumnFamilyOptions* OptimizeForPointLookup(uint64_t block_cache_size_mb); - - // Default values for some parameters in ColumnFamilyOptions are not - // optimized for heavy workloads and big datasets, which means you might - // observe write stalls under some conditions. As a starting point for tuning - // RocksDB options, use the following two functions: - // * OptimizeLevelStyleCompaction -- optimizes level style compaction - // * OptimizeUniversalStyleCompaction -- optimizes universal style compaction - // Universal style compaction is focused on reducing Write Amplification - // Factor for big data sets, but increases Space Amplification. You can learn - // more about the different styles here: - // https://github.com/facebook/rocksdb/wiki/Rocksdb-Architecture-Guide - // Make sure to also call IncreaseParallelism(), which will provide the - // biggest performance gains. - // Note: we might use more memory than memtable_memory_budget during high - // write rate period - // - // OptimizeUniversalStyleCompaction is not supported in ROCKSDB_LITE - ColumnFamilyOptions* OptimizeLevelStyleCompaction( - uint64_t memtable_memory_budget = 512 * 1024 * 1024); - ColumnFamilyOptions* OptimizeUniversalStyleCompaction( - uint64_t memtable_memory_budget = 512 * 1024 * 1024); - - // ------------------- - // Parameters that affect behavior - - // Comparator used to define the order of keys in the table. - // Default: a comparator that uses lexicographic byte-wise ordering - // - // REQUIRES: The client must ensure that the comparator supplied - // here has the same name and orders keys *exactly* the same as the - // comparator provided to previous open calls on the same DB. - const Comparator* comparator = BytewiseComparator(); - - // REQUIRES: The client must provide a merge operator if Merge operation - // needs to be accessed. 
Calling Merge on a DB without a merge operator - // would result in Status::NotSupported. The client must ensure that the - // merge operator supplied here has the same name and *exactly* the same - // semantics as the merge operator provided to previous open calls on - // the same DB. The only exception is reserved for upgrade, where a DB - // previously without a merge operator is introduced to Merge operation - // for the first time. It's necessary to specify a merge operator when - // opening the DB in this case. - // Default: nullptr - std::shared_ptr merge_operator = nullptr; - - // A single CompactionFilter instance to call into during compaction. - // Allows an application to modify/delete a key-value during background - // compaction. - // - // If the client requires a new compaction filter to be used for different - // compaction runs, it can specify compaction_filter_factory instead of this - // option. The client should specify only one of the two. - // compaction_filter takes precedence over compaction_filter_factory if - // client specifies both. - // - // If multithreaded compaction is being used, the supplied CompactionFilter - // instance may be used from different threads concurrently and so should be - // thread-safe. - // - // Default: nullptr - const CompactionFilter* compaction_filter = nullptr; - - // This is a factory that provides compaction filter objects which allow - // an application to modify/delete a key-value during background compaction. - // - // A new filter will be created on each compaction run. If multithreaded - // compaction is being used, each created CompactionFilter will only be used - // from a single thread and so does not need to be thread-safe. - // - // Default: nullptr - std::shared_ptr compaction_filter_factory = nullptr; - - // ------------------- - // Parameters that affect performance - - // Amount of data to build up in memory (backed by an unsorted log - // on disk) before converting to a sorted on-disk file. 
- // - // Larger values increase performance, especially during bulk loads. - // Up to max_write_buffer_number write buffers may be held in memory - // at the same time, - // so you may wish to adjust this parameter to control memory usage. - // Also, a larger write buffer will result in a longer recovery time - // the next time the database is opened. - // - // Note that write_buffer_size is enforced per column family. - // See db_write_buffer_size for sharing memory across column families. - // - // Default: 64MB - // - // Dynamically changeable through SetOptions() API - size_t write_buffer_size = 64 << 20; - - // Compress blocks using the specified compression algorithm. - // - // Default: kSnappyCompression, if it's supported. If snappy is not linked - // with the library, the default is kNoCompression. - // - // Typical speeds of kSnappyCompression on an Intel(R) Core(TM)2 2.4GHz: - // ~200-500MB/s compression - // ~400-800MB/s decompression - // - // Note that these speeds are significantly faster than most - // persistent storage speeds, and therefore it is typically never - // worth switching to kNoCompression. Even if the input data is - // incompressible, the kSnappyCompression implementation will - // efficiently detect that and will switch to uncompressed mode. - // - // If you do not set `compression_opts.level`, or set it to - // `CompressionOptions::kDefaultCompressionLevel`, we will attempt to pick the - // default corresponding to `compression` as follows: - // - // - kZSTD: 3 - // - kZlibCompression: Z_DEFAULT_COMPRESSION (currently -1) - // - kLZ4HCCompression: 0 - // - For all others, we do not specify a compression level - // - // Dynamically changeable through SetOptions() API - CompressionType compression; - - // Compression algorithm that will be used for the bottommost level that - // contain files. 
- // - // Default: kDisableCompressionOption (Disabled) - CompressionType bottommost_compression = kDisableCompressionOption; - - // different options for compression algorithms used by bottommost_compression - // if it is enabled. To enable it, please see the definition of - // CompressionOptions. - CompressionOptions bottommost_compression_opts; - - // different options for compression algorithms - CompressionOptions compression_opts; - - // Number of files to trigger level-0 compaction. A value <0 means that - // level-0 compaction will not be triggered by number of files at all. - // - // Default: 4 - // - // Dynamically changeable through SetOptions() API - int level0_file_num_compaction_trigger = 4; - - // If non-nullptr, use the specified function to determine the - // prefixes for keys. These prefixes will be placed in the filter. - // Depending on the workload, this can reduce the number of read-IOP - // cost for scans when a prefix is passed via ReadOptions to - // db.NewIterator(). For prefix filtering to work properly, - // "prefix_extractor" and "comparator" must be such that the following - // properties hold: - // - // 1) key.starts_with(prefix(key)) - // 2) Compare(prefix(key), key) <= 0. - // 3) If Compare(k1, k2) <= 0, then Compare(prefix(k1), prefix(k2)) <= 0 - // 4) prefix(prefix(key)) == prefix(key) - // - // Default: nullptr - std::shared_ptr prefix_extractor = nullptr; - - // Control maximum total data size for a level. - // max_bytes_for_level_base is the max total for level-1. - // Maximum number of bytes for level L can be calculated as - // (max_bytes_for_level_base) * (max_bytes_for_level_multiplier ^ (L-1)) - // For example, if max_bytes_for_level_base is 200MB, and if - // max_bytes_for_level_multiplier is 10, total data size for level-1 - // will be 200MB, total file size for level-2 will be 2GB, - // and total file size for level-3 will be 20GB. - // - // Default: 256MB. 
- // - // Dynamically changeable through SetOptions() API - uint64_t max_bytes_for_level_base = 256 * 1048576; - - // Deprecated. - uint64_t snap_refresh_nanos = 0; - - // Disable automatic compactions. Manual compactions can still - // be issued on this column family - // - // Dynamically changeable through SetOptions() API - bool disable_auto_compactions = false; - - // This is a factory that provides TableFactory objects. - // Default: a block-based table factory that provides a default - // implementation of TableBuilder and TableReader with default - // BlockBasedTableOptions. - std::shared_ptr table_factory; - - // A list of paths where SST files for this column family - // can be put into, with its target size. Similar to db_paths, - // newer data is placed into paths specified earlier in the - // vector while older data gradually moves to paths specified - // later in the vector. - // Note that, if a path is supplied to multiple column - // families, it would have files and total size from all - // the column families combined. User should provision for the - // total size(from all the column families) in such cases. - // - // If left empty, db_paths will be used. - // Default: empty - std::vector cf_paths; - - // Compaction concurrent thread limiter for the column family. - // If non-nullptr, use given concurrent thread limiter to control - // the max outstanding compaction tasks. Limiter can be shared with - // multiple column families across db instances. - // - // Default: nullptr - std::shared_ptr compaction_thread_limiter = nullptr; - - // If non-nullptr, use the specified factory for a function to determine the - // partitioning of sst files. This helps compaction to split the files - // on interesting boundaries (key prefixes) to make propagation of sst - // files less write amplifying (covering the whole key space). 
- // THE FEATURE IS STILL EXPERIMENTAL - // - // Default: nullptr - std::shared_ptr sst_partitioner_factory = nullptr; - - // Create ColumnFamilyOptions with default values for all fields - ColumnFamilyOptions(); - // Create ColumnFamilyOptions from Options - explicit ColumnFamilyOptions(const Options& options); - - void Dump(Logger* log) const; -}; - -enum class WALRecoveryMode : char { - // Original levelDB recovery - // - // We tolerate the last record in any log to be incomplete due to a crash - // while writing it. Zeroed bytes from preallocation are also tolerated in the - // trailing data of any log. - // - // Use case: Applications for which updates, once applied, must not be rolled - // back even after a crash-recovery. In this recovery mode, RocksDB guarantees - // this as long as `WritableFile::Append()` writes are durable. In case the - // user needs the guarantee in more situations (e.g., when - // `WritableFile::Append()` writes to page cache, but the user desires this - // guarantee in face of power-loss crash-recovery), RocksDB offers various - // mechanisms to additionally invoke `WritableFile::Sync()` in order to - // strengthen the guarantee. - // - // This differs from `kPointInTimeRecovery` in that, in case a corruption is - // detected during recovery, this mode will refuse to open the DB. Whereas, - // `kPointInTimeRecovery` will stop recovery just before the corruption since - // that is a valid point-in-time to which to recover. 
- kTolerateCorruptedTailRecords = 0x00, - // Recover from clean shutdown - // We don't expect to find any corruption in the WAL - // Use case : This is ideal for unit tests and rare applications that - // can require high consistency guarantee - kAbsoluteConsistency = 0x01, - // Recover to point-in-time consistency (default) - // We stop the WAL playback on discovering WAL inconsistency - // Use case : Ideal for systems that have disk controller cache like - // hard disk, SSD without super capacitor that store related data - kPointInTimeRecovery = 0x02, - // Recovery after a disaster - // We ignore any corruption in the WAL and try to salvage as much data as - // possible - // Use case : Ideal for last ditch effort to recover data or systems that - // operate with low grade unrelated data - kSkipAnyCorruptedRecords = 0x03, -}; - -struct DbPath { - std::string path; - uint64_t target_size; // Target size of total files under the path, in byte. - - DbPath() : target_size(0) {} - DbPath(const std::string& p, uint64_t t) : path(p), target_size(t) {} -}; - -struct DBOptions { - // The function recovers options to the option as in version 4.6. - DBOptions* OldDefaults(int rocksdb_major_version = 4, - int rocksdb_minor_version = 6); - - // Some functions that make it easier to optimize RocksDB - - // Use this if your DB is very small (like under 1GB) and you don't want to - // spend lots of memory for memtables. - // An optional cache object is passed in for the memory of the - // memtable to cost to - DBOptions* OptimizeForSmallDb(std::shared_ptr* cache = nullptr); - -#ifndef ROCKSDB_LITE - // By default, RocksDB uses only one background thread for flush and - // compaction. Calling this function will set it up such that total of - // `total_threads` is used. Good value for `total_threads` is the number of - // cores. You almost definitely want to call this function if your system is - // bottlenecked by RocksDB. 
- DBOptions* IncreaseParallelism(int total_threads = 16); -#endif // ROCKSDB_LITE - - // If true, the database will be created if it is missing. - // Default: false - bool create_if_missing = false; - - // If true, missing column families will be automatically created. - // Default: false - bool create_missing_column_families = false; - - // If true, an error is raised if the database already exists. - // Default: false - bool error_if_exists = false; - - // If true, RocksDB will aggressively check consistency of the data. - // Also, if any of the writes to the database fails (Put, Delete, Merge, - // Write), the database will switch to read-only mode and fail all other - // Write operations. - // In most cases you want this to be set to true. - // Default: true - bool paranoid_checks = true; - - // Use the specified object to interact with the environment, - // e.g. to read/write files, schedule background work, etc. In the near - // future, support for doing storage operations such as read/write files - // through env will be deprecated in favor of file_system (see below) - // Default: Env::Default() - Env* env = Env::Default(); - - // Use to control write rate of flush and compaction. Flush has higher - // priority than compaction. Rate limiting is disabled if nullptr. - // If rate limiter is enabled, bytes_per_sync is set to 1MB by default. - // Default: nullptr - std::shared_ptr rate_limiter = nullptr; - - // Use to track SST files and control their file deletion rate. - // - // Features: - // - Throttle the deletion rate of the SST files. - // - Keep track the total size of all SST files. - // - Set a maximum allowed space limit for SST files that when reached - // the DB wont do any further flushes or compactions and will set the - // background error. - // - Can be shared between multiple dbs. - // Limitations: - // - Only track and throttle deletes of SST files in - // first db_path (db_name if db_paths is empty). 
- // - // Default: nullptr - std::shared_ptr sst_file_manager = nullptr; - - // Any internal progress/error information generated by the db will - // be written to info_log if it is non-nullptr, or to a file stored - // in the same directory as the DB contents if info_log is nullptr. - // Default: nullptr - std::shared_ptr info_log = nullptr; - -#ifdef NDEBUG - InfoLogLevel info_log_level = INFO_LEVEL; -#else - InfoLogLevel info_log_level = DEBUG_LEVEL; -#endif // NDEBUG - - // Number of open files that can be used by the DB. You may need to - // increase this if your database has a large working set. Value -1 means - // files opened are always kept open. You can estimate number of files based - // on target_file_size_base and target_file_size_multiplier for level-based - // compaction. For universal-style compaction, you can usually set it to -1. - // - // Default: -1 - // - // Dynamically changeable through SetDBOptions() API. - int max_open_files = -1; - - // If max_open_files is -1, DB will open all files on DB::Open(). You can - // use this option to increase the number of threads used to open the files. - // Default: 16 - int max_file_opening_threads = 16; - - // Once write-ahead logs exceed this size, we will start forcing the flush of - // column families whose memtables are backed by the oldest live WAL file - // (i.e. the ones that are causing all the space amplification). If set to 0 - // (default), we will dynamically choose the WAL size limit to be - // [sum of all write_buffer_size * max_write_buffer_number] * 4 - // This option takes effect only when there are more than one column family as - // otherwise the wal size is dictated by the write_buffer_size. - // - // Default: 0 - // - // Dynamically changeable through SetDBOptions() API. 
- uint64_t max_total_wal_size = 0; - - // If non-null, then we should collect metrics about database operations - std::shared_ptr statistics = nullptr; - - // By default, writes to stable storage use fdatasync (on platforms - // where this function is available). If this option is true, - // fsync is used instead. - // - // fsync and fdatasync are equally safe for our purposes and fdatasync is - // faster, so it is rarely necessary to set this option. It is provided - // as a workaround for kernel/filesystem bugs, such as one that affected - // fdatasync with ext4 in kernel versions prior to 3.7. - bool use_fsync = false; - - // A list of paths where SST files can be put into, with its target size. - // Newer data is placed into paths specified earlier in the vector while - // older data gradually moves to paths specified later in the vector. - // - // For example, you have a flash device with 10GB allocated for the DB, - // as well as a hard drive of 2TB, you should config it to be: - // [{"/flash_path", 10GB}, {"/hard_drive", 2TB}] - // - // The system will try to guarantee data under each path is close to but - // not larger than the target size. But current and future file sizes used - // by determining where to place a file are based on best-effort estimation, - // which means there is a chance that the actual size under the directory - // is slightly more than target size under some workloads. User should give - // some buffer room for those cases. - // - // If none of the paths has sufficient room to place a file, the file will - // be placed to the last path anyway, despite to the target size. - // - // Placing newer data to earlier paths is also best-efforts. User should - // expect user files to be placed in higher levels in some extreme cases. - // - // If left empty, only one path will be used, which is db_name passed when - // opening the DB. - // Default: empty - std::vector db_paths; - - // This specifies the info LOG dir. 
- // If it is empty, the log files will be in the same dir as data. - // If it is non empty, the log files will be in the specified dir, - // and the db data dir's absolute path will be used as the log file - // name's prefix. - std::string db_log_dir = ""; - - // This specifies the absolute dir path for write-ahead logs (WAL). - // If it is empty, the log files will be in the same dir as data, - // dbname is used as the data dir by default - // If it is non empty, the log files will be in kept the specified dir. - // When destroying the db, - // all log files in wal_dir and the dir itself is deleted - std::string wal_dir = ""; - - // The periodicity when obsolete files get deleted. The default - // value is 6 hours. The files that get out of scope by compaction - // process will still get automatically delete on every compaction, - // regardless of this setting - // - // Default: 6 hours - // - // Dynamically changeable through SetDBOptions() API. - uint64_t delete_obsolete_files_period_micros = 6ULL * 60 * 60 * 1000000; - - // Maximum number of concurrent background jobs (compactions and flushes). - // - // Default: 2 - // - // Dynamically changeable through SetDBOptions() API. - int max_background_jobs = 2; - - // NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the - // value of max_background_jobs. This option is ignored. - // - // Dynamically changeable through SetDBOptions() API. - int base_background_compactions = -1; - - // NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the - // value of max_background_jobs. For backwards compatibility we will set - // `max_background_jobs = max_background_compactions + max_background_flushes` - // in the case where user sets at least one of `max_background_compactions` or - // `max_background_flushes` (we replace -1 by 1 in case one option is unset). - // - // Maximum number of concurrent background compaction jobs, submitted to - // the default LOW priority thread pool. 
- // - // If you're increasing this, also consider increasing number of threads in - // LOW priority thread pool. For more information, see - // Env::SetBackgroundThreads - // - // Default: -1 - // - // Dynamically changeable through SetDBOptions() API. - int max_background_compactions = -1; - - // This value represents the maximum number of threads that will - // concurrently perform a compaction job by breaking it into multiple, - // smaller ones that are run simultaneously. - // Default: 1 (i.e. no subcompactions) - // - // Dynamically changeable through SetDBOptions() API. - uint32_t max_subcompactions = 1; - - // NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the - // value of max_background_jobs. For backwards compatibility we will set - // `max_background_jobs = max_background_compactions + max_background_flushes` - // in the case where user sets at least one of `max_background_compactions` or - // `max_background_flushes`. - // - // Maximum number of concurrent background memtable flush jobs, submitted by - // default to the HIGH priority thread pool. If the HIGH priority thread pool - // is configured to have zero threads, flush jobs will share the LOW priority - // thread pool with compaction jobs. - // - // It is important to use both thread pools when the same Env is shared by - // multiple db instances. Without a separate pool, long running compaction - // jobs could potentially block memtable flush jobs of other db instances, - // leading to unnecessary Put stalls. - // - // If you're increasing this, also consider increasing number of threads in - // HIGH priority thread pool. For more information, see - // Env::SetBackgroundThreads - // Default: -1 - int max_background_flushes = -1; - - // Specify the maximal size of the info log file. If the log file - // is larger than `max_log_file_size`, a new info log file will - // be created. - // If max_log_file_size == 0, all logs will be written to one - // log file. 
- size_t max_log_file_size = 0; - - // Time for the info log file to roll (in seconds). - // If specified with non-zero value, log file will be rolled - // if it has been active longer than `log_file_time_to_roll`. - // Default: 0 (disabled) - // Not supported in ROCKSDB_LITE mode! - size_t log_file_time_to_roll = 0; - - // Maximal info log files to be kept. - // Default: 1000 - size_t keep_log_file_num = 1000; - - // Recycle log files. - // If non-zero, we will reuse previously written log files for new - // logs, overwriting the old data. The value indicates how many - // such files we will keep around at any point in time for later - // use. This is more efficient because the blocks are already - // allocated and fdatasync does not need to update the inode after - // each write. - // Default: 0 - size_t recycle_log_file_num = 0; - - // manifest file is rolled over on reaching this limit. - // The older manifest file be deleted. - // The default value is 1GB so that the manifest file can grow, but not - // reach the limit of storage capacity. - uint64_t max_manifest_file_size = 1024 * 1024 * 1024; - - // Number of shards used for table cache. - int table_cache_numshardbits = 6; - - // NOT SUPPORTED ANYMORE - // int table_cache_remove_scan_count_limit; - - // The following two fields affect how archived logs will be deleted. - // 1. If both set to 0, logs will be deleted asap and will not get into - // the archive. - // 2. If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0, - // WAL files will be checked every 10 min and if total size is greater - // then WAL_size_limit_MB, they will be deleted starting with the - // earliest until size_limit is met. All empty files will be deleted. - // 3. If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then - // WAL files will be checked every WAL_ttl_seconds / 2 and those that - // are older than WAL_ttl_seconds will be deleted. - // 4. 
If both are not 0, WAL files will be checked every 10 min and both - // checks will be performed with ttl being first. - uint64_t WAL_ttl_seconds = 0; - uint64_t WAL_size_limit_MB = 0; - - // Number of bytes to preallocate (via fallocate) the manifest - // files. Default is 4mb, which is reasonable to reduce random IO - // as well as prevent overallocation for mounts that preallocate - // large amounts of data (such as xfs's allocsize option). - size_t manifest_preallocation_size = 4 * 1024 * 1024; - - // Allow the OS to mmap file for reading sst tables. Default: false - bool allow_mmap_reads = false; - - // Allow the OS to mmap file for writing. - // DB::SyncWAL() only works if this is set to false. - // Default: false - bool allow_mmap_writes = false; - - // Enable direct I/O mode for read/write - // they may or may not improve performance depending on the use case - // - // Files will be opened in "direct I/O" mode - // which means that data r/w from the disk will not be cached or - // buffered. The hardware buffer of the devices may however still - // be used. Memory mapped files are not impacted by these parameters. - - // Use O_DIRECT for user and compaction reads. - // When true, we also force new_table_reader_for_compaction_inputs to true. - // Default: false - // Not supported in ROCKSDB_LITE mode! - bool use_direct_reads = false; - - // Use O_DIRECT for writes in background flush and compactions. - // Default: false - // Not supported in ROCKSDB_LITE mode! - bool use_direct_io_for_flush_and_compaction = false; - - // If false, fallocate() calls are bypassed - bool allow_fallocate = true; - - // Disable child process inherit open files. Default: true - bool is_fd_close_on_exec = true; - - // NOT SUPPORTED ANYMORE -- this options is no longer used - bool skip_log_error_on_recovery = false; - - // if not zero, dump rocksdb.stats to LOG every stats_dump_period_sec - // - // Default: 600 (10 min) - // - // Dynamically changeable through SetDBOptions() API. 
- unsigned int stats_dump_period_sec = 600; - - // if not zero, dump rocksdb.stats to RocksDB every stats_persist_period_sec - // Default: 600 - unsigned int stats_persist_period_sec = 600; - - // If true, automatically persist stats to a hidden column family (column - // family name: ___rocksdb_stats_history___) every - // stats_persist_period_sec seconds; otherwise, write to an in-memory - // struct. User can query through `GetStatsHistory` API. - // If user attempts to create a column family with the same name on a DB - // which have previously set persist_stats_to_disk to true, the column family - // creation will fail, but the hidden column family will survive, as well as - // the previously persisted statistics. - // When peristing stats to disk, the stat name will be limited at 100 bytes. - // Default: false - bool persist_stats_to_disk = false; - - // if not zero, periodically take stats snapshots and store in memory, the - // memory size for stats snapshots is capped at stats_history_buffer_size - // Default: 1MB - size_t stats_history_buffer_size = 1024 * 1024; - - // If set true, will hint the underlying file system that the file - // access pattern is random, when a sst file is opened. - // Default: true - bool advise_random_on_open = true; - - // Amount of data to build up in memtables across all column - // families before writing to disk. - // - // This is distinct from write_buffer_size, which enforces a limit - // for a single memtable. - // - // This feature is disabled by default. Specify a non-zero value - // to enable it. - // - // Default: 0 (disabled) - size_t db_write_buffer_size = 0; - - // The memory usage of memtable will report to this object. The same object - // can be passed into multiple DBs and it will track the sum of size of all - // the DBs. If the total size of all live memtables of all the DBs exceeds - // a limit, a flush will be triggered in the next DB to which the next write - // is issued. 
- // - // If the object is only passed to one DB, the behavior is the same as - // db_write_buffer_size. When write_buffer_manager is set, the value set will - // override db_write_buffer_size. - // - // This feature is disabled by default. Specify a non-zero value - // to enable it. - // - // Default: null - std::shared_ptr write_buffer_manager = nullptr; - - // Specify the file access pattern once a compaction is started. - // It will be applied to all input files of a compaction. - // Default: NORMAL - enum AccessHint { NONE, NORMAL, SEQUENTIAL, WILLNEED }; - AccessHint access_hint_on_compaction_start = NORMAL; - - // If true, always create a new file descriptor and new table reader - // for compaction inputs. Turn this parameter on may introduce extra - // memory usage in the table reader, if it allocates extra memory - // for indexes. This will allow file descriptor prefetch options - // to be set for compaction input files and not to impact file - // descriptors for the same file used by user queries. - // Suggest to enable BlockBasedTableOptions.cache_index_and_filter_blocks - // for this mode if using block-based table. - // - // Default: false - // This flag has no affect on the behavior of compaction and plan to delete - // in the future. - bool new_table_reader_for_compaction_inputs = false; - - // If non-zero, we perform bigger reads when doing compaction. If you're - // running RocksDB on spinning disks, you should set this to at least 2MB. - // That way RocksDB's compaction is doing sequential instead of random reads. - // - // When non-zero, we also force new_table_reader_for_compaction_inputs to - // true. - // - // Default: 0 - // - // Dynamically changeable through SetDBOptions() API. - size_t compaction_readahead_size = 0; - - // This is a maximum buffer size that is used by WinMmapReadableFile in - // unbuffered disk I/O mode. We need to maintain an aligned buffer for - // reads. 
We allow the buffer to grow until the specified value and then - // for bigger requests allocate one shot buffers. In unbuffered mode we - // always bypass read-ahead buffer at ReadaheadRandomAccessFile - // When read-ahead is required we then make use of compaction_readahead_size - // value and always try to read ahead. With read-ahead we always - // pre-allocate buffer to the size instead of growing it up to a limit. - // - // This option is currently honored only on Windows - // - // Default: 1 Mb - // - // Special value: 0 - means do not maintain per instance buffer. Allocate - // per request buffer and avoid locking. - size_t random_access_max_buffer_size = 1024 * 1024; - - // This is the maximum buffer size that is used by WritableFileWriter. - // On Windows, we need to maintain an aligned buffer for writes. - // We allow the buffer to grow until it's size hits the limit in buffered - // IO and fix the buffer size when using direct IO to ensure alignment of - // write requests if the logical sector size is unusual - // - // Default: 1024 * 1024 (1 MB) - // - // Dynamically changeable through SetDBOptions() API. - size_t writable_file_max_buffer_size = 1024 * 1024; - - // Use adaptive mutex, which spins in the user space before resorting - // to kernel. This could reduce context switch when the mutex is not - // heavily contended. However, if the mutex is hot, we could end up - // wasting spin time. - // Default: false - bool use_adaptive_mutex = false; - - // Create DBOptions with default values for all fields - DBOptions(); - // Create DBOptions from Options - explicit DBOptions(const Options& options); - - void Dump(Logger* log) const; - - // Allows OS to incrementally sync files to disk while they are being - // written, asynchronously, in the background. This operation can be used - // to smooth out write I/Os over time. Users shouldn't rely on it for - // persistency guarantee. - // Issue one request for every bytes_per_sync written. 0 turns it off. 
- // - // You may consider using rate_limiter to regulate write rate to device. - // When rate limiter is enabled, it automatically enables bytes_per_sync - // to 1MB. - // - // This option applies to table files - // - // Default: 0, turned off - // - // Note: DOES NOT apply to WAL files. See wal_bytes_per_sync instead - // Dynamically changeable through SetDBOptions() API. - uint64_t bytes_per_sync = 0; - - // Same as bytes_per_sync, but applies to WAL files - // - // Default: 0, turned off - // - // Dynamically changeable through SetDBOptions() API. - uint64_t wal_bytes_per_sync = 0; - - // When true, guarantees WAL files have at most `wal_bytes_per_sync` - // bytes submitted for writeback at any given time, and SST files have at most - // `bytes_per_sync` bytes pending writeback at any given time. This can be - // used to handle cases where processing speed exceeds I/O speed during file - // generation, which can lead to a huge sync when the file is finished, even - // with `bytes_per_sync` / `wal_bytes_per_sync` properly configured. - // - // - If `sync_file_range` is supported it achieves this by waiting for any - // prior `sync_file_range`s to finish before proceeding. In this way, - // processing (compression, etc.) can proceed uninhibited in the gap - // between `sync_file_range`s, and we block only when I/O falls behind. - // - Otherwise the `WritableFile::Sync` method is used. Note this mechanism - // always blocks, thus preventing the interleaving of I/O and processing. - // - // Note: Enabling this option does not provide any additional persistence - // guarantees, as it may use `sync_file_range`, which does not write out - // metadata. - // - // Default: false - bool strict_bytes_per_sync = false; - - // A vector of EventListeners whose callback functions will be called - // when specific RocksDB event happens. 
- std::vector> listeners; - - // If true, then the status of the threads involved in this DB will - // be tracked and available via GetThreadList() API. - // - // Default: false - bool enable_thread_tracking = false; - - // The limited write rate to DB if soft_pending_compaction_bytes_limit or - // level0_slowdown_writes_trigger is triggered, or we are writing to the - // last mem table allowed and we allow more than 3 mem tables. It is - // calculated using size of user write requests before compression. - // RocksDB may decide to slow down more if the compaction still - // gets behind further. - // If the value is 0, we will infer a value from `rater_limiter` value - // if it is not empty, or 16MB if `rater_limiter` is empty. Note that - // if users change the rate in `rate_limiter` after DB is opened, - // `delayed_write_rate` won't be adjusted. - // - // Unit: byte per second. - // - // Default: 0 - // - // Dynamically changeable through SetDBOptions() API. - uint64_t delayed_write_rate = 0; - - // By default, a single write thread queue is maintained. The thread gets - // to the head of the queue becomes write batch group leader and responsible - // for writing to WAL and memtable for the batch group. - // - // If enable_pipelined_write is true, separate write thread queue is - // maintained for WAL write and memtable write. A write thread first enter WAL - // writer queue and then memtable writer queue. Pending thread on the WAL - // writer queue thus only have to wait for previous writers to finish their - // WAL writing but not the memtable writing. Enabling the feature may improve - // write throughput and reduce latency of the prepare phase of two-phase - // commit. - // - // Default: false - bool enable_pipelined_write = false; - - // Setting unordered_write to true trades higher write throughput with - // relaxing the immutability guarantee of snapshots. 
This violates the - // repeatability one expects from ::Get from a snapshot, as well as - // ::MultiGet and Iterator's consistent-point-in-time view property. - // If the application cannot tolerate the relaxed guarantees, it can implement - // its own mechanisms to work around that and yet benefit from the higher - // throughput. Using TransactionDB with WRITE_PREPARED write policy and - // two_write_queues=true is one way to achieve immutable snapshots despite - // unordered_write. - // - // By default, i.e., when it is false, rocksdb does not advance the sequence - // number for new snapshots unless all the writes with lower sequence numbers - // are already finished. This provides the immutability that we except from - // snapshots. Moreover, since Iterator and MultiGet internally depend on - // snapshots, the snapshot immutability results into Iterator and MultiGet - // offering consistent-point-in-time view. If set to true, although - // Read-Your-Own-Write property is still provided, the snapshot immutability - // property is relaxed: the writes issued after the snapshot is obtained (with - // larger sequence numbers) will be still not visible to the reads from that - // snapshot, however, there still might be pending writes (with lower sequence - // number) that will change the state visible to the snapshot after they are - // landed to the memtable. - // - // Default: false - bool unordered_write = false; - - // If true, allow multi-writers to update mem tables in parallel. - // Only some memtable_factory-s support concurrent writes; currently it - // is implemented only for SkipListFactory. Concurrent memtable writes - // are not compatible with inplace_update_support or filter_deletes. - // It is strongly recommended to set enable_write_thread_adaptive_yield - // if you are going to use this feature. 
- // - // Default: true - bool allow_concurrent_memtable_write = true; - - // If true, threads synchronizing with the write batch group leader will - // wait for up to write_thread_max_yield_usec before blocking on a mutex. - // This can substantially improve throughput for concurrent workloads, - // regardless of whether allow_concurrent_memtable_write is enabled. - // - // Default: true - bool enable_write_thread_adaptive_yield = true; - - // The maximum limit of number of bytes that are written in a single batch - // of WAL or memtable write. It is followed when the leader write size - // is larger than 1/8 of this limit. - // - // Default: 1 MB - uint64_t max_write_batch_group_size_bytes = 1 << 20; - - // The maximum number of microseconds that a write operation will use - // a yielding spin loop to coordinate with other write threads before - // blocking on a mutex. (Assuming write_thread_slow_yield_usec is - // set properly) increasing this value is likely to increase RocksDB - // throughput at the expense of increased CPU usage. - // - // Default: 100 - uint64_t write_thread_max_yield_usec = 100; - - // The latency in microseconds after which a std::this_thread::yield - // call (sched_yield on Linux) is considered to be a signal that - // other processes or threads would like to use the current core. - // Increasing this makes writer threads more likely to take CPU - // by spinning, which will show up as an increase in the number of - // involuntary context switches. - // - // Default: 3 - uint64_t write_thread_slow_yield_usec = 3; - - // If true, then DB::Open() will not update the statistics used to optimize - // compaction decision by loading table properties from many files. - // Turning off this feature will improve DBOpen time especially in - // disk environment. - // - // Default: false - bool skip_stats_update_on_db_open = false; - - // If true, then DB::Open() will not fetch and check sizes of all sst files. 
- // This may significantly speed up startup if there are many sst files, - // especially when using non-default Env with expensive GetFileSize(). - // We'll still check that all required sst files exist. - // If paranoid_checks is false, this option is ignored, and sst files are - // not checked at all. - // - // Default: false - bool skip_checking_sst_file_sizes_on_db_open = false; - - // Recovery mode to control the consistency while replaying WAL - // Default: kPointInTimeRecovery - WALRecoveryMode wal_recovery_mode = WALRecoveryMode::kPointInTimeRecovery; - - // if set to false then recovery will fail when a prepared - // transaction is encountered in the WAL - bool allow_2pc = false; - - // A global cache for table-level rows. - // Default: nullptr (disabled) - // Not supported in ROCKSDB_LITE mode! - std::shared_ptr row_cache = nullptr; - -#ifndef ROCKSDB_LITE - // A filter object supplied to be invoked while processing write-ahead-logs - // (WALs) during recovery. The filter provides a way to inspect log - // records, ignoring a particular record or skipping replay. - // The filter is invoked at startup and is invoked from a single-thread - // currently. - WalFilter* wal_filter = nullptr; -#endif // ROCKSDB_LITE - - // If true, then DB::Open / CreateColumnFamily / DropColumnFamily - // / SetOptions will fail if options file is not detected or properly - // persisted. - // - // DEFAULT: false - bool fail_if_options_file_error = false; - - // If true, then print malloc stats together with rocksdb.stats - // when printing to LOG. - // DEFAULT: false - bool dump_malloc_stats = false; - - // By default RocksDB replay WAL logs and flush them on DB open, which may - // create very small SST files. If this option is enabled, RocksDB will try - // to avoid (but not guarantee not to) flush during recovery. Also, existing - // WAL logs will be kept, so that if crash happened before flush, we still - // have logs to recover from. 
- // - // DEFAULT: false - bool avoid_flush_during_recovery = false; - - // By default RocksDB will flush all memtables on DB close if there are - // unpersisted data (i.e. with WAL disabled) The flush can be skip to speedup - // DB close. Unpersisted data WILL BE LOST. - // - // DEFAULT: false - // - // Dynamically changeable through SetDBOptions() API. - bool avoid_flush_during_shutdown = false; - - // Set this option to true during creation of database if you want - // to be able to ingest behind (call IngestExternalFile() skipping keys - // that already exist, rather than overwriting matching keys). - // Setting this option to true will affect 2 things: - // 1) Disable some internal optimizations around SST file compression - // 2) Reserve bottom-most level for ingested files only. - // 3) Note that num_levels should be >= 3 if this option is turned on. - // - // DEFAULT: false - // Immutable. - bool allow_ingest_behind = false; - - // Needed to support differential snapshots. - // If set to true then DB will only process deletes with sequence number - // less than what was set by SetPreserveDeletesSequenceNumber(uint64_t ts). - // Clients are responsible to periodically call this method to advance - // the cutoff time. If this method is never called and preserve_deletes - // is set to true NO deletes will ever be processed. - // At the moment this only keeps normal deletes, SingleDeletes will - // not be preserved. - // DEFAULT: false - // Immutable (TODO: make it dynamically changeable) - bool preserve_deletes = false; - - // If enabled it uses two queues for writes, one for the ones with - // disable_memtable and one for the ones that also write to memtable. This - // allows the memtable writes not to lag behind other writes. It can be used - // to optimize MySQL 2PC in which only the commits, which are serial, write to - // memtable. - bool two_write_queues = false; - - // If true WAL is not flushed automatically after each write. 
Instead it - // relies on manual invocation of FlushWAL to write the WAL buffer to its - // file. - bool manual_wal_flush = false; - - // If true, RocksDB supports flushing multiple column families and committing - // their results atomically to MANIFEST. Note that it is not - // necessary to set atomic_flush to true if WAL is always enabled since WAL - // allows the database to be restored to the last persistent state in WAL. - // This option is useful when there are column families with writes NOT - // protected by WAL. - // For manual flush, application has to specify which column families to - // flush atomically in DB::Flush. - // For auto-triggered flush, RocksDB atomically flushes ALL column families. - // - // Currently, any WAL-enabled writes after atomic flush may be replayed - // independently if the process crashes later and tries to recover. - bool atomic_flush = false; - - // If true, working thread may avoid doing unnecessary and long-latency - // operation (such as deleting obsolete files directly or deleting memtable) - // and will instead schedule a background job to do it. - // Use it if you're latency-sensitive. - // If set to true, takes precedence over - // ReadOptions::background_purge_on_iterator_cleanup. - bool avoid_unnecessary_blocking_io = false; - - // Historically DB ID has always been stored in Identity File in DB folder. - // If this flag is true, the DB ID is written to Manifest file in addition - // to the Identity file. By doing this 2 problems are solved - // 1. We don't checksum the Identity file where as Manifest file is. - // 2. Since the source of truth for DB is Manifest file DB ID will sit with - // the source of truth. Previously the Identity file could be copied - // independent of Manifest and that can result in wrong DB ID. - // We recommend setting this flag to true. - // Default: false - bool write_dbid_to_manifest = false; - - // The number of bytes to prefetch when reading the log. 
This is mostly useful - // for reading a remotely located log, as it can save the number of - // round-trips. If 0, then the prefetching is disabled. - // - // Default: 0 - size_t log_readahead_size = 0; - - // If user does NOT provide the checksum generator factory, the file checksum - // will NOT be used. A new file checksum generator object will be created - // when a SST file is created. Therefore, each created FileChecksumGenerator - // will only be used from a single thread and so does not need to be - // thread-safe. - // - // Default: nullptr - std::shared_ptr file_checksum_gen_factory = nullptr; - - // By default, RocksDB recovery fails if any table file referenced in - // MANIFEST are missing after scanning the MANIFEST. - // Best-efforts recovery is another recovery mode that - // tries to restore the database to the most recent point in time without - // missing file. - // Currently not compatible with atomic flush. Furthermore, WAL files will - // not be used for recovery if best_efforts_recovery is true. - // Default: false - bool best_efforts_recovery = false; - - // It defines how many times db resume is called by a separate thread when - // background retryable IO Error happens. When background retryable IO - // Error happens, SetBGError is called to deal with the error. If the error - // can be auto-recovered (e.g., retryable IO Error during Flush or WAL write), - // then db resume is called in background to recover from the error. If this - // value is 0 or negative, db resume will not be called. - // - // Default: INT_MAX - int max_bgerror_resume_count = INT_MAX; - - // If max_bgerror_resume_count is >= 2, db resume is called multiple times. - // This option decides how long to wait to retry the next resume if the - // previous resume fails and satisfy redo resume conditions. - // - // Default: 1000000 (microseconds). 
- uint64_t bgerror_resume_retry_interval = 1000000; - - // It allows user to opt-in to get error messages containing corrupted - // keys/values. Corrupt keys, values will be logged in the - // messages/logs/status that will help users with the useful information - // regarding affected data. By default value is set false to prevent users - // data to be exposed in the logs/messages etc. - // - // Default: false - bool allow_data_in_errors = false; -}; - -// Options to control the behavior of a database (passed to DB::Open) -struct Options : public DBOptions, public ColumnFamilyOptions { - // Create an Options object with default values for all fields. - Options() : DBOptions(), ColumnFamilyOptions() {} - - Options(const DBOptions& db_options, - const ColumnFamilyOptions& column_family_options) - : DBOptions(db_options), ColumnFamilyOptions(column_family_options) {} - - // The function recovers options to the option as in version 4.6. - Options* OldDefaults(int rocksdb_major_version = 4, - int rocksdb_minor_version = 6); - - void Dump(Logger* log) const; - - void DumpCFOptions(Logger* log) const; - - // Some functions that make it easier to optimize RocksDB - - // Set appropriate parameters for bulk loading. - // The reason that this is a function that returns "this" instead of a - // constructor is to enable chaining of multiple similar calls in the future. - // - - // All data will be in level 0 without any automatic compaction. - // It's recommended to manually call CompactRange(NULL, NULL) before reading - // from the database, because otherwise the read can be very slow. - Options* PrepareForBulkLoad(); - - // Use this if your DB is very small (like under 1GB) and you don't want to - // spend lots of memory for memtables. - Options* OptimizeForSmallDb(); -}; - -// -// An application can issue a read request (via Get/Iterators) and specify -// if that read should process data that ALREADY resides on a specified cache -// level. 
For example, if an application specifies kBlockCacheTier then the -// Get call will process data that is already processed in the memtable or -// the block cache. It will not page in data from the OS cache or data that -// resides in storage. -enum ReadTier { - kReadAllTier = 0x0, // data in memtable, block cache, OS cache or storage - kBlockCacheTier = 0x1, // data in memtable or block cache - kPersistedTier = 0x2, // persisted data. When WAL is disabled, this option - // will skip data in memtable. - // Note that this ReadTier currently only supports - // Get and MultiGet and does not support iterators. - kMemtableTier = 0x3 // data in memtable. used for memtable-only iterators. -}; - -// Options that control read operations -struct ReadOptions { - // If "snapshot" is non-nullptr, read as of the supplied snapshot - // (which must belong to the DB that is being read and which must - // not have been released). If "snapshot" is nullptr, use an implicit - // snapshot of the state at the beginning of this read operation. - // Default: nullptr - const Snapshot* snapshot; - - // `iterate_lower_bound` defines the smallest key at which the backward - // iterator can return an entry. Once the bound is passed, Valid() will be - // false. `iterate_lower_bound` is inclusive ie the bound value is a valid - // entry. - // - // If prefix_extractor is not null, the Seek target and `iterate_lower_bound` - // need to have the same prefix. This is because ordering is not guaranteed - // outside of prefix domain. - // - // Default: nullptr - const Slice* iterate_lower_bound; - - // "iterate_upper_bound" defines the extent upto which the forward iterator - // can returns entries. Once the bound is reached, Valid() will be false. - // "iterate_upper_bound" is exclusive ie the bound value is - // not a valid entry. If prefix_extractor is not null, the Seek target - // and iterate_upper_bound need to have the same prefix. 
- // This is because ordering is not guaranteed outside of prefix domain. - // - // Default: nullptr - const Slice* iterate_upper_bound; - - // RocksDB does auto-readahead for iterators on noticing more than two reads - // for a table file. The readahead starts at 8KB and doubles on every - // additional read upto 256KB. - // This option can help if most of the range scans are large, and if it is - // determined that a larger readahead than that enabled by auto-readahead is - // needed. - // Using a large readahead size (> 2MB) can typically improve the performance - // of forward iteration on spinning disks. - // Default: 0 - size_t readahead_size; - - // A threshold for the number of keys that can be skipped before failing an - // iterator seek as incomplete. The default value of 0 should be used to - // never fail a request as incomplete, even on skipping too many keys. - // Default: 0 - uint64_t max_skippable_internal_keys; - - // Specify if this read request should process data that ALREADY - // resides on a particular cache. If the required data is not - // found at the specified cache, then Status::Incomplete is returned. - // Default: kReadAllTier - ReadTier read_tier; - - // If true, all data read from underlying storage will be - // verified against corresponding checksums. - // Default: true - bool verify_checksums; - - // Should the "data block"/"index block"" read for this iteration be placed in - // block cache? - // Callers may wish to set this field to false for bulk scans. - // This would help not to the change eviction order of existing items in the - // block cache. - // Default: true - bool fill_cache; - - // Specify to create a tailing iterator -- a special iterator that has a - // view of the complete database (i.e. it can also be used to read newly - // added data) and is optimized for sequential reads. It will return records - // that were inserted into the database after the creation of the iterator. 
- // Default: false - // Not supported in ROCKSDB_LITE mode! - bool tailing; - - // This options is not used anymore. It was to turn on a functionality that - // has been removed. - bool managed; - - // Enable a total order seek regardless of index format (e.g. hash index) - // used in the table. Some table format (e.g. plain table) may not support - // this option. - // If true when calling Get(), we also skip prefix bloom when reading from - // block based table. It provides a way to read existing data after - // changing implementation of prefix extractor. - // Default: false - bool total_order_seek; - - // When true, by default use total_order_seek = true, and RocksDB can - // selectively enable prefix seek mode if won't generate a different result - // from total_order_seek, based on seek key, and iterator upper bound. - // Not suppported in ROCKSDB_LITE mode, in the way that even with value true - // prefix mode is not used. - // Default: false - bool auto_prefix_mode; - - // Enforce that the iterator only iterates over the same prefix as the seek. - // This option is effective only for prefix seeks, i.e. prefix_extractor is - // non-null for the column family and total_order_seek is false. Unlike - // iterate_upper_bound, prefix_same_as_start only works within a prefix - // but in both directions. - // Default: false - bool prefix_same_as_start; - - // Keep the blocks loaded by the iterator pinned in memory as long as the - // iterator is not deleted, If used when reading from tables created with - // BlockBasedTableOptions::use_delta_encoding = false, - // Iterator's property "rocksdb.iterator.is-key-pinned" is guaranteed to - // return 1. - // Default: false - bool pin_data; - - // If true, when PurgeObsoleteFile is called in CleanupIteratorState, we - // schedule a background job in the flush job queue and delete obsolete files - // in background. 
- // Default: false - bool background_purge_on_iterator_cleanup; - - // If true, keys deleted using the DeleteRange() API will be visible to - // readers until they are naturally deleted during compaction. This improves - // read performance in DBs with many range deletions. - // Default: false - bool ignore_range_deletions; - - // A callback to determine whether relevant keys for this scan exist in a - // given table based on the table's properties. The callback is passed the - // properties of each table during iteration. If the callback returns false, - // the table will not be scanned. This option only affects Iterators and has - // no impact on point lookups. - // Default: empty (every table will be scanned) - std::function table_filter; - - // Needed to support differential snapshots. Has 2 effects: - // 1) Iterator will skip all internal keys with seqnum < iter_start_seqnum - // 2) if this param > 0 iterator will return INTERNAL keys instead of - // user keys; e.g. return tombstones as well. - // Default: 0 (don't filter by seqnum, return user keys) - SequenceNumber iter_start_seqnum; - - // Timestamp of operation. Read should return the latest data visible to the - // specified timestamp. All timestamps of the same database must be of the - // same length and format. The user is responsible for providing a customized - // compare function via Comparator to order tuples. - // For iterator, iter_start_ts is the lower bound (older) and timestamp - // serves as the upper bound. Versions of the same record that fall in - // the timestamp range will be returned. If iter_start_ts is nullptr, - // only the most recent version visible to timestamp is returned. - // The user-specified timestamp feature is still under active development, - // and the API is subject to change. - // Default: nullptr - const Slice* timestamp; - const Slice* iter_start_ts; - - // Deadline for completing an API call (Get/MultiGet/Seek/Next for now) - // in microseconds. 
- // It should be set to microseconds since epoch, i.e, gettimeofday or - // equivalent plus allowed duration in microseconds. The best way is to use - // env->NowMicros() + some timeout. - // This is best efforts. The call may exceed the deadline if there is IO - // involved and the file system doesn't support deadlines, or due to - // checking for deadline periodically rather than for every key if - // processing a batch - std::chrono::microseconds deadline; - - // A timeout in microseconds to be passed to the underlying FileSystem for - // reads. As opposed to deadline, this determines the timeout for each - // individual file read request. If a MultiGet/Get/Seek/Next etc call - // results in multiple reads, each read can last upto io_timeout us. - std::chrono::microseconds io_timeout; - - // It limits the maximum cumulative value size of the keys in batch while - // reading through MultiGet. Once the cumulative value size exceeds this - // soft limit then all the remaining keys are returned with status Aborted. - // - // Default: std::numeric_limits::max() - uint64_t value_size_soft_limit; - - ReadOptions(); - ReadOptions(bool cksum, bool cache); -}; - -// Options that control write operations -struct WriteOptions { - // If true, the write will be flushed from the operating system - // buffer cache (by calling WritableFile::Sync()) before the write - // is considered complete. If this flag is true, writes will be - // slower. - // - // If this flag is false, and the machine crashes, some recent - // writes may be lost. Note that if it is just the process that - // crashes (i.e., the machine does not reboot), no writes will be - // lost even if sync==false. - // - // In other words, a DB write with sync==false has similar - // crash semantics as the "write()" system call. A DB write - // with sync==true has similar crash semantics to a "write()" - // system call followed by "fdatasync()". 
- // - // Default: false - bool sync; - - // If true, writes will not first go to the write ahead log, - // and the write may get lost after a crash. The backup engine - // relies on write-ahead logs to back up the memtable, so if - // you disable write-ahead logs, you must create backups with - // flush_before_backup=true to avoid losing unflushed memtable data. - // Default: false - bool disableWAL; - - // If true and if user is trying to write to column families that don't exist - // (they were dropped), ignore the write (don't return an error). If there - // are multiple writes in a WriteBatch, other writes will succeed. - // Default: false - bool ignore_missing_column_families; - - // If true and we need to wait or sleep for the write request, fails - // immediately with Status::Incomplete(). - // Default: false - bool no_slowdown; - - // If true, this write request is of lower priority if compaction is - // behind. In this case, no_slowdown = true, the request will be cancelled - // immediately with Status::Incomplete() returned. Otherwise, it will be - // slowed down. The slowdown value is determined by RocksDB to guarantee - // it introduces minimum impacts to high priority writes. - // - // Default: false - bool low_pri; - - // If true, this writebatch will maintain the last insert positions of each - // memtable as hints in concurrent write. It can improve write performance - // in concurrent writes if keys in one writebatch are sequential. In - // non-concurrent writes (when concurrent_memtable_writes is false) this - // option will be ignored. - // - // Default: false - bool memtable_insert_hint_per_batch; - - // Timestamp of write operation, e.g. Put. All timestamps of the same - // database must share the same length and format. The user is also - // responsible for providing a customized compare function via Comparator to - // order tuples. 
If the user wants to enable timestamp, then - // all write operations must be associated with timestamp because RocksDB, as - // a single-node storage engine currently has no knowledge of global time, - // thus has to rely on the application. - // The user-specified timestamp feature is still under active development, - // and the API is subject to change. - const Slice* timestamp; - - WriteOptions() - : sync(false), - disableWAL(false), - ignore_missing_column_families(false), - no_slowdown(false), - low_pri(false), - memtable_insert_hint_per_batch(false), - timestamp(nullptr) {} -}; - -// Options that control flush operations -struct FlushOptions { - // If true, the flush will wait until the flush is done. - // Default: true - bool wait; - // If true, the flush would proceed immediately even it means writes will - // stall for the duration of the flush; if false the operation will wait - // until it's possible to do flush w/o causing stall or until required flush - // is performed by someone else (foreground call or background thread). - // Default: false - bool allow_write_stall; - FlushOptions() : wait(true), allow_write_stall(false) {} -}; - -// Create a Logger from provided DBOptions -extern Status CreateLoggerFromOptions(const std::string& dbname, - const DBOptions& options, - std::shared_ptr* logger); - -// CompactionOptions are used in CompactFiles() call. -struct CompactionOptions { - // Compaction output compression type - // Default: snappy - // If set to `kDisableCompressionOption`, RocksDB will choose compression type - // according to the `ColumnFamilyOptions`, taking into account the output - // level if `compression_per_level` is specified. - CompressionType compression; - // Compaction will create files of size `output_file_size_limit`. - // Default: MAX, which means that compaction will create a single file - uint64_t output_file_size_limit; - // If > 0, it will replace the option in the DBOptions for this compaction. 
- uint32_t max_subcompactions; - - CompactionOptions() - : compression(kSnappyCompression), - output_file_size_limit(std::numeric_limits::max()), - max_subcompactions(0) {} -}; - -// For level based compaction, we can configure if we want to skip/force -// bottommost level compaction. -enum class BottommostLevelCompaction { - // Skip bottommost level compaction - kSkip, - // Only compact bottommost level if there is a compaction filter - // This is the default option - kIfHaveCompactionFilter, - // Always compact bottommost level - kForce, - // Always compact bottommost level but in bottommost level avoid - // double-compacting files created in the same compaction - kForceOptimized, -}; - -// CompactRangeOptions is used by CompactRange() call. -struct CompactRangeOptions { - // If true, no other compaction will run at the same time as this - // manual compaction - bool exclusive_manual_compaction = true; - // If true, compacted files will be moved to the minimum level capable - // of holding the data or given level (specified non-negative target_level). - bool change_level = false; - // If change_level is true and target_level have non-negative value, compacted - // files will be moved to target_level. - int target_level = -1; - // Compaction outputs will be placed in options.db_paths[target_path_id]. - // Behavior is undefined if target_path_id is out of range. - uint32_t target_path_id = 0; - // By default level based compaction will only compact the bottommost level - // if there is a compaction filter - BottommostLevelCompaction bottommost_level_compaction = - BottommostLevelCompaction::kIfHaveCompactionFilter; - // If true, will execute immediately even if doing so would cause the DB to - // enter write stall mode. Otherwise, it'll sleep until load is low enough. - bool allow_write_stall = false; - // If > 0, it will replace the option in the DBOptions for this compaction. 
- uint32_t max_subcompactions = 0; -}; - -// IngestExternalFileOptions is used by IngestExternalFile() -struct IngestExternalFileOptions { - // Can be set to true to move the files instead of copying them. - bool move_files = false; - // If set to true, ingestion falls back to copy when move fails. - bool failed_move_fall_back_to_copy = true; - // If set to false, an ingested file keys could appear in existing snapshots - // that where created before the file was ingested. - bool snapshot_consistency = true; - // If set to false, IngestExternalFile() will fail if the file key range - // overlaps with existing keys or tombstones in the DB. - bool allow_global_seqno = true; - // If set to false and the file key range overlaps with the memtable key range - // (memtable flush required), IngestExternalFile will fail. - bool allow_blocking_flush = true; - // Set to true if you would like duplicate keys in the file being ingested - // to be skipped rather than overwriting existing data under that key. - // Usecase: back-fill of some historical data in the database without - // over-writing existing newer version of data. - // This option could only be used if the DB has been running - // with allow_ingest_behind=true since the dawn of time. - // All files will be ingested at the bottommost level with seqno=0. - bool ingest_behind = false; - // Set to true if you would like to write global_seqno to a given offset in - // the external SST file for backward compatibility. Older versions of - // RocksDB writes a global_seqno to a given offset within ingested SST files, - // and new versions of RocksDB do not. If you ingest an external SST using - // new version of RocksDB and would like to be able to downgrade to an - // older version of RocksDB, you should set 'write_global_seqno' to true. If - // your service is just starting to use the new RocksDB, we recommend that - // you set this option to false, which brings two benefits: - // 1. 
No extra random write for global_seqno during ingestion. - // 2. Without writing external SST file, it's possible to do checksum. - // We have a plan to set this option to false by default in the future. - bool write_global_seqno = true; - // Set to true if you would like to verify the checksums of each block of the - // external SST file before ingestion. - // Warning: setting this to true causes slowdown in file ingestion because - // the external SST file has to be read. - bool verify_checksums_before_ingest = false; - // When verify_checksums_before_ingest = true, RocksDB uses default - // readahead setting to scan the file while verifying checksums before - // ingestion. - // Users can override the default value using this option. - // Using a large readahead size (> 2MB) can typically improve the performance - // of forward iteration on spinning disks. - size_t verify_checksums_readahead_size = 0; - // Set to TRUE if user wants to verify the sst file checksum of ingested - // files. The DB checksum function will generate the checksum of each - // ingested file (if file_checksum_gen_factory is set) and compare the - // checksum function name and checksum with the ingested checksum information. - // - // If this option is set to True: 1) if DB does not enable checksum - // (file_checksum_gen_factory == nullptr), the ingested checksum information - // will be ignored; 2) If DB enable the checksum function, we calculate the - // sst file checksum after the file is moved or copied and compare the - // checksum and checksum name. If checksum or checksum function name does - // not match, ingestion will be failed. If the verification is sucessful, - // checksum and checksum function name will be stored in Manifest. - // If this option is set to FALSE, 1) if DB does not enable checksum, - // the ingested checksum information will be ignored; 2) if DB enable the - // checksum, we only verify the ingested checksum function name and we - // trust the ingested checksum. 
If the checksum function name matches, we - // store the checksum in Manifest. DB does not calculate the checksum during - // ingestion. However, if no checksum information is provided with the - // ingested files, DB will generate the checksum and store in the Manifest. - bool verify_file_checksum = true; -}; - -enum TraceFilterType : uint64_t { - // Trace all the operations - kTraceFilterNone = 0x0, - // Do not trace the get operations - kTraceFilterGet = 0x1 << 0, - // Do not trace the write operations - kTraceFilterWrite = 0x1 << 1 -}; - -// TraceOptions is used for StartTrace -struct TraceOptions { - // To avoid the trace file size grows large than the storage space, - // user can set the max trace file size in Bytes. Default is 64GB - uint64_t max_trace_file_size = uint64_t{64} * 1024 * 1024 * 1024; - // Specify trace sampling option, i.e. capture one per how many requests. - // Default to 1 (capture every request). - uint64_t sampling_frequency = 1; - // Note: The filtering happens before sampling. - uint64_t filter = kTraceFilterNone; -}; - -// ImportColumnFamilyOptions is used by ImportColumnFamily() -struct ImportColumnFamilyOptions { - // Can be set to true to move the files instead of copying them. - bool move_files = false; -}; - -// Options used with DB::GetApproximateSizes() -struct SizeApproximationOptions { - // Defines whether the returned size should include the recently written - // data in the mem-tables. If set to false, include_files must be true. - bool include_memtabtles = false; - // Defines whether the returned size should include data serialized to disk. - // If set to false, include_memtabtles must be true. - bool include_files = true; - // When approximating the files total size that is used to store a keys range - // using DB::GetApproximateSizes, allow approximation with an error margin of - // up to total_files_size * files_size_error_margin. 
This allows to take some - // shortcuts in files size approximation, resulting in better performance, - // while guaranteeing the resulting error is within a reasonable margin. - // E.g., if the value is 0.1, then the error margin of the returned files size - // approximation will be within 10%. - // If the value is non-positive - a more precise yet more CPU intensive - // estimation is performed. - double files_size_error_margin = -1.0; -}; - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/perf_context.h b/dist/darwin_amd64/include/rocksdb/perf_context.h deleted file mode 100644 index 3d61000..0000000 --- a/dist/darwin_amd64/include/rocksdb/perf_context.h +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#include -#include -#include - -#include "rocksdb/perf_level.h" - -namespace ROCKSDB_NAMESPACE { - -// A thread local context for gathering performance counter efficiently -// and transparently. -// Use SetPerfLevel(PerfLevel::kEnableTime) to enable time stats. - -// Break down performance counters by level and store per-level perf context in -// PerfContextByLevel -struct PerfContextByLevel { - // # of times bloom filter has avoided file reads, i.e., negatives. - uint64_t bloom_filter_useful = 0; - // # of times bloom FullFilter has not avoided the reads. - uint64_t bloom_filter_full_positive = 0; - // # of times bloom FullFilter has not avoided the reads and data actually - // exist. 
- uint64_t bloom_filter_full_true_positive = 0; - - // total number of user key returned (only include keys that are found, does - // not include keys that are deleted or merged without a final put - uint64_t user_key_return_count = 0; - - // total nanos spent on reading data from SST files - uint64_t get_from_table_nanos = 0; - - uint64_t block_cache_hit_count = 0; // total number of block cache hits - uint64_t block_cache_miss_count = 0; // total number of block cache misses - - void Reset(); // reset all performance counters to zero -}; - -struct PerfContext { - ~PerfContext(); - - PerfContext() {} - - PerfContext(const PerfContext&); - PerfContext& operator=(const PerfContext&); - PerfContext(PerfContext&&) noexcept; - - void Reset(); // reset all performance counters to zero - - std::string ToString(bool exclude_zero_counters = false) const; - - // enable per level perf context and allocate storage for PerfContextByLevel - void EnablePerLevelPerfContext(); - - // temporarily disable per level perf contxt by setting the flag to false - void DisablePerLevelPerfContext(); - - // free the space for PerfContextByLevel, also disable per level perf context - void ClearPerLevelPerfContext(); - - uint64_t user_key_comparison_count; // total number of user key comparisons - uint64_t block_cache_hit_count; // total number of block cache hits - uint64_t block_read_count; // total number of block reads (with IO) - uint64_t block_read_byte; // total number of bytes from block reads - uint64_t block_read_time; // total nanos spent on block reads - uint64_t block_cache_index_hit_count; // total number of index block hits - uint64_t index_block_read_count; // total number of index block reads - uint64_t block_cache_filter_hit_count; // total number of filter block hits - uint64_t filter_block_read_count; // total number of filter block reads - uint64_t compression_dict_block_read_count; // total number of compression - // dictionary block reads - uint64_t block_checksum_time; 
// total nanos spent on block checksum - uint64_t block_decompress_time; // total nanos spent on block decompression - - uint64_t get_read_bytes; // bytes for vals returned by Get - uint64_t multiget_read_bytes; // bytes for vals returned by MultiGet - uint64_t iter_read_bytes; // bytes for keys/vals decoded by iterator - - // total number of internal keys skipped over during iteration. - // There are several reasons for it: - // 1. when calling Next(), the iterator is in the position of the previous - // key, so that we'll need to skip it. It means this counter will always - // be incremented in Next(). - // 2. when calling Next(), we need to skip internal entries for the previous - // keys that are overwritten. - // 3. when calling Next(), Seek() or SeekToFirst(), after previous key - // before calling Next(), the seek key in Seek() or the beginning for - // SeekToFirst(), there may be one or more deleted keys before the next - // valid key that the operation should place the iterator to. We need - // to skip both of the tombstone and updates hidden by the tombstones. The - // tombstones are not included in this counter, while previous updates - // hidden by the tombstones will be included here. - // 4. symmetric cases for Prev() and SeekToLast() - // internal_recent_skipped_count is not included in this counter. - // - uint64_t internal_key_skipped_count; - // Total number of deletes and single deletes skipped over during iteration - // When calling Next(), Seek() or SeekToFirst(), after previous position - // before calling Next(), the seek key in Seek() or the beginning for - // SeekToFirst(), there may be one or more deleted keys before the next valid - // key. Every deleted key is counted once. We don't recount here if there are - // still older updates invalidated by the tombstones. - // - uint64_t internal_delete_skipped_count; - // How many times iterators skipped over internal keys that are more recent - // than the snapshot that iterator is using. 
- // - uint64_t internal_recent_skipped_count; - // How many values were fed into merge operator by iterators. - // - uint64_t internal_merge_count; - - uint64_t get_snapshot_time; // total nanos spent on getting snapshot - uint64_t get_from_memtable_time; // total nanos spent on querying memtables - uint64_t get_from_memtable_count; // number of mem tables queried - // total nanos spent after Get() finds a key - uint64_t get_post_process_time; - uint64_t get_from_output_files_time; // total nanos reading from output files - // total nanos spent on seeking memtable - uint64_t seek_on_memtable_time; - // number of seeks issued on memtable - // (including SeekForPrev but not SeekToFirst and SeekToLast) - uint64_t seek_on_memtable_count; - // number of Next()s issued on memtable - uint64_t next_on_memtable_count; - // number of Prev()s issued on memtable - uint64_t prev_on_memtable_count; - // total nanos spent on seeking child iters - uint64_t seek_child_seek_time; - // number of seek issued in child iterators - uint64_t seek_child_seek_count; - uint64_t seek_min_heap_time; // total nanos spent on the merge min heap - uint64_t seek_max_heap_time; // total nanos spent on the merge max heap - // total nanos spent on seeking the internal entries - uint64_t seek_internal_seek_time; - // total nanos spent on iterating internal entries to find the next user entry - uint64_t find_next_user_entry_time; - - // This group of stats provide a breakdown of time spent by Write(). - // May be inaccurate when 2PC, two_write_queues or enable_pipelined_write - // are enabled. - // - // total nanos spent on writing to WAL - uint64_t write_wal_time; - // total nanos spent on writing to mem tables - uint64_t write_memtable_time; - // total nanos spent on delaying or throttling write - uint64_t write_delay_time; - // total nanos spent on switching memtable/wal and scheduling - // flushes/compactions. 
- uint64_t write_scheduling_flushes_compactions_time; - // total nanos spent on writing a record, excluding the above four things - uint64_t write_pre_and_post_process_time; - - // time spent waiting for other threads of the batch group - uint64_t write_thread_wait_nanos; - - // time spent on acquiring DB mutex. - uint64_t db_mutex_lock_nanos; - // Time spent on waiting with a condition variable created with DB mutex. - uint64_t db_condition_wait_nanos; - // Time spent on merge operator. - uint64_t merge_operator_time_nanos; - - // Time spent on reading index block from block cache or SST file - uint64_t read_index_block_nanos; - // Time spent on reading filter block from block cache or SST file - uint64_t read_filter_block_nanos; - // Time spent on creating data block iterator - uint64_t new_table_block_iter_nanos; - // Time spent on creating a iterator of an SST file. - uint64_t new_table_iterator_nanos; - // Time spent on seeking a key in data/index blocks - uint64_t block_seek_nanos; - // Time spent on finding or creating a table reader - uint64_t find_table_nanos; - // total number of mem table bloom hits - uint64_t bloom_memtable_hit_count; - // total number of mem table bloom misses - uint64_t bloom_memtable_miss_count; - // total number of SST table bloom hits - uint64_t bloom_sst_hit_count; - // total number of SST table bloom misses - uint64_t bloom_sst_miss_count; - - // Time spent waiting on key locks in transaction lock manager. - uint64_t key_lock_wait_time; - // number of times acquiring a lock was blocked by another transaction. - uint64_t key_lock_wait_count; - - // Total time spent in Env filesystem operations. These are only populated - // when TimedEnv is used. 
- uint64_t env_new_sequential_file_nanos; - uint64_t env_new_random_access_file_nanos; - uint64_t env_new_writable_file_nanos; - uint64_t env_reuse_writable_file_nanos; - uint64_t env_new_random_rw_file_nanos; - uint64_t env_new_directory_nanos; - uint64_t env_file_exists_nanos; - uint64_t env_get_children_nanos; - uint64_t env_get_children_file_attributes_nanos; - uint64_t env_delete_file_nanos; - uint64_t env_create_dir_nanos; - uint64_t env_create_dir_if_missing_nanos; - uint64_t env_delete_dir_nanos; - uint64_t env_get_file_size_nanos; - uint64_t env_get_file_modification_time_nanos; - uint64_t env_rename_file_nanos; - uint64_t env_link_file_nanos; - uint64_t env_lock_file_nanos; - uint64_t env_unlock_file_nanos; - uint64_t env_new_logger_nanos; - - uint64_t get_cpu_nanos; - uint64_t iter_next_cpu_nanos; - uint64_t iter_prev_cpu_nanos; - uint64_t iter_seek_cpu_nanos; - - // Time spent in encrypting data. Populated when EncryptedEnv is used. - uint64_t encrypt_data_nanos; - // Time spent in decrypting data. Populated when EncryptedEnv is used. - uint64_t decrypt_data_nanos; - - std::map* level_to_perf_context = nullptr; - bool per_level_perf_context_enabled = false; -}; - -// Get Thread-local PerfContext object pointer -// if defined(NPERF_CONTEXT), then the pointer is not thread-local -PerfContext* get_perf_context(); - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/perf_level.h b/dist/darwin_amd64/include/rocksdb/perf_level.h deleted file mode 100644 index e6a7689..0000000 --- a/dist/darwin_amd64/include/rocksdb/perf_level.h +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). 
- -#pragma once - -#include -#include - -#include "rocksdb/rocksdb_namespace.h" - -namespace ROCKSDB_NAMESPACE { - -// How much perf stats to collect. Affects perf_context and iostats_context. -enum PerfLevel : unsigned char { - kUninitialized = 0, // unknown setting - kDisable = 1, // disable perf stats - kEnableCount = 2, // enable only count stats - kEnableTimeExceptForMutex = 3, // Other than count stats, also enable time - // stats except for mutexes - // Other than time, also measure CPU time counters. Still don't measure - // time (neither wall time nor CPU time) for mutexes. - kEnableTimeAndCPUTimeExceptForMutex = 4, - kEnableTime = 5, // enable count and time stats - kOutOfBounds = 6 // N.B. Must always be the last value! -}; - -// set the perf stats level for current thread -void SetPerfLevel(PerfLevel level); - -// get current perf stats level for current thread -PerfLevel GetPerfLevel(); - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/persistent_cache.h b/dist/darwin_amd64/include/rocksdb/persistent_cache.h deleted file mode 100644 index e2dcfca..0000000 --- a/dist/darwin_amd64/include/rocksdb/persistent_cache.h +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) 2013, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. -#pragma once - -#include -#include -#include - -#include "rocksdb/env.h" -#include "rocksdb/slice.h" -#include "rocksdb/statistics.h" -#include "rocksdb/status.h" - -namespace ROCKSDB_NAMESPACE { - -// PersistentCache -// -// Persistent cache interface for caching IO pages on a persistent medium. 
The -// cache interface is specifically designed for persistent read cache. -class PersistentCache { - public: - typedef std::vector> StatsType; - - virtual ~PersistentCache() {} - - // Insert to page cache - // - // page_key Identifier to identify a page uniquely across restarts - // data Page data - // size Size of the page - virtual Status Insert(const Slice& key, const char* data, - const size_t size) = 0; - - // Lookup page cache by page identifier - // - // page_key Page identifier - // buf Buffer where the data should be copied - // size Size of the page - virtual Status Lookup(const Slice& key, std::unique_ptr* data, - size_t* size) = 0; - - // Is cache storing uncompressed data ? - // - // True if the cache is configured to store uncompressed data else false - virtual bool IsCompressed() = 0; - - // Return stats as map of {string, double} per-tier - // - // Persistent cache can be initialized as a tier of caches. The stats are per - // tire top-down - virtual StatsType Stats() = 0; - - virtual std::string GetPrintableOptions() const = 0; - - // Return a new numeric id. May be used by multiple clients who are - // sharding the same persistent cache to partition the key space. Typically - // the client will allocate a new id at startup and prepend the id to its - // cache keys. - virtual uint64_t NewId() = 0; -}; - -// Factor method to create a new persistent cache -Status NewPersistentCache(Env* const env, const std::string& path, - const uint64_t size, - const std::shared_ptr& log, - const bool optimized_for_nvm, - std::shared_ptr* cache); -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/rate_limiter.h b/dist/darwin_amd64/include/rocksdb/rate_limiter.h deleted file mode 100644 index 0ee89f5..0000000 --- a/dist/darwin_amd64/include/rocksdb/rate_limiter.h +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
-// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. - -#pragma once - -#include "rocksdb/env.h" -#include "rocksdb/statistics.h" - -namespace ROCKSDB_NAMESPACE { - -class RateLimiter { - public: - enum class OpType { - // Limitation: we currently only invoke Request() with OpType::kRead for - // compactions when DBOptions::new_table_reader_for_compaction_inputs is set - kRead, - kWrite, - }; - enum class Mode { - kReadsOnly, - kWritesOnly, - kAllIo, - }; - - // For API compatibility, default to rate-limiting writes only. - explicit RateLimiter(Mode mode = Mode::kWritesOnly) : mode_(mode) {} - - virtual ~RateLimiter() {} - - // This API allows user to dynamically change rate limiter's bytes per second. - // REQUIRED: bytes_per_second > 0 - virtual void SetBytesPerSecond(int64_t bytes_per_second) = 0; - - // Deprecated. New RateLimiter derived classes should override - // Request(const int64_t, const Env::IOPriority, Statistics*) or - // Request(const int64_t, const Env::IOPriority, Statistics*, OpType) - // instead. - // - // Request for token for bytes. If this request can not be satisfied, the call - // is blocked. Caller is responsible to make sure - // bytes <= GetSingleBurstBytes() - virtual void Request(const int64_t /*bytes*/, const Env::IOPriority /*pri*/) { - assert(false); - } - - // Request for token for bytes and potentially update statistics. If this - // request can not be satisfied, the call is blocked. Caller is responsible to - // make sure bytes <= GetSingleBurstBytes(). 
- virtual void Request(const int64_t bytes, const Env::IOPriority pri, - Statistics* /* stats */) { - // For API compatibility, default implementation calls the older API in - // which statistics are unsupported. - Request(bytes, pri); - } - - // Requests token to read or write bytes and potentially updates statistics. - // - // If this request can not be satisfied, the call is blocked. Caller is - // responsible to make sure bytes <= GetSingleBurstBytes(). - virtual void Request(const int64_t bytes, const Env::IOPriority pri, - Statistics* stats, OpType op_type) { - if (IsRateLimited(op_type)) { - Request(bytes, pri, stats); - } - } - - // Requests token to read or write bytes and potentially updates statistics. - // Takes into account GetSingleBurstBytes() and alignment (e.g., in case of - // direct I/O) to allocate an appropriate number of bytes, which may be less - // than the number of bytes requested. - virtual size_t RequestToken(size_t bytes, size_t alignment, - Env::IOPriority io_priority, Statistics* stats, - RateLimiter::OpType op_type); - - // Max bytes can be granted in a single burst - virtual int64_t GetSingleBurstBytes() const = 0; - - // Total bytes that go through rate limiter - virtual int64_t GetTotalBytesThrough( - const Env::IOPriority pri = Env::IO_TOTAL) const = 0; - - // Total # of requests that go through rate limiter - virtual int64_t GetTotalRequests( - const Env::IOPriority pri = Env::IO_TOTAL) const = 0; - - virtual int64_t GetBytesPerSecond() const = 0; - - virtual bool IsRateLimited(OpType op_type) { - if ((mode_ == RateLimiter::Mode::kWritesOnly && - op_type == RateLimiter::OpType::kRead) || - (mode_ == RateLimiter::Mode::kReadsOnly && - op_type == RateLimiter::OpType::kWrite)) { - return false; - } - return true; - } - - protected: - Mode GetMode() { return mode_; } - - private: - const Mode mode_; -}; - -// Create a RateLimiter object, which can be shared among RocksDB instances to -// control write rate of flush and compaction. 
-// @rate_bytes_per_sec: this is the only parameter you want to set most of the -// time. It controls the total write rate of compaction and flush in bytes per -// second. Currently, RocksDB does not enforce rate limit for anything other -// than flush and compaction, e.g. write to WAL. -// @refill_period_us: this controls how often tokens are refilled. For example, -// when rate_bytes_per_sec is set to 10MB/s and refill_period_us is set to -// 100ms, then 1MB is refilled every 100ms internally. Larger value can lead to -// burstier writes while smaller value introduces more CPU overhead. -// The default should work for most cases. -// @fairness: RateLimiter accepts high-pri requests and low-pri requests. -// A low-pri request is usually blocked in favor of hi-pri request. Currently, -// RocksDB assigns low-pri to request from compaction and high-pri to request -// from flush. Low-pri requests can get blocked if flush requests come in -// continuously. This fairness parameter grants low-pri requests permission by -// 1/fairness chance even though high-pri requests exist to avoid starvation. -// You should be good by leaving it at default 10. -// @mode: Mode indicates which types of operations count against the limit. -// @auto_tuned: Enables dynamic adjustment of rate limit within the range -// `[rate_bytes_per_sec / 20, rate_bytes_per_sec]`, according to -// the recent demand for background I/O. -extern RateLimiter* NewGenericRateLimiter( - int64_t rate_bytes_per_sec, int64_t refill_period_us = 100 * 1000, - int32_t fairness = 10, - RateLimiter::Mode mode = RateLimiter::Mode::kWritesOnly, - bool auto_tuned = false); - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/rocksdb_namespace.h b/dist/darwin_amd64/include/rocksdb/rocksdb_namespace.h deleted file mode 100644 index e9f8620..0000000 --- a/dist/darwin_amd64/include/rocksdb/rocksdb_namespace.h +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. 
All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#ifndef ROCKSDB_NAMESPACE -#define ROCKSDB_NAMESPACE rocksdb -#endif diff --git a/dist/darwin_amd64/include/rocksdb/slice.h b/dist/darwin_amd64/include/rocksdb/slice.h deleted file mode 100644 index c17b32c..0000000 --- a/dist/darwin_amd64/include/rocksdb/slice.h +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. -// -// Slice is a simple structure containing a pointer into some external -// storage and a size. The user of a Slice must ensure that the slice -// is not used after the corresponding external storage has been -// deallocated. -// -// Multiple threads can invoke const methods on a Slice without -// external synchronization, but if any of the threads may call a -// non-const method, all threads accessing the same Slice must use -// external synchronization. - -#pragma once - -#include -#include -#include -#include -#include - -#ifdef __cpp_lib_string_view -#include -#endif - -#include "rocksdb/cleanable.h" - -namespace ROCKSDB_NAMESPACE { - -class Slice { - public: - // Create an empty slice. - Slice() : data_(""), size_(0) {} - - // Create a slice that refers to d[0,n-1]. 
- Slice(const char* d, size_t n) : data_(d), size_(n) {} - - // Create a slice that refers to the contents of "s" - /* implicit */ - Slice(const std::string& s) : data_(s.data()), size_(s.size()) {} - -#ifdef __cpp_lib_string_view - // Create a slice that refers to the same contents as "sv" - /* implicit */ - Slice(std::string_view sv) : data_(sv.data()), size_(sv.size()) {} -#endif - - // Create a slice that refers to s[0,strlen(s)-1] - /* implicit */ - Slice(const char* s) : data_(s) { size_ = (s == nullptr) ? 0 : strlen(s); } - - // Create a single slice from SliceParts using buf as storage. - // buf must exist as long as the returned Slice exists. - Slice(const struct SliceParts& parts, std::string* buf); - - // Return a pointer to the beginning of the referenced data - const char* data() const { return data_; } - - // Return the length (in bytes) of the referenced data - size_t size() const { return size_; } - - // Return true iff the length of the referenced data is zero - bool empty() const { return size_ == 0; } - - // Return the ith byte in the referenced data. - // REQUIRES: n < size() - char operator[](size_t n) const { - assert(n < size()); - return data_[n]; - } - - // Change this slice to refer to an empty array - void clear() { - data_ = ""; - size_ = 0; - } - - // Drop the first "n" bytes from this slice. - void remove_prefix(size_t n) { - assert(n <= size()); - data_ += n; - size_ -= n; - } - - void remove_suffix(size_t n) { - assert(n <= size()); - size_ -= n; - } - - // Return a string that contains the copy of the referenced data. - // when hex is true, returns a string of twice the length hex encoded (0-9A-F) - std::string ToString(bool hex = false) const; - -#ifdef __cpp_lib_string_view - // Return a string_view that references the same data as this slice. 
- std::string_view ToStringView() const { - return std::string_view(data_, size_); - } -#endif - - // Decodes the current slice interpreted as an hexadecimal string into result, - // if successful returns true, if this isn't a valid hex string - // (e.g not coming from Slice::ToString(true)) DecodeHex returns false. - // This slice is expected to have an even number of 0-9A-F characters - // also accepts lowercase (a-f) - bool DecodeHex(std::string* result) const; - - // Three-way comparison. Returns value: - // < 0 iff "*this" < "b", - // == 0 iff "*this" == "b", - // > 0 iff "*this" > "b" - int compare(const Slice& b) const; - - // Return true iff "x" is a prefix of "*this" - bool starts_with(const Slice& x) const { - return ((size_ >= x.size_) && (memcmp(data_, x.data_, x.size_) == 0)); - } - - bool ends_with(const Slice& x) const { - return ((size_ >= x.size_) && - (memcmp(data_ + size_ - x.size_, x.data_, x.size_) == 0)); - } - - // Compare two slices and returns the first byte where they differ - size_t difference_offset(const Slice& b) const; - - // private: make these public for rocksdbjni access - const char* data_; - size_t size_; - - // Intentionally copyable -}; - -/** - * A Slice that can be pinned with some cleanup tasks, which will be run upon - * ::Reset() or object destruction, whichever is invoked first. This can be used - * to avoid memcpy by having the PinnableSlice object referring to the data - * that is locked in the memory and release them after the data is consumed. - */ -class PinnableSlice : public Slice, public Cleanable { - public: - PinnableSlice() { buf_ = &self_space_; } - explicit PinnableSlice(std::string* buf) { buf_ = buf; } - - PinnableSlice(PinnableSlice&& other); - PinnableSlice& operator=(PinnableSlice&& other); - - // No copy constructor and copy assignment allowed. 
- PinnableSlice(PinnableSlice&) = delete; - PinnableSlice& operator=(PinnableSlice&) = delete; - - inline void PinSlice(const Slice& s, CleanupFunction f, void* arg1, - void* arg2) { - assert(!pinned_); - pinned_ = true; - data_ = s.data(); - size_ = s.size(); - RegisterCleanup(f, arg1, arg2); - assert(pinned_); - } - - inline void PinSlice(const Slice& s, Cleanable* cleanable) { - assert(!pinned_); - pinned_ = true; - data_ = s.data(); - size_ = s.size(); - cleanable->DelegateCleanupsTo(this); - assert(pinned_); - } - - inline void PinSelf(const Slice& slice) { - assert(!pinned_); - buf_->assign(slice.data(), slice.size()); - data_ = buf_->data(); - size_ = buf_->size(); - assert(!pinned_); - } - - inline void PinSelf() { - assert(!pinned_); - data_ = buf_->data(); - size_ = buf_->size(); - assert(!pinned_); - } - - void remove_suffix(size_t n) { - assert(n <= size()); - if (pinned_) { - size_ -= n; - } else { - buf_->erase(size() - n, n); - PinSelf(); - } - } - - void remove_prefix(size_t n) { - assert(n <= size()); - if (pinned_) { - data_ += n; - size_ -= n; - } else { - buf_->erase(0, n); - PinSelf(); - } - } - - void Reset() { - Cleanable::Reset(); - pinned_ = false; - size_ = 0; - } - - inline std::string* GetSelf() { return buf_; } - - inline bool IsPinned() const { return pinned_; } - - private: - friend class PinnableSlice4Test; - std::string self_space_; - std::string* buf_; - bool pinned_ = false; -}; - -// A set of Slices that are virtually concatenated together. 'parts' points -// to an array of Slices. The number of elements in the array is 'num_parts'. 
-struct SliceParts { - SliceParts(const Slice* _parts, int _num_parts) - : parts(_parts), num_parts(_num_parts) {} - SliceParts() : parts(nullptr), num_parts(0) {} - - const Slice* parts; - int num_parts; -}; - -inline bool operator==(const Slice& x, const Slice& y) { - return ((x.size() == y.size()) && - (memcmp(x.data(), y.data(), x.size()) == 0)); -} - -inline bool operator!=(const Slice& x, const Slice& y) { return !(x == y); } - -inline int Slice::compare(const Slice& b) const { - assert(data_ != nullptr && b.data_ != nullptr); - const size_t min_len = (size_ < b.size_) ? size_ : b.size_; - int r = memcmp(data_, b.data_, min_len); - if (r == 0) { - if (size_ < b.size_) - r = -1; - else if (size_ > b.size_) - r = +1; - } - return r; -} - -inline size_t Slice::difference_offset(const Slice& b) const { - size_t off = 0; - const size_t len = (size_ < b.size_) ? size_ : b.size_; - for (; off < len; off++) { - if (data_[off] != b.data_[off]) break; - } - return off; -} - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/slice_transform.h b/dist/darwin_amd64/include/rocksdb/slice_transform.h deleted file mode 100644 index 54f61f9..0000000 --- a/dist/darwin_amd64/include/rocksdb/slice_transform.h +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// Copyright (c) 2012 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. -// -// Class for specifying user-defined functions which perform a -// transformation on a slice. It is not required that every slice -// belong to the domain and/or range of a function. 
Subclasses should -// define InDomain and InRange to determine which slices are in either -// of these sets respectively. - -#pragma once - -#include - -#include "rocksdb/rocksdb_namespace.h" - -namespace ROCKSDB_NAMESPACE { - -class Slice; - -/* - * A SliceTransform is a generic pluggable way of transforming one string - * to another. Its primary use-case is in configuring rocksdb - * to store prefix blooms by setting prefix_extractor in - * ColumnFamilyOptions. - */ -class SliceTransform { - public: - virtual ~SliceTransform(){}; - - // Return the name of this transformation. - virtual const char* Name() const = 0; - - // Extract a prefix from a specified key. This method is called when - // a key is inserted into the db, and the returned slice is used to - // create a bloom filter. - virtual Slice Transform(const Slice& key) const = 0; - - // Determine whether the specified key is compatible with the logic - // specified in the Transform method. This method is invoked for every - // key that is inserted into the db. If this method returns true, - // then Transform is called to translate the key to its prefix and - // that returned prefix is inserted into the bloom filter. If this - // method returns false, then the call to Transform is skipped and - // no prefix is inserted into the bloom filters. - // - // For example, if the Transform method operates on a fixed length - // prefix of size 4, then an invocation to InDomain("abc") returns - // false because the specified key length(3) is shorter than the - // prefix size of 4. - // - // Wiki documentation here: - // https://github.com/facebook/rocksdb/wiki/Prefix-Seek-API-Changes - // - virtual bool InDomain(const Slice& key) const = 0; - - // This is currently not used and remains here for backward compatibility. - virtual bool InRange(const Slice& /*dst*/) const { return false; } - - // Some SliceTransform will have a full length which can be used to - // determine if two keys are consecuitive. 
Can be disabled by always - // returning 0 - virtual bool FullLengthEnabled(size_t* /*len*/) const { return false; } - - // Transform(s)=Transform(`prefix`) for any s with `prefix` as a prefix. - // - // This function is not used by RocksDB, but for users. If users pass - // Options by string to RocksDB, they might not know what prefix extractor - // they are using. This function is to help users can determine: - // if they want to iterate all keys prefixing `prefix`, whether it is - // safe to use prefix bloom filter and seek to key `prefix`. - // If this function returns true, this means a user can Seek() to a prefix - // using the bloom filter. Otherwise, user needs to skip the bloom filter - // by setting ReadOptions.total_order_seek = true. - // - // Here is an example: Suppose we implement a slice transform that returns - // the first part of the string after splitting it using delimiter ",": - // 1. SameResultWhenAppended("abc,") should return true. If applying prefix - // bloom filter using it, all slices matching "abc:.*" will be extracted - // to "abc,", so any SST file or memtable containing any of those key - // will not be filtered out. - // 2. SameResultWhenAppended("abc") should return false. A user will not - // guaranteed to see all the keys matching "abc.*" if a user seek to "abc" - // against a DB with the same setting. If one SST file only contains - // "abcd,e", the file can be filtered out and the key will be invisible. - // - // i.e., an implementation always returning false is safe. 
- virtual bool SameResultWhenAppended(const Slice& /*prefix*/) const { - return false; - } -}; - -extern const SliceTransform* NewFixedPrefixTransform(size_t prefix_len); - -extern const SliceTransform* NewCappedPrefixTransform(size_t cap_len); - -extern const SliceTransform* NewNoopTransform(); - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/snapshot.h b/dist/darwin_amd64/include/rocksdb/snapshot.h deleted file mode 100644 index 6a7212d..0000000 --- a/dist/darwin_amd64/include/rocksdb/snapshot.h +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#include "rocksdb/types.h" - -namespace ROCKSDB_NAMESPACE { - -class DB; - -// Abstract handle to particular state of a DB. -// A Snapshot is an immutable object and can therefore be safely -// accessed from multiple threads without any external synchronization. -// -// To Create a Snapshot, call DB::GetSnapshot(). -// To Destroy a Snapshot, call DB::ReleaseSnapshot(snapshot). -class Snapshot { - public: - // returns Snapshot's sequence number - virtual SequenceNumber GetSequenceNumber() const = 0; - - protected: - virtual ~Snapshot(); -}; - -// Simple RAII wrapper class for Snapshot. -// Constructing this object will create a snapshot. Destructing will -// release the snapshot. -class ManagedSnapshot { - public: - explicit ManagedSnapshot(DB* db); - - // Instead of creating a snapshot, take ownership of the input snapshot. 
- ManagedSnapshot(DB* db, const Snapshot* _snapshot); - - ~ManagedSnapshot(); - - const Snapshot* snapshot(); - - private: - DB* db_; - const Snapshot* snapshot_; -}; - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/sst_dump_tool.h b/dist/darwin_amd64/include/rocksdb/sst_dump_tool.h deleted file mode 100644 index 9261ba4..0000000 --- a/dist/darwin_amd64/include/rocksdb/sst_dump_tool.h +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -#ifndef ROCKSDB_LITE -#pragma once - -#include "rocksdb/options.h" - -namespace ROCKSDB_NAMESPACE { - -class SSTDumpTool { - public: - int Run(int argc, char const* const* argv, Options options = Options()); -}; - -} // namespace ROCKSDB_NAMESPACE - -#endif // ROCKSDB_LITE diff --git a/dist/darwin_amd64/include/rocksdb/sst_file_manager.h b/dist/darwin_amd64/include/rocksdb/sst_file_manager.h deleted file mode 100644 index 350dec7..0000000 --- a/dist/darwin_amd64/include/rocksdb/sst_file_manager.h +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#include -#include -#include -#include - -#include "rocksdb/file_system.h" -#include "rocksdb/statistics.h" -#include "rocksdb/status.h" - -namespace ROCKSDB_NAMESPACE { - -class Env; -class Logger; - -// SstFileManager is used to track SST files in the DB and control their -// deletion rate. -// All SstFileManager public functions are thread-safe. -// SstFileManager is not extensible. 
-class SstFileManager { - public: - virtual ~SstFileManager() {} - - // Update the maximum allowed space that should be used by RocksDB, if - // the total size of the SST files exceeds max_allowed_space, writes to - // RocksDB will fail. - // - // Setting max_allowed_space to 0 will disable this feature; maximum allowed - // space will be infinite (Default value). - // - // thread-safe. - virtual void SetMaxAllowedSpaceUsage(uint64_t max_allowed_space) = 0; - - // Set the amount of buffer room each compaction should be able to leave. - // In other words, at its maximum disk space consumption, the compaction - // should still leave compaction_buffer_size available on the disk so that - // other background functions may continue, such as logging and flushing. - virtual void SetCompactionBufferSize(uint64_t compaction_buffer_size) = 0; - - // Return true if the total size of SST files exceeded the maximum allowed - // space usage. - // - // thread-safe. - virtual bool IsMaxAllowedSpaceReached() = 0; - - // Returns true if the total size of SST files as well as estimated size - // of ongoing compactions exceeds the maximums allowed space usage. - virtual bool IsMaxAllowedSpaceReachedIncludingCompactions() = 0; - - // Return the total size of all tracked files. - // thread-safe - virtual uint64_t GetTotalSize() = 0; - - // Return a map containing all tracked files and their corresponding sizes. - // thread-safe - virtual std::unordered_map GetTrackedFiles() = 0; - - // Return delete rate limit in bytes per second. - // thread-safe - virtual int64_t GetDeleteRateBytesPerSecond() = 0; - - // Update the delete rate limit in bytes per second. 
- // zero means disable delete rate limiting and delete files immediately - // thread-safe - virtual void SetDeleteRateBytesPerSecond(int64_t delete_rate) = 0; - - // Return trash/DB size ratio where new files will be deleted immediately - // thread-safe - virtual double GetMaxTrashDBRatio() = 0; - - // Update trash/DB size ratio where new files will be deleted immediately - // thread-safe - virtual void SetMaxTrashDBRatio(double ratio) = 0; - - // Return the total size of trash files - // thread-safe - virtual uint64_t GetTotalTrashSize() = 0; - - // Set the statistics ptr to dump the stat information - virtual void SetStatisticsPtr(const std::shared_ptr& stats) = 0; -}; - -// Create a new SstFileManager that can be shared among multiple RocksDB -// instances to track SST file and control there deletion rate. -// Even though SstFileManager don't track WAL files but it still control -// there deletion rate. -// -// @param env: Pointer to Env object, please see "rocksdb/env.h". -// @param fs: Pointer to FileSystem object (rocksdb/file_system.h" -// @param info_log: If not nullptr, info_log will be used to log errors. -// -// == Deletion rate limiting specific arguments == -// @param trash_dir: Deprecated, this argument have no effect -// @param rate_bytes_per_sec: How many bytes should be deleted per second, If -// this value is set to 1024 (1 Kb / sec) and we deleted a file of size 4 Kb -// in 1 second, we will wait for another 3 seconds before we delete other -// files, Set to 0 to disable deletion rate limiting. -// This option also affect the delete rate of WAL files in the DB. -// @param delete_existing_trash: Deprecated, this argument have no effect, but -// if user provide trash_dir we will schedule deletes for files in the dir -// @param status: If not nullptr, status will contain any errors that happened -// during creating the missing trash_dir or deleting existing files in trash. 
-// @param max_trash_db_ratio: If the trash size constitutes for more than this -// fraction of the total DB size we will start deleting new files passed to -// DeleteScheduler immediately -// @param bytes_max_delete_chunk: if a file to delete is larger than delete -// chunk, ftruncate the file by this size each time, rather than dropping the -// whole file. 0 means to always delete the whole file. If the file has more -// than one linked names, the file will be deleted as a whole. Either way, -// `rate_bytes_per_sec` will be appreciated. NOTE that with this option, -// files already renamed as a trash may be partial, so users should not -// directly recover them without checking. -extern SstFileManager* NewSstFileManager( - Env* env, std::shared_ptr fs, - std::shared_ptr info_log = nullptr, - const std::string& trash_dir = "", int64_t rate_bytes_per_sec = 0, - bool delete_existing_trash = true, Status* status = nullptr, - double max_trash_db_ratio = 0.25, - uint64_t bytes_max_delete_chunk = 64 * 1024 * 1024); - -// Same as above, but takes a pointer to a legacy Env object, instead of -// Env and FileSystem objects -extern SstFileManager* NewSstFileManager( - Env* env, std::shared_ptr info_log = nullptr, - std::string trash_dir = "", int64_t rate_bytes_per_sec = 0, - bool delete_existing_trash = true, Status* status = nullptr, - double max_trash_db_ratio = 0.25, - uint64_t bytes_max_delete_chunk = 64 * 1024 * 1024); - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/sst_file_reader.h b/dist/darwin_amd64/include/rocksdb/sst_file_reader.h deleted file mode 100644 index 4b86424..0000000 --- a/dist/darwin_amd64/include/rocksdb/sst_file_reader.h +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). 
- -#pragma once - -#ifndef ROCKSDB_LITE - -#include "rocksdb/iterator.h" -#include "rocksdb/options.h" -#include "rocksdb/slice.h" -#include "rocksdb/table_properties.h" - -namespace ROCKSDB_NAMESPACE { - -// SstFileReader is used to read sst files that are generated by DB or -// SstFileWriter. -class SstFileReader { - public: - SstFileReader(const Options& options); - - ~SstFileReader(); - - // Prepares to read from the file located at "file_path". - Status Open(const std::string& file_path); - - // Returns a new iterator over the table contents. - // Most read options provide the same control as we read from DB. - // If "snapshot" is nullptr, the iterator returns only the latest keys. - Iterator* NewIterator(const ReadOptions& options); - - std::shared_ptr GetTableProperties() const; - - // Verifies whether there is corruption in this table. - Status VerifyChecksum(const ReadOptions& /*read_options*/); - - Status VerifyChecksum() { return VerifyChecksum(ReadOptions()); } - - private: - struct Rep; - std::unique_ptr rep_; -}; - -} // namespace ROCKSDB_NAMESPACE - -#endif // !ROCKSDB_LITE diff --git a/dist/darwin_amd64/include/rocksdb/sst_file_writer.h b/dist/darwin_amd64/include/rocksdb/sst_file_writer.h deleted file mode 100644 index c7a8203..0000000 --- a/dist/darwin_amd64/include/rocksdb/sst_file_writer.h +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). 
- -#pragma once - -#ifndef ROCKSDB_LITE - -#include -#include - -#include "rocksdb/env.h" -#include "rocksdb/options.h" -#include "rocksdb/table_properties.h" -#include "rocksdb/types.h" - -#if defined(__GNUC__) || defined(__clang__) -#define ROCKSDB_DEPRECATED_FUNC __attribute__((__deprecated__)) -#elif _WIN32 -#define ROCKSDB_DEPRECATED_FUNC __declspec(deprecated) -#endif - -namespace ROCKSDB_NAMESPACE { - -class Comparator; - -// ExternalSstFileInfo include information about sst files created -// using SstFileWriter. -struct ExternalSstFileInfo { - ExternalSstFileInfo() - : file_path(""), - smallest_key(""), - largest_key(""), - smallest_range_del_key(""), - largest_range_del_key(""), - file_checksum(""), - file_checksum_func_name(""), - sequence_number(0), - file_size(0), - num_entries(0), - num_range_del_entries(0), - version(0) {} - - ExternalSstFileInfo(const std::string& _file_path, - const std::string& _smallest_key, - const std::string& _largest_key, - SequenceNumber _sequence_number, uint64_t _file_size, - int32_t _num_entries, int32_t _version) - : file_path(_file_path), - smallest_key(_smallest_key), - largest_key(_largest_key), - smallest_range_del_key(""), - largest_range_del_key(""), - file_checksum(""), - file_checksum_func_name(""), - sequence_number(_sequence_number), - file_size(_file_size), - num_entries(_num_entries), - num_range_del_entries(0), - version(_version) {} - - std::string file_path; // external sst file path - std::string smallest_key; // smallest user key in file - std::string largest_key; // largest user key in file - std::string - smallest_range_del_key; // smallest range deletion user key in file - std::string largest_range_del_key; // largest range deletion user key in file - std::string file_checksum; // sst file checksum; - std::string file_checksum_func_name; // The name of file checksum function - SequenceNumber sequence_number; // sequence number of all keys in file - uint64_t file_size; // file size in bytes - uint64_t 
num_entries; // number of entries in file - uint64_t num_range_del_entries; // number of range deletion entries in file - int32_t version; // file version -}; - -// SstFileWriter is used to create sst files that can be added to database later -// All keys in files generated by SstFileWriter will have sequence number = 0. -class SstFileWriter { - public: - // User can pass `column_family` to specify that the generated file will - // be ingested into this column_family, note that passing nullptr means that - // the column_family is unknown. - // If invalidate_page_cache is set to true, SstFileWriter will give the OS a - // hint that this file pages is not needed every time we write 1MB to the - // file. To use the rate limiter an io_priority smaller than IO_TOTAL can be - // passed. - SstFileWriter(const EnvOptions& env_options, const Options& options, - ColumnFamilyHandle* column_family = nullptr, - bool invalidate_page_cache = true, - Env::IOPriority io_priority = Env::IOPriority::IO_TOTAL, - bool skip_filters = false) - : SstFileWriter(env_options, options, options.comparator, column_family, - invalidate_page_cache, io_priority, skip_filters) {} - - // Deprecated API - SstFileWriter(const EnvOptions& env_options, const Options& options, - const Comparator* user_comparator, - ColumnFamilyHandle* column_family = nullptr, - bool invalidate_page_cache = true, - Env::IOPriority io_priority = Env::IOPriority::IO_TOTAL, - bool skip_filters = false); - - ~SstFileWriter(); - - // Prepare SstFileWriter to write into file located at "file_path". - Status Open(const std::string& file_path); - - // Add a Put key with value to currently opened file (deprecated) - // REQUIRES: key is after any previously added key according to comparator. - ROCKSDB_DEPRECATED_FUNC Status Add(const Slice& user_key, const Slice& value); - - // Add a Put key with value to currently opened file - // REQUIRES: key is after any previously added key according to comparator. 
- Status Put(const Slice& user_key, const Slice& value); - - // Add a Merge key with value to currently opened file - // REQUIRES: key is after any previously added key according to comparator. - Status Merge(const Slice& user_key, const Slice& value); - - // Add a deletion key to currently opened file - // REQUIRES: key is after any previously added key according to comparator. - Status Delete(const Slice& user_key); - - // Add a range deletion tombstone to currently opened file - Status DeleteRange(const Slice& begin_key, const Slice& end_key); - - // Finalize writing to sst file and close file. - // - // An optional ExternalSstFileInfo pointer can be passed to the function - // which will be populated with information about the created sst file. - Status Finish(ExternalSstFileInfo* file_info = nullptr); - - // Return the current file size. - uint64_t FileSize(); - - private: - void InvalidatePageCache(bool closing); - struct Rep; - std::unique_ptr rep_; -}; -} // namespace ROCKSDB_NAMESPACE - -#endif // !ROCKSDB_LITE diff --git a/dist/darwin_amd64/include/rocksdb/sst_partitioner.h b/dist/darwin_amd64/include/rocksdb/sst_partitioner.h deleted file mode 100644 index 5d18195..0000000 --- a/dist/darwin_amd64/include/rocksdb/sst_partitioner.h +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). 
-// - -#pragma once - -#include -#include - -#include "rocksdb/rocksdb_namespace.h" -#include "rocksdb/slice.h" - -namespace ROCKSDB_NAMESPACE { - -class Slice; - -enum PartitionerResult : char { - // Partitioner does not require to create new file - kNotRequired = 0x0, - // Partitioner is requesting forcefully to create new file - kRequired = 0x1 - // Additional constants can be added -}; - -struct PartitionerRequest { - PartitionerRequest(const Slice& prev_user_key_, - const Slice& current_user_key_, - uint64_t current_output_file_size_) - : prev_user_key(&prev_user_key_), - current_user_key(¤t_user_key_), - current_output_file_size(current_output_file_size_) {} - const Slice* prev_user_key; - const Slice* current_user_key; - uint64_t current_output_file_size; -}; - -/* - * A SstPartitioner is a generic pluggable way of defining the partition - * of SST files. Compaction job will split the SST files on partition boundary - * to lower the write amplification during SST file promote to higher level. - */ -class SstPartitioner { - public: - virtual ~SstPartitioner() {} - - // Return the name of this partitioner. - virtual const char* Name() const = 0; - - // It is called for all keys in compaction. When partitioner want to create - // new SST file it needs to return true. It means compaction job will finish - // current SST file where last key is "prev_user_key" parameter and start new - // SST file where first key is "current_user_key". Returns decission if - // partition boundary was detected and compaction should create new file. - virtual PartitionerResult ShouldPartition( - const PartitionerRequest& request) = 0; - - // Called with smallest and largest keys in SST file when compation try to do - // trivial move. Returns true is partitioner allows to do trivial move. 
- virtual bool CanDoTrivialMove(const Slice& smallest_user_key, - const Slice& largest_user_key) = 0; - - // Context information of a compaction run - struct Context { - // Does this compaction run include all data files - bool is_full_compaction; - // Is this compaction requested by the client (true), - // or is it occurring as an automatic compaction process - bool is_manual_compaction; - // Output level for this compaction - int output_level; - // Smallest key for compaction - Slice smallest_user_key; - // Largest key for compaction - Slice largest_user_key; - }; -}; - -class SstPartitionerFactory { - public: - virtual ~SstPartitionerFactory() {} - - virtual std::unique_ptr CreatePartitioner( - const SstPartitioner::Context& context) const = 0; - - // Returns a name that identifies this partitioner factory. - virtual const char* Name() const = 0; -}; - -/* - * Fixed key prefix partitioner. It splits the output SST files when prefix - * defined by size changes. - */ -class SstPartitionerFixedPrefix : public SstPartitioner { - public: - explicit SstPartitionerFixedPrefix(size_t len) : len_(len) {} - - virtual ~SstPartitionerFixedPrefix() override {} - - const char* Name() const override { return "SstPartitionerFixedPrefix"; } - - PartitionerResult ShouldPartition(const PartitionerRequest& request) override; - - bool CanDoTrivialMove(const Slice& smallest_user_key, - const Slice& largest_user_key) override; - - private: - size_t len_; -}; - -/* - * Factory for fixed prefix partitioner. 
- */ -class SstPartitionerFixedPrefixFactory : public SstPartitionerFactory { - public: - explicit SstPartitionerFixedPrefixFactory(size_t len) : len_(len) {} - - virtual ~SstPartitionerFixedPrefixFactory() {} - - const char* Name() const override { - return "SstPartitionerFixedPrefixFactory"; - } - - std::unique_ptr CreatePartitioner( - const SstPartitioner::Context& /* context */) const override; - - private: - size_t len_; -}; - -extern std::shared_ptr -NewSstPartitionerFixedPrefixFactory(size_t prefix_len); - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/statistics.h b/dist/darwin_amd64/include/rocksdb/statistics.h deleted file mode 100644 index 98b4fb9..0000000 --- a/dist/darwin_amd64/include/rocksdb/statistics.h +++ /dev/null @@ -1,592 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#include -#include -#include -#include -#include -#include -#include - -#include "rocksdb/status.h" - -namespace ROCKSDB_NAMESPACE { - -/** - * Keep adding ticker's here. - * 1. Any ticker should be added before TICKER_ENUM_MAX. - * 2. Add a readable string in TickersNameMap below for the newly added ticker. - * 3. Add a corresponding enum value to TickerType.java in the java API - * 4. Add the enum conversions from Java and C++ to portal.h's toJavaTickerType - * and toCppTickers - */ -enum Tickers : uint32_t { - // total block cache misses - // REQUIRES: BLOCK_CACHE_MISS == BLOCK_CACHE_INDEX_MISS + - // BLOCK_CACHE_FILTER_MISS + - // BLOCK_CACHE_DATA_MISS; - BLOCK_CACHE_MISS = 0, - // total block cache hit - // REQUIRES: BLOCK_CACHE_HIT == BLOCK_CACHE_INDEX_HIT + - // BLOCK_CACHE_FILTER_HIT + - // BLOCK_CACHE_DATA_HIT; - BLOCK_CACHE_HIT, - // # of blocks added to block cache. 
- BLOCK_CACHE_ADD, - // # of failures when adding blocks to block cache. - BLOCK_CACHE_ADD_FAILURES, - // # of times cache miss when accessing index block from block cache. - BLOCK_CACHE_INDEX_MISS, - // # of times cache hit when accessing index block from block cache. - BLOCK_CACHE_INDEX_HIT, - // # of index blocks added to block cache. - BLOCK_CACHE_INDEX_ADD, - // # of bytes of index blocks inserted into cache - BLOCK_CACHE_INDEX_BYTES_INSERT, - // # of bytes of index block erased from cache - BLOCK_CACHE_INDEX_BYTES_EVICT, - // # of times cache miss when accessing filter block from block cache. - BLOCK_CACHE_FILTER_MISS, - // # of times cache hit when accessing filter block from block cache. - BLOCK_CACHE_FILTER_HIT, - // # of filter blocks added to block cache. - BLOCK_CACHE_FILTER_ADD, - // # of bytes of bloom filter blocks inserted into cache - BLOCK_CACHE_FILTER_BYTES_INSERT, - // # of bytes of bloom filter block erased from cache - BLOCK_CACHE_FILTER_BYTES_EVICT, - // # of times cache miss when accessing data block from block cache. - BLOCK_CACHE_DATA_MISS, - // # of times cache hit when accessing data block from block cache. - BLOCK_CACHE_DATA_HIT, - // # of data blocks added to block cache. - BLOCK_CACHE_DATA_ADD, - // # of bytes of data blocks inserted into cache - BLOCK_CACHE_DATA_BYTES_INSERT, - // # of bytes read from cache. - BLOCK_CACHE_BYTES_READ, - // # of bytes written into cache. - BLOCK_CACHE_BYTES_WRITE, - - // # of times bloom filter has avoided file reads, i.e., negatives. - BLOOM_FILTER_USEFUL, - // # of times bloom FullFilter has not avoided the reads. - BLOOM_FILTER_FULL_POSITIVE, - // # of times bloom FullFilter has not avoided the reads and data actually - // exist. 
- BLOOM_FILTER_FULL_TRUE_POSITIVE, - - BLOOM_FILTER_MICROS, - - // # persistent cache hit - PERSISTENT_CACHE_HIT, - // # persistent cache miss - PERSISTENT_CACHE_MISS, - - // # total simulation block cache hits - SIM_BLOCK_CACHE_HIT, - // # total simulation block cache misses - SIM_BLOCK_CACHE_MISS, - - // # of memtable hits. - MEMTABLE_HIT, - // # of memtable misses. - MEMTABLE_MISS, - - // # of Get() queries served by L0 - GET_HIT_L0, - // # of Get() queries served by L1 - GET_HIT_L1, - // # of Get() queries served by L2 and up - GET_HIT_L2_AND_UP, - - /** - * COMPACTION_KEY_DROP_* count the reasons for key drop during compaction - * There are 4 reasons currently. - */ - COMPACTION_KEY_DROP_NEWER_ENTRY, // key was written with a newer value. - // Also includes keys dropped for range del. - COMPACTION_KEY_DROP_OBSOLETE, // The key is obsolete. - COMPACTION_KEY_DROP_RANGE_DEL, // key was covered by a range tombstone. - COMPACTION_KEY_DROP_USER, // user compaction function has dropped the key. - COMPACTION_RANGE_DEL_DROP_OBSOLETE, // all keys in range were deleted. - // Deletions obsoleted before bottom level due to file gap optimization. - COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE, - // If a compaction was cancelled in sfm to prevent ENOSPC - COMPACTION_CANCELLED, - - // Number of keys written to the database via the Put and Write call's - NUMBER_KEYS_WRITTEN, - // Number of Keys read, - NUMBER_KEYS_READ, - // Number keys updated, if inplace update is enabled - NUMBER_KEYS_UPDATED, - // The number of uncompressed bytes issued by DB::Put(), DB::Delete(), - // DB::Merge(), and DB::Write(). - BYTES_WRITTEN, - // The number of uncompressed bytes read from DB::Get(). It could be - // either from memtables, cache, or table files. - // For the number of logical bytes read from DB::MultiGet(), - // please use NUMBER_MULTIGET_BYTES_READ. 
- BYTES_READ, - // The number of calls to seek/next/prev - NUMBER_DB_SEEK, - NUMBER_DB_NEXT, - NUMBER_DB_PREV, - // The number of calls to seek/next/prev that returned data - NUMBER_DB_SEEK_FOUND, - NUMBER_DB_NEXT_FOUND, - NUMBER_DB_PREV_FOUND, - // The number of uncompressed bytes read from an iterator. - // Includes size of key and value. - ITER_BYTES_READ, - NO_FILE_CLOSES, - NO_FILE_OPENS, - NO_FILE_ERRORS, - // DEPRECATED Time system had to wait to do LO-L1 compactions - STALL_L0_SLOWDOWN_MICROS, - // DEPRECATED Time system had to wait to move memtable to L1. - STALL_MEMTABLE_COMPACTION_MICROS, - // DEPRECATED write throttle because of too many files in L0 - STALL_L0_NUM_FILES_MICROS, - // Writer has to wait for compaction or flush to finish. - STALL_MICROS, - // The wait time for db mutex. - // Disabled by default. To enable it set stats level to kAll - DB_MUTEX_WAIT_MICROS, - RATE_LIMIT_DELAY_MILLIS, - // DEPRECATED number of iterators currently open - NO_ITERATORS, - - // Number of MultiGet calls, keys read, and bytes read - NUMBER_MULTIGET_CALLS, - NUMBER_MULTIGET_KEYS_READ, - NUMBER_MULTIGET_BYTES_READ, - - // Number of deletes records that were not required to be - // written to storage because key does not exist - NUMBER_FILTERED_DELETES, - NUMBER_MERGE_FAILURES, - - // number of times bloom was checked before creating iterator on a - // file, and the number of times the check was useful in avoiding - // iterator creation (and thus likely IOPs). - BLOOM_FILTER_PREFIX_CHECKED, - BLOOM_FILTER_PREFIX_USEFUL, - - // Number of times we had to reseek inside an iteration to skip - // over large number of keys with same userkey. - NUMBER_OF_RESEEKS_IN_ITERATION, - - // Record the number of calls to GetUpadtesSince. 
Useful to keep track of - // transaction log iterator refreshes - GET_UPDATES_SINCE_CALLS, - BLOCK_CACHE_COMPRESSED_MISS, // miss in the compressed block cache - BLOCK_CACHE_COMPRESSED_HIT, // hit in the compressed block cache - // Number of blocks added to compressed block cache - BLOCK_CACHE_COMPRESSED_ADD, - // Number of failures when adding blocks to compressed block cache - BLOCK_CACHE_COMPRESSED_ADD_FAILURES, - WAL_FILE_SYNCED, // Number of times WAL sync is done - WAL_FILE_BYTES, // Number of bytes written to WAL - - // Writes can be processed by requesting thread or by the thread at the - // head of the writers queue. - WRITE_DONE_BY_SELF, - WRITE_DONE_BY_OTHER, // Equivalent to writes done for others - WRITE_TIMEDOUT, // Number of writes ending up with timed-out. - WRITE_WITH_WAL, // Number of Write calls that request WAL - COMPACT_READ_BYTES, // Bytes read during compaction - COMPACT_WRITE_BYTES, // Bytes written during compaction - FLUSH_WRITE_BYTES, // Bytes written during flush - - // Compaction read and write statistics broken down by CompactionReason - COMPACT_READ_BYTES_MARKED, - COMPACT_READ_BYTES_PERIODIC, - COMPACT_READ_BYTES_TTL, - COMPACT_WRITE_BYTES_MARKED, - COMPACT_WRITE_BYTES_PERIODIC, - COMPACT_WRITE_BYTES_TTL, - - // Number of table's properties loaded directly from file, without creating - // table reader object. - NUMBER_DIRECT_LOAD_TABLE_PROPERTIES, - NUMBER_SUPERVERSION_ACQUIRES, - NUMBER_SUPERVERSION_RELEASES, - NUMBER_SUPERVERSION_CLEANUPS, - - // # of compressions/decompressions executed - NUMBER_BLOCK_COMPRESSED, - NUMBER_BLOCK_DECOMPRESSED, - - NUMBER_BLOCK_NOT_COMPRESSED, - MERGE_OPERATION_TOTAL_TIME, - FILTER_OPERATION_TOTAL_TIME, - - // Row cache. - ROW_CACHE_HIT, - ROW_CACHE_MISS, - - // Read amplification statistics. 
- // Read amplification can be calculated using this formula - // (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES) - // - // REQUIRES: ReadOptions::read_amp_bytes_per_bit to be enabled - READ_AMP_ESTIMATE_USEFUL_BYTES, // Estimate of total bytes actually used. - READ_AMP_TOTAL_READ_BYTES, // Total size of loaded data blocks. - - // Number of refill intervals where rate limiter's bytes are fully consumed. - NUMBER_RATE_LIMITER_DRAINS, - - // Number of internal keys skipped by Iterator - NUMBER_ITER_SKIP, - - // BlobDB specific stats - // # of Put/PutTTL/PutUntil to BlobDB. - BLOB_DB_NUM_PUT, - // # of Write to BlobDB. - BLOB_DB_NUM_WRITE, - // # of Get to BlobDB. - BLOB_DB_NUM_GET, - // # of MultiGet to BlobDB. - BLOB_DB_NUM_MULTIGET, - // # of Seek/SeekToFirst/SeekToLast/SeekForPrev to BlobDB iterator. - BLOB_DB_NUM_SEEK, - // # of Next to BlobDB iterator. - BLOB_DB_NUM_NEXT, - // # of Prev to BlobDB iterator. - BLOB_DB_NUM_PREV, - // # of keys written to BlobDB. - BLOB_DB_NUM_KEYS_WRITTEN, - // # of keys read from BlobDB. - BLOB_DB_NUM_KEYS_READ, - // # of bytes (key + value) written to BlobDB. - BLOB_DB_BYTES_WRITTEN, - // # of bytes (keys + value) read from BlobDB. - BLOB_DB_BYTES_READ, - // # of keys written by BlobDB as non-TTL inlined value. - BLOB_DB_WRITE_INLINED, - // # of keys written by BlobDB as TTL inlined value. - BLOB_DB_WRITE_INLINED_TTL, - // # of keys written by BlobDB as non-TTL blob value. - BLOB_DB_WRITE_BLOB, - // # of keys written by BlobDB as TTL blob value. - BLOB_DB_WRITE_BLOB_TTL, - // # of bytes written to blob file. - BLOB_DB_BLOB_FILE_BYTES_WRITTEN, - // # of bytes read from blob file. - BLOB_DB_BLOB_FILE_BYTES_READ, - // # of times a blob files being synced. - BLOB_DB_BLOB_FILE_SYNCED, - // # of blob index evicted from base DB by BlobDB compaction filter because - // of expiration. - BLOB_DB_BLOB_INDEX_EXPIRED_COUNT, - // size of blob index evicted from base DB by BlobDB compaction filter - // because of expiration. 
- BLOB_DB_BLOB_INDEX_EXPIRED_SIZE, - // # of blob index evicted from base DB by BlobDB compaction filter because - // of corresponding file deleted. - BLOB_DB_BLOB_INDEX_EVICTED_COUNT, - // size of blob index evicted from base DB by BlobDB compaction filter - // because of corresponding file deleted. - BLOB_DB_BLOB_INDEX_EVICTED_SIZE, - // # of blob files that were obsoleted by garbage collection. - BLOB_DB_GC_NUM_FILES, - // # of blob files generated by garbage collection. - BLOB_DB_GC_NUM_NEW_FILES, - // # of BlobDB garbage collection failures. - BLOB_DB_GC_FAILURES, - // # of keys dropped by BlobDB garbage collection because they had been - // overwritten. DEPRECATED. - BLOB_DB_GC_NUM_KEYS_OVERWRITTEN, - // # of keys dropped by BlobDB garbage collection because of expiration. - // DEPRECATED. - BLOB_DB_GC_NUM_KEYS_EXPIRED, - // # of keys relocated to new blob file by garbage collection. - BLOB_DB_GC_NUM_KEYS_RELOCATED, - // # of bytes dropped by BlobDB garbage collection because they had been - // overwritten. DEPRECATED. - BLOB_DB_GC_BYTES_OVERWRITTEN, - // # of bytes dropped by BlobDB garbage collection because of expiration. - // DEPRECATED. - BLOB_DB_GC_BYTES_EXPIRED, - // # of bytes relocated to new blob file by garbage collection. - BLOB_DB_GC_BYTES_RELOCATED, - // # of blob files evicted because of BlobDB is full. - BLOB_DB_FIFO_NUM_FILES_EVICTED, - // # of keys in the blob files evicted because of BlobDB is full. - BLOB_DB_FIFO_NUM_KEYS_EVICTED, - // # of bytes in the blob files evicted because of BlobDB is full. - BLOB_DB_FIFO_BYTES_EVICTED, - - // These counters indicate a performance issue in WritePrepared transactions. - // We should not seem them ticking them much. - // # of times prepare_mutex_ is acquired in the fast path. - TXN_PREPARE_MUTEX_OVERHEAD, - // # of times old_commit_map_mutex_ is acquired in the fast path. - TXN_OLD_COMMIT_MAP_MUTEX_OVERHEAD, - // # of times we checked a batch for duplicate keys. 
- TXN_DUPLICATE_KEY_OVERHEAD, - // # of times snapshot_mutex_ is acquired in the fast path. - TXN_SNAPSHOT_MUTEX_OVERHEAD, - // # of times ::Get returned TryAgain due to expired snapshot seq - TXN_GET_TRY_AGAIN, - - // Number of keys actually found in MultiGet calls (vs number requested by - // caller) - // NUMBER_MULTIGET_KEYS_READ gives the number requested by caller - NUMBER_MULTIGET_KEYS_FOUND, - - NO_ITERATOR_CREATED, // number of iterators created - NO_ITERATOR_DELETED, // number of iterators deleted - - BLOCK_CACHE_COMPRESSION_DICT_MISS, - BLOCK_CACHE_COMPRESSION_DICT_HIT, - BLOCK_CACHE_COMPRESSION_DICT_ADD, - BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT, - BLOCK_CACHE_COMPRESSION_DICT_BYTES_EVICT, - - // # of blocks redundantly inserted into block cache. - // REQUIRES: BLOCK_CACHE_ADD_REDUNDANT <= BLOCK_CACHE_ADD - BLOCK_CACHE_ADD_REDUNDANT, - // # of index blocks redundantly inserted into block cache. - // REQUIRES: BLOCK_CACHE_INDEX_ADD_REDUNDANT <= BLOCK_CACHE_INDEX_ADD - BLOCK_CACHE_INDEX_ADD_REDUNDANT, - // # of filter blocks redundantly inserted into block cache. - // REQUIRES: BLOCK_CACHE_FILTER_ADD_REDUNDANT <= BLOCK_CACHE_FILTER_ADD - BLOCK_CACHE_FILTER_ADD_REDUNDANT, - // # of data blocks redundantly inserted into block cache. - // REQUIRES: BLOCK_CACHE_DATA_ADD_REDUNDANT <= BLOCK_CACHE_DATA_ADD - BLOCK_CACHE_DATA_ADD_REDUNDANT, - // # of dict blocks redundantly inserted into block cache. - // REQUIRES: BLOCK_CACHE_COMPRESSION_DICT_ADD_REDUNDANT - // <= BLOCK_CACHE_COMPRESSION_DICT_ADD - BLOCK_CACHE_COMPRESSION_DICT_ADD_REDUNDANT, - - // # of files marked as trash by sst file manager and will be deleted - // later by background thread. - FILES_MARKED_TRASH, - // # of files deleted immediately by sst file manger through delete scheduler. 
- FILES_DELETED_IMMEDIATELY, - - TICKER_ENUM_MAX -}; - -// The order of items listed in Tickers should be the same as -// the order listed in TickersNameMap -extern const std::vector> TickersNameMap; - -/** - * Keep adding histogram's here. - * Any histogram should have value less than HISTOGRAM_ENUM_MAX - * Add a new Histogram by assigning it the current value of HISTOGRAM_ENUM_MAX - * Add a string representation in HistogramsNameMap below - * And increment HISTOGRAM_ENUM_MAX - * Add a corresponding enum value to HistogramType.java in the java API - */ -enum Histograms : uint32_t { - DB_GET = 0, - DB_WRITE, - COMPACTION_TIME, - COMPACTION_CPU_TIME, - SUBCOMPACTION_SETUP_TIME, - TABLE_SYNC_MICROS, - COMPACTION_OUTFILE_SYNC_MICROS, - WAL_FILE_SYNC_MICROS, - MANIFEST_FILE_SYNC_MICROS, - // TIME SPENT IN IO DURING TABLE OPEN - TABLE_OPEN_IO_MICROS, - DB_MULTIGET, - READ_BLOCK_COMPACTION_MICROS, - READ_BLOCK_GET_MICROS, - WRITE_RAW_BLOCK_MICROS, - STALL_L0_SLOWDOWN_COUNT, - STALL_MEMTABLE_COMPACTION_COUNT, - STALL_L0_NUM_FILES_COUNT, - HARD_RATE_LIMIT_DELAY_COUNT, - SOFT_RATE_LIMIT_DELAY_COUNT, - NUM_FILES_IN_SINGLE_COMPACTION, - DB_SEEK, - WRITE_STALL, - SST_READ_MICROS, - // The number of subcompactions actually scheduled during a compaction - NUM_SUBCOMPACTIONS_SCHEDULED, - // Value size distribution in each operation - BYTES_PER_READ, - BYTES_PER_WRITE, - BYTES_PER_MULTIGET, - - // number of bytes compressed/decompressed - // number of bytes is when uncompressed; i.e. before/after respectively - BYTES_COMPRESSED, - BYTES_DECOMPRESSED, - COMPRESSION_TIMES_NANOS, - DECOMPRESSION_TIMES_NANOS, - // Number of merge operands passed to the merge operator in user read - // requests. - READ_NUM_MERGE_OPERANDS, - - // BlobDB specific stats - // Size of keys written to BlobDB. - BLOB_DB_KEY_SIZE, - // Size of values written to BlobDB. - BLOB_DB_VALUE_SIZE, - // BlobDB Put/PutWithTTL/PutUntil/Write latency. - BLOB_DB_WRITE_MICROS, - // BlobDB Get lagency. 
- BLOB_DB_GET_MICROS, - // BlobDB MultiGet latency. - BLOB_DB_MULTIGET_MICROS, - // BlobDB Seek/SeekToFirst/SeekToLast/SeekForPrev latency. - BLOB_DB_SEEK_MICROS, - // BlobDB Next latency. - BLOB_DB_NEXT_MICROS, - // BlobDB Prev latency. - BLOB_DB_PREV_MICROS, - // Blob file write latency. - BLOB_DB_BLOB_FILE_WRITE_MICROS, - // Blob file read latency. - BLOB_DB_BLOB_FILE_READ_MICROS, - // Blob file sync latency. - BLOB_DB_BLOB_FILE_SYNC_MICROS, - // BlobDB garbage collection time. DEPRECATED. - BLOB_DB_GC_MICROS, - // BlobDB compression time. - BLOB_DB_COMPRESSION_MICROS, - // BlobDB decompression time. - BLOB_DB_DECOMPRESSION_MICROS, - // Time spent flushing memtable to disk - FLUSH_TIME, - SST_BATCH_SIZE, - - // MultiGet stats logged per level - // Num of index and filter blocks read from file system per level. - NUM_INDEX_AND_FILTER_BLOCKS_READ_PER_LEVEL, - // Num of data blocks read from file system per level. - NUM_DATA_BLOCKS_READ_PER_LEVEL, - // Num of sst files read from file system per level. - NUM_SST_READ_PER_LEVEL, - - HISTOGRAM_ENUM_MAX, -}; - -extern const std::vector> HistogramsNameMap; - -struct HistogramData { - double median; - double percentile95; - double percentile99; - double average; - double standard_deviation; - // zero-initialize new members since old Statistics::histogramData() - // implementations won't write them. - double max = 0.0; - uint64_t count = 0; - uint64_t sum = 0; - double min = 0.0; -}; - -// StatsLevel can be used to reduce statistics overhead by skipping certain -// types of stats in the stats collection process. -// Usage: -// options.statistics->set_stats_level(StatsLevel::kExceptTimeForMutex); -enum StatsLevel : uint8_t { - // Disable all metrics - kDisableAll, - // Disable tickers - kExceptTickers = kDisableAll, - // Disable timer stats, and skip histogram stats - kExceptHistogramOrTimers, - // Skip timer stats - kExceptTimers, - // Collect all stats except time inside mutex lock AND time spent on - // compression. 
- kExceptDetailedTimers, - // Collect all stats except the counters requiring to get time inside the - // mutex lock. - kExceptTimeForMutex, - // Collect all stats, including measuring duration of mutex operations. - // If getting time is expensive on the platform to run, it can - // reduce scalability to more threads, especially for writes. - kAll, -}; - -// Analyze the performance of a db by providing cumulative stats over time. -// Usage: -// Options options; -// options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics(); -// Status s = DB::Open(options, kDBPath, &db); -// ... -// options.statistics->getTickerCount(NUMBER_BLOCK_COMPRESSED); -// HistogramData hist; -// options.statistics->histogramData(FLUSH_TIME, &hist); -class Statistics { - public: - virtual ~Statistics() {} - static const char* Type() { return "Statistics"; } - virtual uint64_t getTickerCount(uint32_t tickerType) const = 0; - virtual void histogramData(uint32_t type, - HistogramData* const data) const = 0; - virtual std::string getHistogramString(uint32_t /*type*/) const { return ""; } - virtual void recordTick(uint32_t tickerType, uint64_t count = 0) = 0; - virtual void setTickerCount(uint32_t tickerType, uint64_t count) = 0; - virtual uint64_t getAndResetTickerCount(uint32_t tickerType) = 0; - virtual void reportTimeToHistogram(uint32_t histogramType, uint64_t time) { - if (get_stats_level() <= StatsLevel::kExceptTimers) { - return; - } - recordInHistogram(histogramType, time); - } - // The function is here only for backward compatibility reason. - // Users implementing their own Statistics class should override - // recordInHistogram() instead and leave measureTime() as it is. - virtual void measureTime(uint32_t /*histogramType*/, uint64_t /*time*/) { - // This is not supposed to be called. - assert(false); - } - virtual void recordInHistogram(uint32_t histogramType, uint64_t time) { - // measureTime() is the old and inaccurate function name. - // To keep backward compatible. 
If users implement their own - // statistics, which overrides measureTime() but doesn't override - // this function. We forward to measureTime(). - measureTime(histogramType, time); - } - - // Resets all ticker and histogram stats - virtual Status Reset() { return Status::NotSupported("Not implemented"); } - - // String representation of the statistic object. - virtual std::string ToString() const { - // Do nothing by default - return std::string("ToString(): not implemented"); - } - - virtual bool getTickerMap(std::map*) const { - // Do nothing by default - return false; - } - - // Override this function to disable particular histogram collection - virtual bool HistEnabledForType(uint32_t type) const { - return type < HISTOGRAM_ENUM_MAX; - } - void set_stats_level(StatsLevel sl) { - stats_level_.store(sl, std::memory_order_relaxed); - } - StatsLevel get_stats_level() const { - return stats_level_.load(std::memory_order_relaxed); - } - - private: - std::atomic stats_level_{kExceptDetailedTimers}; -}; - -// Create a concrete DBStatistics object -std::shared_ptr CreateDBStatistics(); - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/stats_history.h b/dist/darwin_amd64/include/rocksdb/stats_history.h deleted file mode 100644 index 4acaad2..0000000 --- a/dist/darwin_amd64/include/rocksdb/stats_history.h +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. 
- -#pragma once - -#include -#include - -#include "rocksdb/statistics.h" -#include "rocksdb/status.h" - -namespace ROCKSDB_NAMESPACE { - -class DBImpl; - -// StatsHistoryIterator is the main interface for users to programmatically -// access statistics snapshots that was automatically stored by RocksDB. -// Depending on options, the stats can be in memory or on disk. -// The stats snapshots are indexed by time that they were recorded, and each -// stats snapshot contains individual stat name and value at the time of -// recording. -// Example: -// std::unique_ptr stats_iter; -// Status s = db->GetStatsHistory(0 /* start_time */, -// env->NowMicros() /* end_time*/, -// &stats_iter); -// if (s.ok) { -// for (; stats_iter->Valid(); stats_iter->Next()) { -// uint64_t stats_time = stats_iter->GetStatsTime(); -// const std::map& stats_map = -// stats_iter->GetStatsMap(); -// process(stats_time, stats_map); -// } -// } -class StatsHistoryIterator { - public: - StatsHistoryIterator() {} - virtual ~StatsHistoryIterator() {} - - virtual bool Valid() const = 0; - - // Moves to the next stats history record. After this call, Valid() is - // true iff the iterator was not positioned at the last entry in the source. - // REQUIRES: Valid() - virtual void Next() = 0; - - // Return the time stamp (in seconds) when stats history is recorded. - // REQUIRES: Valid() - virtual uint64_t GetStatsTime() const = 0; - - virtual int GetFormatVersion() const { return -1; } - - // Return the current stats history as an std::map which specifies the - // mapping from stats name to stats value . The underlying storage - // for the returned map is valid only until the next modification of - // the iterator. - // REQUIRES: Valid() - virtual const std::map& GetStatsMap() const = 0; - - // If an error has occurred, return it. Else return an ok status. 
- virtual Status status() const = 0; -}; - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/status.h b/dist/darwin_amd64/include/rocksdb/status.h deleted file mode 100644 index bcc55e4..0000000 --- a/dist/darwin_amd64/include/rocksdb/status.h +++ /dev/null @@ -1,608 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. -// -// A Status encapsulates the result of an operation. It may indicate success, -// or it may indicate an error with an associated error message. -// -// Multiple threads can invoke const methods on a Status without -// external synchronization, but if any of the threads may call a -// non-const method, all threads accessing the same Status must use -// external synchronization. - -#pragma once - -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED -#include -#include -#endif - -#include - -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED -#include "port/stack_trace.h" -#endif - -#include "rocksdb/slice.h" - -namespace ROCKSDB_NAMESPACE { - -class Status { - public: - // Create a success status. - Status() : code_(kOk), subcode_(kNone), sev_(kNoError), state_(nullptr) {} - ~Status() { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - if (!checked_) { - fprintf(stderr, "Failed to check Status %p\n", this); - port::PrintStack(); - abort(); - } -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - delete[] state_; - } - - // Copy the specified status. 
- Status(const Status& s); - Status& operator=(const Status& s); - Status(Status&& s) -#if !(defined _MSC_VER) || ((defined _MSC_VER) && (_MSC_VER >= 1900)) - noexcept -#endif - ; - Status& operator=(Status&& s) -#if !(defined _MSC_VER) || ((defined _MSC_VER) && (_MSC_VER >= 1900)) - noexcept -#endif - ; - bool operator==(const Status& rhs) const; - bool operator!=(const Status& rhs) const; - - // In case of intentionally swallowing an error, user must explicitly call - // this function. That way we are easily able to search the code to find where - // error swallowing occurs. - void PermitUncheckedError() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - } - - enum Code : unsigned char { - kOk = 0, - kNotFound = 1, - kCorruption = 2, - kNotSupported = 3, - kInvalidArgument = 4, - kIOError = 5, - kMergeInProgress = 6, - kIncomplete = 7, - kShutdownInProgress = 8, - kTimedOut = 9, - kAborted = 10, - kBusy = 11, - kExpired = 12, - kTryAgain = 13, - kCompactionTooLarge = 14, - kColumnFamilyDropped = 15, - kMaxCode - }; - - Code code() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return code_; - } - - enum SubCode : unsigned char { - kNone = 0, - kMutexTimeout = 1, - kLockTimeout = 2, - kLockLimit = 3, - kNoSpace = 4, - kDeadlock = 5, - kStaleFile = 6, - kMemoryLimit = 7, - kSpaceLimit = 8, - kPathNotFound = 9, - KMergeOperandsInsufficientCapacity = 10, - kManualCompactionPaused = 11, - kOverwritten = 12, - kTxnNotPrepared = 13, - kIOFenced = 14, - kMaxSubCode - }; - - SubCode subcode() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return subcode_; - } - - enum Severity : unsigned char { - kNoError = 0, - kSoftError = 1, - kHardError = 2, - kFatalError = 3, - kUnrecoverableError = 4, - kMaxSeverity - }; - - Status(const Status& s, Severity sev); - Severity severity() const { -#ifdef 
ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return sev_; - } - - // Returns a C style string indicating the message of the Status - const char* getState() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return state_; - } - - // Return a success status. - static Status OK() { return Status(); } - - // Successful, though an existing something was overwritten - // Note: using variants of OK status for program logic is discouraged, - // but it can be useful for communicating statistical information without - // changing public APIs. - static Status OkOverwritten() { return Status(kOk, kOverwritten); } - - // Return error status of an appropriate type. - static Status NotFound(const Slice& msg, const Slice& msg2 = Slice()) { - return Status(kNotFound, msg, msg2); - } - - // Fast path for not found without malloc; - static Status NotFound(SubCode msg = kNone) { return Status(kNotFound, msg); } - - static Status NotFound(SubCode sc, const Slice& msg, - const Slice& msg2 = Slice()) { - return Status(kNotFound, sc, msg, msg2); - } - - static Status Corruption(const Slice& msg, const Slice& msg2 = Slice()) { - return Status(kCorruption, msg, msg2); - } - static Status Corruption(SubCode msg = kNone) { - return Status(kCorruption, msg); - } - - static Status NotSupported(const Slice& msg, const Slice& msg2 = Slice()) { - return Status(kNotSupported, msg, msg2); - } - static Status NotSupported(SubCode msg = kNone) { - return Status(kNotSupported, msg); - } - - static Status InvalidArgument(const Slice& msg, const Slice& msg2 = Slice()) { - return Status(kInvalidArgument, msg, msg2); - } - static Status InvalidArgument(SubCode msg = kNone) { - return Status(kInvalidArgument, msg); - } - - static Status IOError(const Slice& msg, const Slice& msg2 = Slice()) { - return Status(kIOError, msg, msg2); - } - static Status IOError(SubCode msg = kNone) { return Status(kIOError, 
msg); } - - static Status MergeInProgress(const Slice& msg, const Slice& msg2 = Slice()) { - return Status(kMergeInProgress, msg, msg2); - } - static Status MergeInProgress(SubCode msg = kNone) { - return Status(kMergeInProgress, msg); - } - - static Status Incomplete(const Slice& msg, const Slice& msg2 = Slice()) { - return Status(kIncomplete, msg, msg2); - } - static Status Incomplete(SubCode msg = kNone) { - return Status(kIncomplete, msg); - } - - static Status ShutdownInProgress(SubCode msg = kNone) { - return Status(kShutdownInProgress, msg); - } - static Status ShutdownInProgress(const Slice& msg, - const Slice& msg2 = Slice()) { - return Status(kShutdownInProgress, msg, msg2); - } - static Status Aborted(SubCode msg = kNone) { return Status(kAborted, msg); } - static Status Aborted(const Slice& msg, const Slice& msg2 = Slice()) { - return Status(kAborted, msg, msg2); - } - - static Status Busy(SubCode msg = kNone) { return Status(kBusy, msg); } - static Status Busy(const Slice& msg, const Slice& msg2 = Slice()) { - return Status(kBusy, msg, msg2); - } - - static Status TimedOut(SubCode msg = kNone) { return Status(kTimedOut, msg); } - static Status TimedOut(const Slice& msg, const Slice& msg2 = Slice()) { - return Status(kTimedOut, msg, msg2); - } - - static Status Expired(SubCode msg = kNone) { return Status(kExpired, msg); } - static Status Expired(const Slice& msg, const Slice& msg2 = Slice()) { - return Status(kExpired, msg, msg2); - } - - static Status TryAgain(SubCode msg = kNone) { return Status(kTryAgain, msg); } - static Status TryAgain(const Slice& msg, const Slice& msg2 = Slice()) { - return Status(kTryAgain, msg, msg2); - } - - static Status CompactionTooLarge(SubCode msg = kNone) { - return Status(kCompactionTooLarge, msg); - } - static Status CompactionTooLarge(const Slice& msg, - const Slice& msg2 = Slice()) { - return Status(kCompactionTooLarge, msg, msg2); - } - - static Status ColumnFamilyDropped(SubCode msg = kNone) { - return 
Status(kColumnFamilyDropped, msg); - } - - static Status ColumnFamilyDropped(const Slice& msg, - const Slice& msg2 = Slice()) { - return Status(kColumnFamilyDropped, msg, msg2); - } - - static Status NoSpace() { return Status(kIOError, kNoSpace); } - static Status NoSpace(const Slice& msg, const Slice& msg2 = Slice()) { - return Status(kIOError, kNoSpace, msg, msg2); - } - - static Status MemoryLimit() { return Status(kAborted, kMemoryLimit); } - static Status MemoryLimit(const Slice& msg, const Slice& msg2 = Slice()) { - return Status(kAborted, kMemoryLimit, msg, msg2); - } - - static Status SpaceLimit() { return Status(kIOError, kSpaceLimit); } - static Status SpaceLimit(const Slice& msg, const Slice& msg2 = Slice()) { - return Status(kIOError, kSpaceLimit, msg, msg2); - } - - static Status PathNotFound() { return Status(kIOError, kPathNotFound); } - static Status PathNotFound(const Slice& msg, const Slice& msg2 = Slice()) { - return Status(kIOError, kPathNotFound, msg, msg2); - } - - static Status TxnNotPrepared() { - return Status(kInvalidArgument, kTxnNotPrepared); - } - static Status TxnNotPrepared(const Slice& msg, const Slice& msg2 = Slice()) { - return Status(kInvalidArgument, kTxnNotPrepared, msg, msg2); - } - - // Returns true iff the status indicates success. - bool ok() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return code() == kOk; - } - - // Returns true iff the status indicates success *with* something - // overwritten - bool IsOkOverwritten() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return code() == kOk && subcode() == kOverwritten; - } - - // Returns true iff the status indicates a NotFound error. 
- bool IsNotFound() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return code() == kNotFound; - } - - // Returns true iff the status indicates a Corruption error. - bool IsCorruption() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return code() == kCorruption; - } - - // Returns true iff the status indicates a NotSupported error. - bool IsNotSupported() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return code() == kNotSupported; - } - - // Returns true iff the status indicates an InvalidArgument error. - bool IsInvalidArgument() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return code() == kInvalidArgument; - } - - // Returns true iff the status indicates an IOError. - bool IsIOError() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return code() == kIOError; - } - - // Returns true iff the status indicates an MergeInProgress. 
- bool IsMergeInProgress() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return code() == kMergeInProgress; - } - - // Returns true iff the status indicates Incomplete - bool IsIncomplete() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return code() == kIncomplete; - } - - // Returns true iff the status indicates Shutdown In progress - bool IsShutdownInProgress() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return code() == kShutdownInProgress; - } - - bool IsTimedOut() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return code() == kTimedOut; - } - - bool IsAborted() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return code() == kAborted; - } - - bool IsLockLimit() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return code() == kAborted && subcode() == kLockLimit; - } - - // Returns true iff the status indicates that a resource is Busy and - // temporarily could not be acquired. - bool IsBusy() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return code() == kBusy; - } - - bool IsDeadlock() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return code() == kBusy && subcode() == kDeadlock; - } - - // Returns true iff the status indicated that the operation has Expired. - bool IsExpired() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return code() == kExpired; - } - - // Returns true iff the status indicates a TryAgain error. - // This usually means that the operation failed, but may succeed if - // re-attempted. 
- bool IsTryAgain() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return code() == kTryAgain; - } - - // Returns true iff the status indicates the proposed compaction is too large - bool IsCompactionTooLarge() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return code() == kCompactionTooLarge; - } - - // Returns true iff the status indicates Column Family Dropped - bool IsColumnFamilyDropped() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return code() == kColumnFamilyDropped; - } - - // Returns true iff the status indicates a NoSpace error - // This is caused by an I/O error returning the specific "out of space" - // error condition. Stricto sensu, an NoSpace error is an I/O error - // with a specific subcode, enabling users to take the appropriate action - // if needed - bool IsNoSpace() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return (code() == kIOError) && (subcode() == kNoSpace); - } - - // Returns true iff the status indicates a memory limit error. There may be - // cases where we limit the memory used in certain operations (eg. the size - // of a write batch) in order to avoid out of memory exceptions. - bool IsMemoryLimit() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return (code() == kAborted) && (subcode() == kMemoryLimit); - } - - // Returns true iff the status indicates a PathNotFound error - // This is caused by an I/O error returning the specific "no such file or - // directory" error condition. 
A PathNotFound error is an I/O error with - // a specific subcode, enabling users to take appropriate action if necessary - bool IsPathNotFound() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return (code() == kIOError || code() == kNotFound) && - (subcode() == kPathNotFound); - } - - // Returns true iff the status indicates manual compaction paused. This - // is caused by a call to PauseManualCompaction - bool IsManualCompactionPaused() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return (code() == kIncomplete) && (subcode() == kManualCompactionPaused); - } - - // Returns true iff the status indicates a TxnNotPrepared error. - bool IsTxnNotPrepared() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return (code() == kInvalidArgument) && (subcode() == kTxnNotPrepared); - } - - // Returns true iff the status indicates a IOFenced error. - bool IsIOFenced() const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return (code() == kIOError) && (subcode() == kIOFenced); - } - - // Return a string representation of this status suitable for printing. - // Returns the string "OK" for success. - std::string ToString() const; - - protected: - // A nullptr state_ (which is always the case for OK) means the message - // is empty. - // of the following form: - // state_[0..3] == length of message - // state_[4..] 
== message - Code code_; - SubCode subcode_; - Severity sev_; - const char* state_; -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - mutable bool checked_ = false; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - - explicit Status(Code _code, SubCode _subcode = kNone) - : code_(_code), subcode_(_subcode), sev_(kNoError), state_(nullptr) {} - - Status(Code _code, SubCode _subcode, const Slice& msg, const Slice& msg2); - Status(Code _code, const Slice& msg, const Slice& msg2) - : Status(_code, kNone, msg, msg2) {} - - static const char* CopyState(const char* s); -}; - -inline Status::Status(const Status& s) - : code_(s.code_), subcode_(s.subcode_), sev_(s.sev_) { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - s.checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - state_ = (s.state_ == nullptr) ? nullptr : CopyState(s.state_); -} -inline Status::Status(const Status& s, Severity sev) - : code_(s.code_), subcode_(s.subcode_), sev_(sev) { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - s.checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - state_ = (s.state_ == nullptr) ? nullptr : CopyState(s.state_); -} -inline Status& Status::operator=(const Status& s) { - if (this != &s) { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - s.checked_ = true; - checked_ = false; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - code_ = s.code_; - subcode_ = s.subcode_; - sev_ = s.sev_; - delete[] state_; - state_ = (s.state_ == nullptr) ? 
nullptr : CopyState(s.state_); - } - return *this; -} - -inline Status::Status(Status&& s) -#if !(defined _MSC_VER) || ((defined _MSC_VER) && (_MSC_VER >= 1900)) - noexcept -#endif - : Status() { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - s.checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - *this = std::move(s); -} - -inline Status& Status::operator=(Status&& s) -#if !(defined _MSC_VER) || ((defined _MSC_VER) && (_MSC_VER >= 1900)) - noexcept -#endif -{ - if (this != &s) { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - s.checked_ = true; - checked_ = false; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - code_ = std::move(s.code_); - s.code_ = kOk; - subcode_ = std::move(s.subcode_); - s.subcode_ = kNone; - sev_ = std::move(s.sev_); - s.sev_ = kNoError; - delete[] state_; - state_ = nullptr; - std::swap(state_, s.state_); - } - return *this; -} - -inline bool Status::operator==(const Status& rhs) const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; - rhs.checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return (code_ == rhs.code_); -} - -inline bool Status::operator!=(const Status& rhs) const { -#ifdef ROCKSDB_ASSERT_STATUS_CHECKED - checked_ = true; - rhs.checked_ = true; -#endif // ROCKSDB_ASSERT_STATUS_CHECKED - return !(*this == rhs); -} - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/table.h b/dist/darwin_amd64/include/rocksdb/table.h deleted file mode 100644 index 8e0d144..0000000 --- a/dist/darwin_amd64/include/rocksdb/table.h +++ /dev/null @@ -1,638 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. -// -// Currently we support two types of tables: plain table and block-based table. -// 1. 
Block-based table: this is the default table type that we inherited from -// LevelDB, which was designed for storing data in hard disk or flash -// device. -// 2. Plain table: it is one of RocksDB's SST file format optimized -// for low query latency on pure-memory or really low-latency media. -// -// A tutorial of rocksdb table formats is available here: -// https://github.com/facebook/rocksdb/wiki/A-Tutorial-of-RocksDB-SST-formats -// -// Example code is also available -// https://github.com/facebook/rocksdb/wiki/A-Tutorial-of-RocksDB-SST-formats#wiki-examples - -#pragma once - -#include -#include -#include - -#include "rocksdb/configurable.h" -#include "rocksdb/env.h" -#include "rocksdb/options.h" -#include "rocksdb/status.h" - -namespace ROCKSDB_NAMESPACE { - -// -- Block-based Table -class Cache; -class FilterPolicy; -class FlushBlockPolicyFactory; -class PersistentCache; -class RandomAccessFile; -struct TableReaderOptions; -struct TableBuilderOptions; -class TableBuilder; -class TableFactory; -class TableReader; -class WritableFileWriter; -struct ConfigOptions; -struct EnvOptions; - -enum ChecksumType : char { - kNoChecksum = 0x0, - kCRC32c = 0x1, - kxxHash = 0x2, - kxxHash64 = 0x3, -}; - -// For advanced user only -struct BlockBasedTableOptions { - static const char* kName() { return "BlockTableOptions"; }; - // @flush_block_policy_factory creates the instances of flush block policy. - // which provides a configurable way to determine when to flush a block in - // the block based tables. If not set, table builder will use the default - // block flush policy, which cut blocks by block size (please refer to - // `FlushBlockBySizePolicy`). - std::shared_ptr flush_block_policy_factory; - - // TODO(kailiu) Temporarily disable this feature by making the default value - // to be false. 
- // - // TODO(ajkr) we need to update names of variables controlling meta-block - // caching as they should now apply to range tombstone and compression - // dictionary meta-blocks, in addition to index and filter meta-blocks. - // - // Indicating if we'd put index/filter blocks to the block cache. - // If not specified, each "table reader" object will pre-load index/filter - // block during table initialization. - bool cache_index_and_filter_blocks = false; - - // If cache_index_and_filter_blocks is enabled, cache index and filter - // blocks with high priority. If set to true, depending on implementation of - // block cache, index and filter blocks may be less likely to be evicted - // than data blocks. - bool cache_index_and_filter_blocks_with_high_priority = true; - - // if cache_index_and_filter_blocks is true and the below is true, then - // filter and index blocks are stored in the cache, but a reference is - // held in the "table reader" object so the blocks are pinned and only - // evicted from cache when the table reader is freed. - bool pin_l0_filter_and_index_blocks_in_cache = false; - - // If cache_index_and_filter_blocks is true and the below is true, then - // the top-level index of partitioned filter and index blocks are stored in - // the cache, but a reference is held in the "table reader" object so the - // blocks are pinned and only evicted from cache when the table reader is - // freed. This is not limited to l0 in LSM tree. - bool pin_top_level_index_and_filter = true; - - // The index type that will be used for this table. - enum IndexType : char { - // A space efficient index block that is optimized for - // binary-search-based index. - kBinarySearch = 0x00, - - // The hash index, if enabled, will do the hash lookup when - // `Options.prefix_extractor` is provided. - kHashSearch = 0x01, - - // A two-level index implementation. Both levels are binary search indexes. 
- kTwoLevelIndexSearch = 0x02, - - // Like kBinarySearch, but index also contains first key of each block. - // This allows iterators to defer reading the block until it's actually - // needed. May significantly reduce read amplification of short range scans. - // Without it, iterator seek usually reads one block from each level-0 file - // and from each level, which may be expensive. - // Works best in combination with: - // - IndexShorteningMode::kNoShortening, - // - custom FlushBlockPolicy to cut blocks at some meaningful boundaries, - // e.g. when prefix changes. - // Makes the index significantly bigger (2x or more), especially when keys - // are long. - kBinarySearchWithFirstKey = 0x03, - }; - - IndexType index_type = kBinarySearch; - - // The index type that will be used for the data block. - enum DataBlockIndexType : char { - kDataBlockBinarySearch = 0, // traditional block type - kDataBlockBinaryAndHash = 1, // additional hash index - }; - - DataBlockIndexType data_block_index_type = kDataBlockBinarySearch; - - // #entries/#buckets. It is valid only when data_block_hash_index_type is - // kDataBlockBinaryAndHash. - double data_block_hash_table_util_ratio = 0.75; - - // This option is now deprecated. No matter what value it is set to, - // it will behave as if hash_index_allow_collision=true. - bool hash_index_allow_collision = true; - - // Use the specified checksum type. Newly created table files will be - // protected with this checksum type. Old table files will still be readable, - // even though they have different checksum type. - ChecksumType checksum = kCRC32c; - - // Disable block cache. If this is set to true, - // then no block cache should be used, and the block_cache should - // point to a nullptr object. - bool no_block_cache = false; - - // If non-NULL use the specified cache for blocks. - // If NULL, rocksdb will automatically create and use an 8MB internal cache. 
- std::shared_ptr block_cache = nullptr; - - // If non-NULL use the specified cache for pages read from device - // IF NULL, no page cache is used - std::shared_ptr persistent_cache = nullptr; - - // If non-NULL use the specified cache for compressed blocks. - // If NULL, rocksdb will not use a compressed block cache. - // Note: though it looks similar to `block_cache`, RocksDB doesn't put the - // same type of object there. - std::shared_ptr block_cache_compressed = nullptr; - - // Approximate size of user data packed per block. Note that the - // block size specified here corresponds to uncompressed data. The - // actual size of the unit read from disk may be smaller if - // compression is enabled. This parameter can be changed dynamically. - size_t block_size = 4 * 1024; - - // This is used to close a block before it reaches the configured - // 'block_size'. If the percentage of free space in the current block is less - // than this specified number and adding a new record to the block will - // exceed the configured block size, then this block will be closed and the - // new record will be written to the next block. - int block_size_deviation = 10; - - // Number of keys between restart points for delta encoding of keys. - // This parameter can be changed dynamically. Most clients should - // leave this parameter alone. The minimum value allowed is 1. Any smaller - // value will be silently overwritten with 1. - int block_restart_interval = 16; - - // Same as block_restart_interval but used for the index block. - int index_block_restart_interval = 1; - - // Block size for partitioned metadata. Currently applied to indexes when - // kTwoLevelIndexSearch is used and to filters when partition_filters is used. - // Note: Since in the current implementation the filters and index partitions - // are aligned, an index/filter block is created when either index or filter - // block size reaches the specified limit. 
- // Note: this limit is currently applied to only index blocks; a filter - // partition is cut right after an index block is cut - // TODO(myabandeh): remove the note above when filter partitions are cut - // separately - uint64_t metadata_block_size = 4096; - - // Note: currently this option requires kTwoLevelIndexSearch to be set as - // well. - // TODO(myabandeh): remove the note above once the limitation is lifted - // Use partitioned full filters for each SST file. This option is - // incompatible with block-based filters. - bool partition_filters = false; - - // EXPERIMENTAL Option to generate Bloom filters that minimize memory - // internal fragmentation. - // - // When false, malloc_usable_size is not available, or format_version < 5, - // filters are generated without regard to internal fragmentation when - // loaded into memory (historical behavior). When true (and - // malloc_usable_size is available and format_version >= 5), then Bloom - // filters are generated to "round up" and "round down" their sizes to - // minimize internal fragmentation when loaded into memory, assuming the - // reading DB has the same memory allocation characteristics as the - // generating DB. This option does not break forward or backward - // compatibility. - // - // While individual filters will vary in bits/key and false positive rate - // when setting is true, the implementation attempts to maintain a weighted - // average FP rate for filters consistent with this option set to false. - // - // With Jemalloc for example, this setting is expected to save about 10% of - // the memory footprint and block cache charge of filters, while increasing - // disk usage of filters by about 1-2% due to encoding efficiency losses - // with variance in bits/key. - // - // NOTE: Because some memory counted by block cache might be unmapped pages - // within internal fragmentation, this option can increase observed RSS - // memory usage. 
With cache_index_and_filter_blocks=true, this option makes - // the block cache better at using space it is allowed. - // - // NOTE: Do not set to true if you do not trust malloc_usable_size. With - // this option, RocksDB might access an allocated memory object beyond its - // original size if malloc_usable_size says it is safe to do so. While this - // can be considered bad practice, it should not produce undefined behavior - // unless malloc_usable_size is buggy or broken. - bool optimize_filters_for_memory = false; - - // Use delta encoding to compress keys in blocks. - // ReadOptions::pin_data requires this option to be disabled. - // - // Default: true - bool use_delta_encoding = true; - - // If non-nullptr, use the specified filter policy to reduce disk reads. - // Many applications will benefit from passing the result of - // NewBloomFilterPolicy() here. - std::shared_ptr filter_policy = nullptr; - - // If true, place whole keys in the filter (not just prefixes). - // This must generally be true for gets to be efficient. - bool whole_key_filtering = true; - - // Verify that decompressing the compressed block gives back the input. This - // is a verification mode that we use to detect bugs in compression - // algorithms. - bool verify_compression = false; - - // If used, For every data block we load into memory, we will create a bitmap - // of size ((block_size / `read_amp_bytes_per_bit`) / 8) bytes. This bitmap - // will be used to figure out the percentage we actually read of the blocks. 
- // - // When this feature is used Tickers::READ_AMP_ESTIMATE_USEFUL_BYTES and - // Tickers::READ_AMP_TOTAL_READ_BYTES can be used to calculate the - // read amplification using this formula - // (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES) - // - // value => memory usage (percentage of loaded blocks memory) - // 1 => 12.50 % - // 2 => 06.25 % - // 4 => 03.12 % - // 8 => 01.56 % - // 16 => 00.78 % - // - // Note: This number must be a power of 2, if not it will be sanitized - // to be the next lowest power of 2, for example a value of 7 will be - // treated as 4, a value of 19 will be treated as 16. - // - // Default: 0 (disabled) - uint32_t read_amp_bytes_per_bit = 0; - - // We currently have five versions: - // 0 -- This version is currently written out by all RocksDB's versions by - // default. Can be read by really old RocksDB's. Doesn't support changing - // checksum (default is CRC32). - // 1 -- Can be read by RocksDB's versions since 3.0. Supports non-default - // checksum, like xxHash. It is written by RocksDB when - // BlockBasedTableOptions::checksum is something other than kCRC32c. (version - // 0 is silently upconverted) - // 2 -- Can be read by RocksDB's versions since 3.10. Changes the way we - // encode compressed blocks with LZ4, BZip2 and Zlib compression. If you - // don't plan to run RocksDB before version 3.10, you should probably use - // this. - // 3 -- Can be read by RocksDB's versions since 5.15. Changes the way we - // encode the keys in index blocks. If you don't plan to run RocksDB before - // version 5.15, you should probably use this. - // This option only affects newly written tables. When reading existing - // tables, the information about version is read from the footer. - // 4 -- Can be read by RocksDB's versions since 5.16. Changes the way we - // encode the values in index blocks. 
If you don't plan to run RocksDB before - // version 5.16 and you are using index_block_restart_interval > 1, you should - // probably use this as it would reduce the index size. - // This option only affects newly written tables. When reading existing - // tables, the information about version is read from the footer. - // 5 -- Can be read by RocksDB's versions since 6.6.0. Full and partitioned - // filters use a generally faster and more accurate Bloom filter - // implementation, with a different schema. - uint32_t format_version = 4; - - // Store index blocks on disk in compressed format. Changing this option to - // false will avoid the overhead of decompression if index blocks are evicted - // and read back - bool enable_index_compression = true; - - // Align data blocks on lesser of page size and block size - bool block_align = false; - - // This enum allows trading off increased index size for improved iterator - // seek performance in some situations, particularly when block cache is - // disabled (ReadOptions::fill_cache = false) and direct IO is - // enabled (DBOptions::use_direct_reads = true). - // The default mode is the best tradeoff for most use cases. - // This option only affects newly written tables. - // - // The index contains a key separating each pair of consecutive blocks. - // Let A be the highest key in one block, B the lowest key in the next block, - // and I the index entry separating these two blocks: - // [ ... A] I [B ...] - // I is allowed to be anywhere in [A, B). - // If an iterator is seeked to a key in (A, I], we'll unnecessarily read the - // first block, then immediately fall through to the second block. - // However, if I=A, this can't happen, and we'll read only the second block. - // In kNoShortening mode, we use I=A. In other modes, we use the shortest - // key in [A, B), which usually significantly reduces index size. 
- // - // There's a similar story for the last index entry, which is an upper bound - // of the highest key in the file. If it's shortened and therefore - // overestimated, iterator is likely to unnecessarily read the last data block - // from each file on each seek. - enum class IndexShorteningMode : char { - // Use full keys. - kNoShortening, - // Shorten index keys between blocks, but use full key for the last index - // key, which is the upper bound of the whole file. - kShortenSeparators, - // Shorten both keys between blocks and key after last block. - kShortenSeparatorsAndSuccessor, - }; - - IndexShorteningMode index_shortening = - IndexShorteningMode::kShortenSeparators; -}; - -// Table Properties that are specific to block-based table properties. -struct BlockBasedTablePropertyNames { - // value of this properties is a fixed int32 number. - static const std::string kIndexType; - // value is "1" for true and "0" for false. - static const std::string kWholeKeyFiltering; - // value is "1" for true and "0" for false. - static const std::string kPrefixFiltering; -}; - -// Create default block based table factory. -extern TableFactory* NewBlockBasedTableFactory( - const BlockBasedTableOptions& table_options = BlockBasedTableOptions()); - -#ifndef ROCKSDB_LITE - -enum EncodingType : char { - // Always write full keys without any special encoding. - kPlain, - // Find opportunity to write the same prefix once for multiple rows. - // In some cases, when a key follows a previous key with the same prefix, - // instead of writing out the full key, it just writes out the size of the - // shared prefix, as well as other bytes, to save some bytes. - // - // When using this option, the user is required to use the same prefix - // extractor to make sure the same prefix will be extracted from the same key. - // The Name() value of the prefix extractor will be stored in the file. 
When - // reopening the file, the name of the options.prefix_extractor given will be - // bitwise compared to the prefix extractors stored in the file. An error - // will be returned if the two don't match. - kPrefix, -}; - -// Table Properties that are specific to plain table properties. -struct PlainTablePropertyNames { - static const std::string kEncodingType; - static const std::string kBloomVersion; - static const std::string kNumBloomBlocks; -}; - -const uint32_t kPlainTableVariableLength = 0; - -struct PlainTableOptions { - static const char* kName() { return "PlainTableOptions"; }; - // @user_key_len: plain table has optimization for fix-sized keys, which can - // be specified via user_key_len. Alternatively, you can pass - // `kPlainTableVariableLength` if your keys have variable - // lengths. - uint32_t user_key_len = kPlainTableVariableLength; - - // @bloom_bits_per_key: the number of bits used for bloom filer per prefix. - // You may disable it by passing a zero. - int bloom_bits_per_key = 10; - - // @hash_table_ratio: the desired utilization of the hash table used for - // prefix hashing. - // hash_table_ratio = number of prefixes / #buckets in the - // hash table - double hash_table_ratio = 0.75; - - // @index_sparseness: inside each prefix, need to build one index record for - // how many keys for binary search inside each hash bucket. - // For encoding type kPrefix, the value will be used when - // writing to determine an interval to rewrite the full - // key. It will also be used as a suggestion and satisfied - // when possible. - size_t index_sparseness = 16; - - // @huge_page_tlb_size: if <=0, allocate hash indexes and blooms from malloc. - // Otherwise from huge page TLB. The user needs to - // reserve huge pages for it to be allocated, like: - // sysctl -w vm.nr_hugepages=20 - // See linux doc Documentation/vm/hugetlbpage.txt - size_t huge_page_tlb_size = 0; - - // @encoding_type: how to encode the keys. 
See enum EncodingType above for - // the choices. The value will determine how to encode keys - // when writing to a new SST file. This value will be stored - // inside the SST file which will be used when reading from - // the file, which makes it possible for users to choose - // different encoding type when reopening a DB. Files with - // different encoding types can co-exist in the same DB and - // can be read. - EncodingType encoding_type = kPlain; - - // @full_scan_mode: mode for reading the whole file one record by one without - // using the index. - bool full_scan_mode = false; - - // @store_index_in_file: compute plain table index and bloom filter during - // file building and store it in file. When reading - // file, index will be mmaped instead of recomputation. - bool store_index_in_file = false; -}; - -// -- Plain Table with prefix-only seek -// For this factory, you need to set Options.prefix_extractor properly to make -// it work. Look-up will starts with prefix hash lookup for key prefix. Inside -// the hash bucket found, a binary search is executed for hash conflicts. -// Finally, a linear search is used. - -extern TableFactory* NewPlainTableFactory( - const PlainTableOptions& options = PlainTableOptions()); - -struct CuckooTablePropertyNames { - // The key that is used to fill empty buckets. - static const std::string kEmptyKey; - // Fixed length of value. - static const std::string kValueLength; - // Number of hash functions used in Cuckoo Hash. - static const std::string kNumHashFunc; - // It denotes the number of buckets in a Cuckoo Block. Given a key and a - // particular hash function, a Cuckoo Block is a set of consecutive buckets, - // where starting bucket id is given by the hash function on the key. In case - // of a collision during inserting the key, the builder tries to insert the - // key in other locations of the cuckoo block before using the next hash - // function. 
This reduces cache miss during read operation in case of - // collision. - static const std::string kCuckooBlockSize; - // Size of the hash table. Use this number to compute the modulo of hash - // function. The actual number of buckets will be kMaxHashTableSize + - // kCuckooBlockSize - 1. The last kCuckooBlockSize-1 buckets are used to - // accommodate the Cuckoo Block from end of hash table, due to cache friendly - // implementation. - static const std::string kHashTableSize; - // Denotes if the key sorted in the file is Internal Key (if false) - // or User Key only (if true). - static const std::string kIsLastLevel; - // Indicate if using identity function for the first hash function. - static const std::string kIdentityAsFirstHash; - // Indicate if using module or bit and to calculate hash value - static const std::string kUseModuleHash; - // Fixed user key length - static const std::string kUserKeyLength; -}; - -struct CuckooTableOptions { - static const char* kName() { return "CuckooTableOptions"; }; - - // Determines the utilization of hash tables. Smaller values - // result in larger hash tables with fewer collisions. - double hash_table_ratio = 0.9; - // A property used by builder to determine the depth to go to - // to search for a path to displace elements in case of - // collision. See Builder.MakeSpaceForKey method. Higher - // values result in more efficient hash tables with fewer - // lookups but take more time to build. - uint32_t max_search_depth = 100; - // In case of collision while inserting, the builder - // attempts to insert in the next cuckoo_block_size - // locations before skipping over to the next Cuckoo hash - // function. This makes lookups more cache friendly in case - // of collisions. - uint32_t cuckoo_block_size = 5; - // If this option is enabled, user key is treated as uint64_t and its value - // is used as hash value directly. This option changes builder's behavior. 
- // Reader ignore this option and behave according to what specified in table - // property. - bool identity_as_first_hash = false; - // If this option is set to true, module is used during hash calculation. - // This often yields better space efficiency at the cost of performance. - // If this option is set to false, # of entries in table is constrained to be - // power of two, and bit and is used to calculate hash, which is faster in - // general. - bool use_module_hash = true; -}; - -// Cuckoo Table Factory for SST table format using Cache Friendly Cuckoo Hashing -extern TableFactory* NewCuckooTableFactory( - const CuckooTableOptions& table_options = CuckooTableOptions()); - -#endif // ROCKSDB_LITE - -class RandomAccessFileReader; - -// A base class for table factories. -class TableFactory : public Configurable { - public: - virtual ~TableFactory() override {} - - static const char* kBlockCacheOpts() { return "BlockCache"; }; - static const char* kBlockBasedTableName() { return "BlockBasedTable"; }; - static const char* kPlainTableName() { return "PlainTable"; } - static const char* kCuckooTableName() { return "CuckooTable"; }; - - // Creates and configures a new TableFactory from the input options and id. - static Status CreateFromString(const ConfigOptions& config_options, - const std::string& id, - std::shared_ptr* factory); - - // The type of the table. - // - // The client of this package should switch to a new name whenever - // the table format implementation changes. - // - // Names starting with "rocksdb." are reserved and should not be used - // by any clients of this package. - virtual const char* Name() const = 0; - - // Returns true if the class is an instance of the input name. - // This is typically determined by if the input name matches the - // name of this object. 
- virtual bool IsInstanceOf(const std::string& name) const { - return name == Name(); - } - - // Returns a Table object table that can fetch data from file specified - // in parameter file. It's the caller's responsibility to make sure - // file is in the correct format. - // - // NewTableReader() is called in three places: - // (1) TableCache::FindTable() calls the function when table cache miss - // and cache the table object returned. - // (2) SstFileDumper (for SST Dump) opens the table and dump the table - // contents using the iterator of the table. - // (3) DBImpl::IngestExternalFile() calls this function to read the contents - // of the sst file it's attempting to add - // - // table_reader_options is a TableReaderOptions which contain all the - // needed parameters and configuration to open the table. - // file is a file handler to handle the file for the table. - // file_size is the physical file size of the file. - // table_reader is the output table reader. - virtual Status NewTableReader( - const TableReaderOptions& table_reader_options, - std::unique_ptr&& file, uint64_t file_size, - std::unique_ptr* table_reader, - bool prefetch_index_and_filter_in_cache = true) const { - ReadOptions ro; - return NewTableReader(ro, table_reader_options, std::move(file), file_size, - table_reader, prefetch_index_and_filter_in_cache); - } - - // Overload of the above function that allows the caller to pass in a - // ReadOptions - virtual Status NewTableReader( - const ReadOptions& ro, const TableReaderOptions& table_reader_options, - std::unique_ptr&& file, uint64_t file_size, - std::unique_ptr* table_reader, - bool prefetch_index_and_filter_in_cache) const = 0; - - // Return a table builder to write to a file for this table type. 
- // - // It is called in several places: - // (1) When flushing memtable to a level-0 output file, it creates a table - // builder (In DBImpl::WriteLevel0Table(), by calling BuildTable()) - // (2) During compaction, it gets the builder for writing compaction output - // files in DBImpl::OpenCompactionOutputFile(). - // (3) When recovering from transaction logs, it creates a table builder to - // write to a level-0 output file (In DBImpl::WriteLevel0TableForRecovery, - // by calling BuildTable()) - // (4) When running Repairer, it creates a table builder to convert logs to - // SST files (In Repairer::ConvertLogToTable() by calling BuildTable()) - // - // Multiple configured can be accessed from there, including and not limited - // to compression options. file is a handle of a writable file. - // It is the caller's responsibility to keep the file open and close the file - // after closing the table builder. compression_type is the compression type - // to use in this table. - virtual TableBuilder* NewTableBuilder( - const TableBuilderOptions& table_builder_options, - uint32_t column_family_id, WritableFileWriter* file) const = 0; - - // Return is delete range supported - virtual bool IsDeleteRangeSupported() const { return false; } -}; - -#ifndef ROCKSDB_LITE -// Create a special table factory that can open either of the supported -// table formats, based on setting inside the SST files. It should be used to -// convert a DB from one table format to another. -// @table_factory_to_write: the table factory used when writing to new files. -// @block_based_table_factory: block based table factory to use. If NULL, use -// a default one. -// @plain_table_factory: plain table factory to use. If NULL, use a default one. -// @cuckoo_table_factory: cuckoo table factory to use. If NULL, use a default -// one. 
-extern TableFactory* NewAdaptiveTableFactory( - std::shared_ptr table_factory_to_write = nullptr, - std::shared_ptr block_based_table_factory = nullptr, - std::shared_ptr plain_table_factory = nullptr, - std::shared_ptr cuckoo_table_factory = nullptr); - -#endif // ROCKSDB_LITE - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/table_properties.h b/dist/darwin_amd64/include/rocksdb/table_properties.h deleted file mode 100644 index ba3eca7..0000000 --- a/dist/darwin_amd64/include/rocksdb/table_properties.h +++ /dev/null @@ -1,268 +0,0 @@ -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. -// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -#pragma once - -#include -#include -#include -#include "rocksdb/status.h" -#include "rocksdb/types.h" - -namespace ROCKSDB_NAMESPACE { - -// -- Table Properties -// Other than basic table properties, each table may also have the user -// collected properties. -// The value of the user-collected properties are encoded as raw bytes -- -// users have to interpret these values by themselves. -// Note: To do prefix seek/scan in `UserCollectedProperties`, you can do -// something similar to: -// -// UserCollectedProperties props = ...; -// for (auto pos = props.lower_bound(prefix); -// pos != props.end() && pos->first.compare(0, prefix.size(), prefix) == 0; -// ++pos) { -// ... -// } -typedef std::map UserCollectedProperties; - -// table properties' human-readable names in the property block. 
-struct TablePropertiesNames { - static const std::string kDbId; - static const std::string kDbSessionId; - static const std::string kDataSize; - static const std::string kIndexSize; - static const std::string kIndexPartitions; - static const std::string kTopLevelIndexSize; - static const std::string kIndexKeyIsUserKey; - static const std::string kIndexValueIsDeltaEncoded; - static const std::string kFilterSize; - static const std::string kRawKeySize; - static const std::string kRawValueSize; - static const std::string kNumDataBlocks; - static const std::string kNumEntries; - static const std::string kDeletedKeys; - static const std::string kMergeOperands; - static const std::string kNumRangeDeletions; - static const std::string kFormatVersion; - static const std::string kFixedKeyLen; - static const std::string kFilterPolicy; - static const std::string kColumnFamilyName; - static const std::string kColumnFamilyId; - static const std::string kComparator; - static const std::string kMergeOperator; - static const std::string kPrefixExtractorName; - static const std::string kPropertyCollectors; - static const std::string kCompression; - static const std::string kCompressionOptions; - static const std::string kCreationTime; - static const std::string kOldestKeyTime; - static const std::string kFileCreationTime; -}; - -extern const std::string kPropertiesBlock; -extern const std::string kCompressionDictBlock; -extern const std::string kRangeDelBlock; - -// `TablePropertiesCollector` provides the mechanism for users to collect -// their own properties that they are interested in. This class is essentially -// a collection of callback functions that will be invoked during table -// building. It is constructed with TablePropertiesCollectorFactory. 
The methods -// don't need to be thread-safe, as we will create exactly one -// TablePropertiesCollector object per table and then call it sequentially -class TablePropertiesCollector { - public: - virtual ~TablePropertiesCollector() {} - - // DEPRECATE User defined collector should implement AddUserKey(), though - // this old function still works for backward compatible reason. - // Add() will be called when a new key/value pair is inserted into the table. - // @params key the user key that is inserted into the table. - // @params value the value that is inserted into the table. - virtual Status Add(const Slice& /*key*/, const Slice& /*value*/) { - return Status::InvalidArgument( - "TablePropertiesCollector::Add() deprecated."); - } - - // AddUserKey() will be called when a new key/value pair is inserted into the - // table. - // @params key the user key that is inserted into the table. - // @params value the value that is inserted into the table. - virtual Status AddUserKey(const Slice& key, const Slice& value, - EntryType /*type*/, SequenceNumber /*seq*/, - uint64_t /*file_size*/) { - // For backwards-compatibility. - return Add(key, value); - } - - // Called after each new block is cut - virtual void BlockAdd(uint64_t /* blockRawBytes */, - uint64_t /* blockCompressedBytesFast */, - uint64_t /* blockCompressedBytesSlow */) { - // Nothing to do here. Callback registers can override. - return; - } - - // Finish() will be called when a table has already been built and is ready - // for writing the properties block. - // @params properties User will add their collected statistics to - // `properties`. - virtual Status Finish(UserCollectedProperties* properties) = 0; - - // Return the human-readable properties, where the key is property name and - // the value is the human-readable form of value. - virtual UserCollectedProperties GetReadableProperties() const = 0; - - // The name of the properties collector can be used for debugging purpose. 
- virtual const char* Name() const = 0; - - // EXPERIMENTAL Return whether the output file should be further compacted - virtual bool NeedCompact() const { return false; } -}; - -// Constructs TablePropertiesCollector. Internals create a new -// TablePropertiesCollector for each new table -class TablePropertiesCollectorFactory { - public: - struct Context { - uint32_t column_family_id; - static const uint32_t kUnknownColumnFamily; - }; - - virtual ~TablePropertiesCollectorFactory() {} - // has to be thread-safe - virtual TablePropertiesCollector* CreateTablePropertiesCollector( - TablePropertiesCollectorFactory::Context context) = 0; - - // The name of the properties collector can be used for debugging purpose. - virtual const char* Name() const = 0; - - // Can be overridden by sub-classes to return the Name, followed by - // configuration info that will // be logged to the info log when the - // DB is opened - virtual std::string ToString() const { return Name(); } -}; - -// TableProperties contains a bunch of read-only properties of its associated -// table. -struct TableProperties { - public: - // the total size of all data blocks. - uint64_t data_size = 0; - // the size of index block. - uint64_t index_size = 0; - // Total number of index partitions if kTwoLevelIndexSearch is used - uint64_t index_partitions = 0; - // Size of the top-level index if kTwoLevelIndexSearch is used - uint64_t top_level_index_size = 0; - // Whether the index key is user key. Otherwise it includes 8 byte of sequence - // number added by internal key format. - uint64_t index_key_is_user_key = 0; - // Whether delta encoding is used to encode the index values. - uint64_t index_value_is_delta_encoded = 0; - // the size of filter block. 
- uint64_t filter_size = 0; - // total raw key size - uint64_t raw_key_size = 0; - // total raw value size - uint64_t raw_value_size = 0; - // the number of blocks in this table - uint64_t num_data_blocks = 0; - // the number of entries in this table - uint64_t num_entries = 0; - // the number of deletions in the table - uint64_t num_deletions = 0; - // the number of merge operands in the table - uint64_t num_merge_operands = 0; - // the number of range deletions in this table - uint64_t num_range_deletions = 0; - // format version, reserved for backward compatibility - uint64_t format_version = 0; - // If 0, key is variable length. Otherwise number of bytes for each key. - uint64_t fixed_key_len = 0; - // ID of column family for this SST file, corresponding to the CF identified - // by column_family_name. - uint64_t column_family_id = ROCKSDB_NAMESPACE:: - TablePropertiesCollectorFactory::Context::kUnknownColumnFamily; - // Timestamp of the latest key. 0 means unknown. - // TODO(sagar0): Should be changed to latest_key_time ... but don't know the - // full implications of backward compatibility. Hence retaining for now. - uint64_t creation_time = 0; - // Timestamp of the earliest key. 0 means unknown. - uint64_t oldest_key_time = 0; - // Actual SST file creation time. 0 means unknown. - uint64_t file_creation_time = 0; - - // DB identity - // db_id is an identifier generated the first time the DB is created - // If DB identity is unset or unassigned, `db_id` will be an empty string. - std::string db_id; - - // DB session identity - // db_session_id is an identifier that gets reset every time the DB is opened - // If DB session identity is unset or unassigned, `db_session_id` will be an - // empty string. - std::string db_session_id; - - // Name of the column family with which this SST file is associated. - // If column family is unknown, `column_family_name` will be an empty string. 
- std::string column_family_name; - - // The name of the filter policy used in this table. - // If no filter policy is used, `filter_policy_name` will be an empty string. - std::string filter_policy_name; - - // The name of the comparator used in this table. - std::string comparator_name; - - // The name of the merge operator used in this table. - // If no merge operator is used, `merge_operator_name` will be "nullptr". - std::string merge_operator_name; - - // The name of the prefix extractor used in this table - // If no prefix extractor is used, `prefix_extractor_name` will be "nullptr". - std::string prefix_extractor_name; - - // The names of the property collectors factories used in this table - // separated by commas - // {collector_name[1]},{collector_name[2]},{collector_name[3]} .. - std::string property_collectors_names; - - // The compression algo used to compress the SST files. - std::string compression_name; - - // Compression options used to compress the SST files. - std::string compression_options; - - // user collected properties - UserCollectedProperties user_collected_properties; - UserCollectedProperties readable_properties; - - // The offset of the value of each property in the file. - std::map properties_offsets; - - // convert this object to a human readable form - // @prop_delim: delimiter for each property. - std::string ToString(const std::string& prop_delim = "; ", - const std::string& kv_delim = "=") const; - - // Aggregate the numerical member variables of the specified - // TableProperties. - void Add(const TableProperties& tp); -}; - -// Extra properties -// Below is a list of non-basic properties that are collected by database -// itself. Especially some properties regarding to the internal keys (which -// is unknown to `table`). -// -// DEPRECATED: these properties now belong as TableProperties members. Please -// use TableProperties::num_deletions and TableProperties::num_merge_operands, -// respectively. 
-extern uint64_t GetDeletedKeys(const UserCollectedProperties& props); -extern uint64_t GetMergeOperands(const UserCollectedProperties& props, - bool* property_present); - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/thread_status.h b/dist/darwin_amd64/include/rocksdb/thread_status.h deleted file mode 100644 index 6b2f5c8..0000000 --- a/dist/darwin_amd64/include/rocksdb/thread_status.h +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// -// This file defines the structures for exposing run-time status of any -// rocksdb-related thread. Such run-time status can be obtained via -// GetThreadList() API. -// -// Note that all thread-status features are still under-development, and -// thus APIs and class definitions might subject to change at this point. -// Will remove this comment once the APIs have been finalized. - -#pragma once - -#include -#include -#include -#include -#include -#include - -#if !defined(ROCKSDB_LITE) && !defined(NROCKSDB_THREAD_STATUS) && \ - defined(ROCKSDB_SUPPORT_THREAD_LOCAL) -#define ROCKSDB_USING_THREAD_STATUS -#endif - -namespace ROCKSDB_NAMESPACE { - -// TODO(yhchiang): remove this function once c++14 is available -// as std::max will be able to cover this. -// Current MS compiler does not support constexpr -template -struct constexpr_max { - static const int result = (A > B) ? A : B; -}; - -// A structure that describes the current status of a thread. -// The status of active threads can be fetched using -// ROCKSDB_NAMESPACE::GetThreadList(). -struct ThreadStatus { - // The type of a thread. 
- enum ThreadType : int { - HIGH_PRIORITY = 0, // RocksDB BG thread in high-pri thread pool - LOW_PRIORITY, // RocksDB BG thread in low-pri thread pool - USER, // User thread (Non-RocksDB BG thread) - BOTTOM_PRIORITY, // RocksDB BG thread in bottom-pri thread pool - NUM_THREAD_TYPES - }; - - // The type used to refer to a thread operation. - // A thread operation describes high-level action of a thread. - // Examples include compaction and flush. - enum OperationType : int { - OP_UNKNOWN = 0, - OP_COMPACTION, - OP_FLUSH, - NUM_OP_TYPES - }; - - enum OperationStage : int { - STAGE_UNKNOWN = 0, - STAGE_FLUSH_RUN, - STAGE_FLUSH_WRITE_L0, - STAGE_COMPACTION_PREPARE, - STAGE_COMPACTION_RUN, - STAGE_COMPACTION_PROCESS_KV, - STAGE_COMPACTION_INSTALL, - STAGE_COMPACTION_SYNC_FILE, - STAGE_PICK_MEMTABLES_TO_FLUSH, - STAGE_MEMTABLE_ROLLBACK, - STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS, - NUM_OP_STAGES - }; - - enum CompactionPropertyType : int { - COMPACTION_JOB_ID = 0, - COMPACTION_INPUT_OUTPUT_LEVEL, - COMPACTION_PROP_FLAGS, - COMPACTION_TOTAL_INPUT_BYTES, - COMPACTION_BYTES_READ, - COMPACTION_BYTES_WRITTEN, - NUM_COMPACTION_PROPERTIES - }; - - enum FlushPropertyType : int { - FLUSH_JOB_ID = 0, - FLUSH_BYTES_MEMTABLES, - FLUSH_BYTES_WRITTEN, - NUM_FLUSH_PROPERTIES - }; - - // The maximum number of properties of an operation. - // This number should be set to the biggest NUM_XXX_PROPERTIES. - static const int kNumOperationProperties = - constexpr_max::result; - - // The type used to refer to a thread state. - // A state describes lower-level action of a thread - // such as reading / writing a file or waiting for a mutex. 
- enum StateType : int { - STATE_UNKNOWN = 0, - STATE_MUTEX_WAIT = 1, - NUM_STATE_TYPES - }; - - ThreadStatus(const uint64_t _id, const ThreadType _thread_type, - const std::string& _db_name, const std::string& _cf_name, - const OperationType _operation_type, - const uint64_t _op_elapsed_micros, - const OperationStage _operation_stage, - const uint64_t _op_props[], const StateType _state_type) - : thread_id(_id), - thread_type(_thread_type), - db_name(_db_name), - cf_name(_cf_name), - operation_type(_operation_type), - op_elapsed_micros(_op_elapsed_micros), - operation_stage(_operation_stage), - state_type(_state_type) { - for (int i = 0; i < kNumOperationProperties; ++i) { - op_properties[i] = _op_props[i]; - } - } - - // An unique ID for the thread. - const uint64_t thread_id; - - // The type of the thread, it could be HIGH_PRIORITY, - // LOW_PRIORITY, and USER - const ThreadType thread_type; - - // The name of the DB instance where the thread is currently - // involved with. It would be set to empty string if the thread - // does not involve in any DB operation. - const std::string db_name; - - // The name of the column family where the thread is currently - // It would be set to empty string if the thread does not involve - // in any column family. - const std::string cf_name; - - // The operation (high-level action) that the current thread is involved. - const OperationType operation_type; - - // The elapsed time of the current thread operation in microseconds. - const uint64_t op_elapsed_micros; - - // An integer showing the current stage where the thread is involved - // in the current operation. - const OperationStage operation_stage; - - // A list of properties that describe some details about the current - // operation. Same field in op_properties[] might have different - // meanings for different operations. - uint64_t op_properties[kNumOperationProperties]; - - // The state (lower-level action) that the current thread is involved. 
- const StateType state_type; - - // The followings are a set of utility functions for interpreting - // the information of ThreadStatus - - static std::string GetThreadTypeName(ThreadType thread_type); - - // Obtain the name of an operation given its type. - static const std::string& GetOperationName(OperationType op_type); - - static const std::string MicrosToString(uint64_t op_elapsed_time); - - // Obtain a human-readable string describing the specified operation stage. - static const std::string& GetOperationStageName(OperationStage stage); - - // Obtain the name of the "i"th operation property of the - // specified operation. - static const std::string& GetOperationPropertyName(OperationType op_type, - int i); - - // Translate the "i"th property of the specified operation given - // a property value. - static std::map InterpretOperationProperties( - OperationType op_type, const uint64_t* op_properties); - - // Obtain the name of a state given its type. - static const std::string& GetStateName(StateType state_type); -}; - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/threadpool.h b/dist/darwin_amd64/include/rocksdb/threadpool.h deleted file mode 100644 index b39321f..0000000 --- a/dist/darwin_amd64/include/rocksdb/threadpool.h +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. 
-#pragma once - -#include - -#include "rocksdb/rocksdb_namespace.h" - -namespace ROCKSDB_NAMESPACE { - -/* - * ThreadPool is a component that will spawn N background threads that will - * be used to execute scheduled work, The number of background threads could - * be modified by calling SetBackgroundThreads(). - * */ -class ThreadPool { - public: - virtual ~ThreadPool() {} - - // Wait for all threads to finish. - // Discard those threads that did not start - // executing - virtual void JoinAllThreads() = 0; - - // Set the number of background threads that will be executing the - // scheduled jobs. - virtual void SetBackgroundThreads(int num) = 0; - virtual int GetBackgroundThreads() = 0; - - // Get the number of jobs scheduled in the ThreadPool queue. - virtual unsigned int GetQueueLen() const = 0; - - // Waits for all jobs to complete those - // that already started running and those that did not - // start yet. This ensures that everything that was thrown - // on the TP runs even though - // we may not have specified enough threads for the amount - // of jobs - virtual void WaitForJobsAndJoinAllThreads() = 0; - - // Submit a fire and forget jobs - // This allows to submit the same job multiple times - virtual void SubmitJob(const std::function&) = 0; - // This moves the function in for efficiency - virtual void SubmitJob(std::function&&) = 0; -}; - -// NewThreadPool() is a function that could be used to create a ThreadPool -// with `num_threads` background threads. -extern ThreadPool* NewThreadPool(int num_threads); - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/trace_reader_writer.h b/dist/darwin_amd64/include/rocksdb/trace_reader_writer.h deleted file mode 100644 index d58ed47..0000000 --- a/dist/darwin_amd64/include/rocksdb/trace_reader_writer.h +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
-// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#include "rocksdb/env.h" - -namespace ROCKSDB_NAMESPACE { - -// Allow custom implementations of TraceWriter and TraceReader. -// By default, RocksDB provides a way to capture the traces to a file using the -// factory NewFileTraceWriter(). But users could also choose to export traces to -// any other system by providing custom implementations of TraceWriter and -// TraceReader. - -// TraceWriter allows exporting RocksDB traces to any system, one operation at -// a time. -class TraceWriter { - public: - TraceWriter() {} - virtual ~TraceWriter() {} - - virtual Status Write(const Slice& data) = 0; - virtual Status Close() = 0; - virtual uint64_t GetFileSize() = 0; -}; - -// TraceReader allows reading RocksDB traces from any system, one operation at -// a time. A RocksDB Replayer could depend on this to replay opertions. -class TraceReader { - public: - TraceReader() {} - virtual ~TraceReader() {} - - virtual Status Read(std::string* data) = 0; - virtual Status Close() = 0; -}; - -// Factory methods to read/write traces from/to a file. -Status NewFileTraceWriter(Env* env, const EnvOptions& env_options, - const std::string& trace_filename, - std::unique_ptr* trace_writer); -Status NewFileTraceReader(Env* env, const EnvOptions& env_options, - const std::string& trace_filename, - std::unique_ptr* trace_reader); -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/transaction_log.h b/dist/darwin_amd64/include/rocksdb/transaction_log.h deleted file mode 100644 index 48d0e5c..0000000 --- a/dist/darwin_amd64/include/rocksdb/transaction_log.h +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
-// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#include -#include -#include "rocksdb/status.h" -#include "rocksdb/types.h" -#include "rocksdb/write_batch.h" - -namespace ROCKSDB_NAMESPACE { - -class LogFile; -typedef std::vector> VectorLogPtr; - -enum WalFileType { - /* Indicates that WAL file is in archive directory. WAL files are moved from - * the main db directory to archive directory once they are not live and stay - * there until cleaned up. Files are cleaned depending on archive size - * (Options::WAL_size_limit_MB) and time since last cleaning - * (Options::WAL_ttl_seconds). - */ - kArchivedLogFile = 0, - - /* Indicates that WAL file is live and resides in the main db directory */ - kAliveLogFile = 1 -}; - -class LogFile { - public: - LogFile() {} - virtual ~LogFile() {} - - // Returns log file's pathname relative to the main db dir - // Eg. For a live-log-file = /000003.log - // For an archived-log-file = /archive/000003.log - virtual std::string PathName() const = 0; - - // Primary identifier for log file. - // This is directly proportional to creation time of the log file - virtual uint64_t LogNumber() const = 0; - - // Log file can be either alive or archived - virtual WalFileType Type() const = 0; - - // Starting sequence number of writebatch written in this log file - virtual SequenceNumber StartSequence() const = 0; - - // Size of log file on disk in Bytes - virtual uint64_t SizeFileBytes() const = 0; -}; - -struct BatchResult { - SequenceNumber sequence = 0; - std::unique_ptr writeBatchPtr; - - // Add empty __ctor and __dtor for the rule of five - // However, preserve the original semantics and prohibit copying - // as the std::unique_ptr member does not copy. 
- BatchResult() {} - - ~BatchResult() {} - - BatchResult(const BatchResult&) = delete; - - BatchResult& operator=(const BatchResult&) = delete; - - BatchResult(BatchResult&& bResult) - : sequence(std::move(bResult.sequence)), - writeBatchPtr(std::move(bResult.writeBatchPtr)) {} - - BatchResult& operator=(BatchResult&& bResult) { - sequence = std::move(bResult.sequence); - writeBatchPtr = std::move(bResult.writeBatchPtr); - return *this; - } -}; - -// A TransactionLogIterator is used to iterate over the transactions in a db. -// One run of the iterator is continuous, i.e. the iterator will stop at the -// beginning of any gap in sequences -class TransactionLogIterator { - public: - TransactionLogIterator() {} - virtual ~TransactionLogIterator() {} - - // An iterator is either positioned at a WriteBatch or not valid. - // This method returns true if the iterator is valid. - // Can read data from a valid iterator. - virtual bool Valid() = 0; - - // Moves the iterator to the next WriteBatch. - // REQUIRES: Valid() to be true. - virtual void Next() = 0; - - // Returns ok if the iterator is valid. - // Returns the Error when something has gone wrong. - virtual Status status() = 0; - - // If valid return's the current write_batch and the sequence number of the - // earliest transaction contained in the batch. - // ONLY use if Valid() is true and status() is OK. - virtual BatchResult GetBatch() = 0; - - // The read options for TransactionLogIterator. - struct ReadOptions { - // If true, all data read from underlying storage will be - // verified against corresponding checksums. 
- // Default: true - bool verify_checksums_; - - ReadOptions() : verify_checksums_(true) {} - - explicit ReadOptions(bool verify_checksums) - : verify_checksums_(verify_checksums) {} - }; -}; -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/types.h b/dist/darwin_amd64/include/rocksdb/types.h deleted file mode 100644 index a4ab9c0..0000000 --- a/dist/darwin_amd64/include/rocksdb/types.h +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#include -#include "rocksdb/slice.h" - -namespace ROCKSDB_NAMESPACE { - -// Define all public custom types here. - -// Represents a sequence number in a WAL file. -typedef uint64_t SequenceNumber; - -const SequenceNumber kMinUnCommittedSeq = 1; // 0 is always committed - -// User-oriented representation of internal key types. -// Ordering of this enum entries should not change. -enum EntryType { - kEntryPut, - kEntryDelete, - kEntrySingleDelete, - kEntryMerge, - kEntryRangeDeletion, - kEntryBlobIndex, - kEntryDeleteWithTimestamp, - kEntryOther, -}; - -// tuple. -struct FullKey { - Slice user_key; - SequenceNumber sequence; - EntryType type; - - FullKey() : sequence(0) {} // Intentionally left uninitialized (for speed) - FullKey(const Slice& u, const SequenceNumber& seq, EntryType t) - : user_key(u), sequence(seq), type(t) {} - std::string DebugString(bool hex = false) const; - - void clear() { - user_key.clear(); - sequence = 0; - type = EntryType::kEntryPut; - } -}; - -// Parse slice representing internal key to FullKey -// Parsed FullKey is valid for as long as the memory pointed to by -// internal_key is alive. 
-bool ParseFullKey(const Slice& internal_key, FullKey* result); - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/universal_compaction.h b/dist/darwin_amd64/include/rocksdb/universal_compaction.h deleted file mode 100644 index e3aeee6..0000000 --- a/dist/darwin_amd64/include/rocksdb/universal_compaction.h +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#include -#include -#include - -namespace ROCKSDB_NAMESPACE { - -// -// Algorithm used to make a compaction request stop picking new files -// into a single compaction run -// -enum CompactionStopStyle { - kCompactionStopStyleSimilarSize, // pick files of similar size - kCompactionStopStyleTotalSize // total size of picked files > next file -}; - -class CompactionOptionsUniversal { - public: - // Percentage flexibility while comparing file size. If the candidate file(s) - // size is 1% smaller than the next file's size, then include next file into - // this candidate set. // Default: 1 - unsigned int size_ratio; - - // The minimum number of files in a single compaction run. Default: 2 - unsigned int min_merge_width; - - // The maximum number of files in a single compaction run. Default: UINT_MAX - unsigned int max_merge_width; - - // The size amplification is defined as the amount (in percentage) of - // additional storage needed to store a single byte of data in the database. - // For example, a size amplification of 2% means that a database that - // contains 100 bytes of user-data may occupy upto 102 bytes of - // physical storage. By this definition, a fully compacted database has - // a size amplification of 0%. 
Rocksdb uses the following heuristic - // to calculate size amplification: it assumes that all files excluding - // the earliest file contribute to the size amplification. - // Default: 200, which means that a 100 byte database could require upto - // 300 bytes of storage. - unsigned int max_size_amplification_percent; - - // If this option is set to be -1 (the default value), all the output files - // will follow compression type specified. - // - // If this option is not negative, we will try to make sure compressed - // size is just above this value. In normal cases, at least this percentage - // of data will be compressed. - // When we are compacting to a new file, here is the criteria whether - // it needs to be compressed: assuming here are the list of files sorted - // by generation time: - // A1...An B1...Bm C1...Ct - // where A1 is the newest and Ct is the oldest, and we are going to compact - // B1...Bm, we calculate the total size of all the files as total_size, as - // well as the total size of C1...Ct as total_C, the compaction output file - // will be compressed iff - // total_C / total_size < this percentage - // Default: -1 - int compression_size_percent; - - // The algorithm used to stop picking files into a single compaction run - // Default: kCompactionStopStyleTotalSize - CompactionStopStyle stop_style; - - // Option to optimize the universal multi level compaction by enabling - // trivial move for non overlapping files. 
- // Default: false - bool allow_trivial_move; - - // Default set of parameters - CompactionOptionsUniversal() - : size_ratio(1), - min_merge_width(2), - max_merge_width(UINT_MAX), - max_size_amplification_percent(200), - compression_size_percent(-1), - stop_style(kCompactionStopStyleTotalSize), - allow_trivial_move(false) {} -}; - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/utilities/backupable_db.h b/dist/darwin_amd64/include/rocksdb/utilities/backupable_db.h deleted file mode 100644 index 36f29ed..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/backupable_db.h +++ /dev/null @@ -1,535 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. - -#pragma once -#ifndef ROCKSDB_LITE - -#include -#include -#include -#include -#include - -#include "rocksdb/utilities/stackable_db.h" - -#include "rocksdb/env.h" -#include "rocksdb/options.h" -#include "rocksdb/status.h" - -namespace ROCKSDB_NAMESPACE { - -// The default DB file checksum function name. -constexpr char kDbFileChecksumFuncName[] = "FileChecksumCrc32c"; -// The default BackupEngine file checksum function name. -constexpr char kBackupFileChecksumFuncName[] = "crc32c"; - -struct BackupableDBOptions { - // Where to keep the backup files. Has to be different than dbname_ - // Best to set this to dbname_ + "/backups" - // Required - std::string backup_dir; - - // Backup Env object. It will be used for backup file I/O. If it's - // nullptr, backups will be written out using DBs Env. 
If it's - // non-nullptr, backup's I/O will be performed using this object. - // If you want to have backups on HDFS, use HDFS Env here! - // Default: nullptr - Env* backup_env; - - // If share_table_files == true, backup will assume that table files with - // same name have the same contents. This enables incremental backups and - // avoids unnecessary data copies. - // If share_table_files == false, each backup will be on its own and will - // not share any data with other backups. - // default: true - bool share_table_files; - - // Backup info and error messages will be written to info_log - // if non-nullptr. - // Default: nullptr - Logger* info_log; - - // If sync == true, we can guarantee you'll get consistent backup even - // on a machine crash/reboot. Backup process is slower with sync enabled. - // If sync == false, we don't guarantee anything on machine reboot. However, - // chances are some of the backups are consistent. - // Default: true - bool sync; - - // If true, it will delete whatever backups there are already - // Default: false - bool destroy_old_data; - - // If false, we won't backup log files. This option can be useful for backing - // up in-memory databases where log file are persisted, but table files are in - // memory. - // Default: true - bool backup_log_files; - - // Max bytes that can be transferred in a second during backup. - // If 0, go as fast as you can - // Default: 0 - uint64_t backup_rate_limit; - - // Backup rate limiter. Used to control transfer speed for backup. If this is - // not null, backup_rate_limit is ignored. - // Default: nullptr - std::shared_ptr backup_rate_limiter{nullptr}; - - // Max bytes that can be transferred in a second during restore. - // If 0, go as fast as you can - // Default: 0 - uint64_t restore_rate_limit; - - // Restore rate limiter. Used to control transfer speed during restore. If - // this is not null, restore_rate_limit is ignored. 
- // Default: nullptr - std::shared_ptr restore_rate_limiter{nullptr}; - - // Only used if share_table_files is set to true. If true, will consider - // that backups can come from different databases, even differently mutated - // databases with the same DB ID. See share_files_with_checksum_naming and - // ShareFilesNaming for details on how table files names are made - // unique between databases. - // - // Using 'true' is fundamentally safer, and performance improvements vs. - // original design should leave almost no reason to use the 'false' setting. - // - // Default (only for historical reasons): false - bool share_files_with_checksum; - - // Up to this many background threads will copy files for CreateNewBackup() - // and RestoreDBFromBackup() - // Default: 1 - int max_background_operations; - - // During backup user can get callback every time next - // callback_trigger_interval_size bytes being copied. - // Default: 4194304 - uint64_t callback_trigger_interval_size; - - // For BackupEngineReadOnly, Open() will open at most this many of the - // latest non-corrupted backups. - // - // Note: this setting is ignored (behaves like INT_MAX) for any kind of - // writable BackupEngine because it would inhibit accounting for shared - // files for proper backup deletion, including purging any incompletely - // created backups on creation of a new backup. - // - // Default: INT_MAX - int max_valid_backups_to_open; - - // ShareFilesNaming describes possible naming schemes for backup - // table file names when the table files are stored in the shared_checksum - // directory (i.e., both share_table_files and share_files_with_checksum - // are true). - enum ShareFilesNaming : uint32_t { - // Backup SST filenames are __.sst - // where is an unsigned decimal integer. This is the - // original/legacy naming scheme for share_files_with_checksum, - // with two problems: - // * At massive scale, collisions on this triple with different file - // contents is plausible. 
- // * Determining the name to use requires computing the checksum, - // so generally requires reading the whole file even if the file - // is already backed up. - // ** ONLY RECOMMENDED FOR PRESERVING OLD BEHAVIOR ** - kLegacyCrc32cAndFileSize = 1U, - - // Backup SST filenames are _s.sst. This - // pair of values should be very strongly unique for a given SST file - // and easily determined before computing a checksum. The 's' indicates - // the value is a DB session id, not a checksum. - // - // Exceptions: - // * For old SST files without a DB session id, kLegacyCrc32cAndFileSize - // will be used instead, matching the names assigned by RocksDB versions - // not supporting the newer naming scheme. - // * See also flags below. - kUseDbSessionId = 2U, - - kMaskNoNamingFlags = 0xffffU, - - // If not already part of the naming scheme, insert - // _ - // before .sst in the name. In case of user code actually parsing the - // last _ before the .sst as the file size, this preserves that - // feature of kLegacyCrc32cAndFileSize. In other words, this option makes - // official that unofficial feature of the backup metadata. - // - // We do not consider SST file sizes to have sufficient entropy to - // contribute significantly to naming uniqueness. - kFlagIncludeFileSize = 1U << 31, - - // When encountering an SST file from a Facebook-internal early - // release of 6.12, use the default naming scheme in effect for - // when the SST file was generated (assuming full file checksum - // was not set to GetFileChecksumGenCrc32cFactory()). That naming is - // _.sst - // and ignores kFlagIncludeFileSize setting. - // NOTE: This flag is intended to be temporary and should be removed - // in a later release. - kFlagMatchInterimNaming = 1U << 30, - - kMaskNamingFlags = ~kMaskNoNamingFlags, - }; - - // Naming option for share_files_with_checksum table files. See - // ShareFilesNaming for details. 
- // - // Modifying this option cannot introduce a downgrade compatibility issue - // because RocksDB can read, restore, and delete backups using different file - // names, and it's OK for a backup directory to use a mixture of table file - // naming schemes. - // - // However, modifying this option and saving more backups to the same - // directory can lead to the same file getting saved again to that - // directory, under the new shared name in addition to the old shared - // name. - // - // Default: kUseDbSessionId | kFlagIncludeFileSize | kFlagMatchInterimNaming - // - // Note: This option comes into effect only if both share_files_with_checksum - // and share_table_files are true. - ShareFilesNaming share_files_with_checksum_naming; - - void Dump(Logger* logger) const; - - explicit BackupableDBOptions( - const std::string& _backup_dir, Env* _backup_env = nullptr, - bool _share_table_files = true, Logger* _info_log = nullptr, - bool _sync = true, bool _destroy_old_data = false, - bool _backup_log_files = true, uint64_t _backup_rate_limit = 0, - uint64_t _restore_rate_limit = 0, int _max_background_operations = 1, - uint64_t _callback_trigger_interval_size = 4 * 1024 * 1024, - int _max_valid_backups_to_open = INT_MAX, - ShareFilesNaming _share_files_with_checksum_naming = - static_cast(kUseDbSessionId | kFlagIncludeFileSize | - kFlagMatchInterimNaming)) - : backup_dir(_backup_dir), - backup_env(_backup_env), - share_table_files(_share_table_files), - info_log(_info_log), - sync(_sync), - destroy_old_data(_destroy_old_data), - backup_log_files(_backup_log_files), - backup_rate_limit(_backup_rate_limit), - restore_rate_limit(_restore_rate_limit), - share_files_with_checksum(false), - max_background_operations(_max_background_operations), - callback_trigger_interval_size(_callback_trigger_interval_size), - max_valid_backups_to_open(_max_valid_backups_to_open), - share_files_with_checksum_naming(_share_files_with_checksum_naming) { - assert(share_table_files || 
!share_files_with_checksum); - assert((share_files_with_checksum_naming & kMaskNoNamingFlags) != 0); - } -}; - -inline BackupableDBOptions::ShareFilesNaming operator&( - BackupableDBOptions::ShareFilesNaming lhs, - BackupableDBOptions::ShareFilesNaming rhs) { - uint32_t l = static_cast(lhs); - uint32_t r = static_cast(rhs); - assert(r == BackupableDBOptions::kMaskNoNamingFlags || - (r & BackupableDBOptions::kMaskNoNamingFlags) == 0); - return static_cast(l & r); -} - -inline BackupableDBOptions::ShareFilesNaming operator|( - BackupableDBOptions::ShareFilesNaming lhs, - BackupableDBOptions::ShareFilesNaming rhs) { - uint32_t l = static_cast(lhs); - uint32_t r = static_cast(rhs); - assert((r & BackupableDBOptions::kMaskNoNamingFlags) == 0); - return static_cast(l | r); -} - -struct CreateBackupOptions { - // Flush will always trigger if 2PC is enabled. - // If write-ahead logs are disabled, set flush_before_backup=true to - // avoid losing unflushed key/value pairs from the memtable. - bool flush_before_backup = false; - - // Callback for reporting progress, based on callback_trigger_interval_size. - std::function progress_callback = []() {}; - - // If false, background_thread_cpu_priority is ignored. - // Otherwise, the cpu priority can be decreased, - // if you try to increase the priority, the priority will not change. - // The initial priority of the threads is CpuPriority::kNormal, - // so you can decrease to priorities lower than kNormal. - bool decrease_background_thread_cpu_priority = false; - CpuPriority background_thread_cpu_priority = CpuPriority::kNormal; -}; - -struct RestoreOptions { - // If true, restore won't overwrite the existing log files in wal_dir. It will - // also move all log files from archive directory to wal_dir. Use this option - // in combination with BackupableDBOptions::backup_log_files = false for - // persisting in-memory databases. 
- // Default: false - bool keep_log_files; - - explicit RestoreOptions(bool _keep_log_files = false) - : keep_log_files(_keep_log_files) {} -}; - -typedef uint32_t BackupID; - -struct BackupInfo { - BackupID backup_id; - int64_t timestamp; - uint64_t size; - - uint32_t number_files; - std::string app_metadata; - - BackupInfo() {} - - BackupInfo(BackupID _backup_id, int64_t _timestamp, uint64_t _size, - uint32_t _number_files, const std::string& _app_metadata) - : backup_id(_backup_id), - timestamp(_timestamp), - size(_size), - number_files(_number_files), - app_metadata(_app_metadata) {} -}; - -class BackupStatistics { - public: - BackupStatistics() { - number_success_backup = 0; - number_fail_backup = 0; - } - - BackupStatistics(uint32_t _number_success_backup, - uint32_t _number_fail_backup) - : number_success_backup(_number_success_backup), - number_fail_backup(_number_fail_backup) {} - - ~BackupStatistics() {} - - void IncrementNumberSuccessBackup(); - void IncrementNumberFailBackup(); - - uint32_t GetNumberSuccessBackup() const; - uint32_t GetNumberFailBackup() const; - - std::string ToString() const; - - private: - uint32_t number_success_backup; - uint32_t number_fail_backup; -}; - -// A backup engine for accessing information about backups and restoring from -// them. -// BackupEngineReadOnly is not extensible. -class BackupEngineReadOnly { - public: - virtual ~BackupEngineReadOnly() {} - - static Status Open(const BackupableDBOptions& options, Env* db_env, - BackupEngineReadOnly** backup_engine_ptr); - // keep for backward compatibility. 
- static Status Open(Env* db_env, const BackupableDBOptions& options, - BackupEngineReadOnly** backup_engine_ptr) { - return BackupEngineReadOnly::Open(options, db_env, backup_engine_ptr); - } - - // Returns info about backups in backup_info - // You can GetBackupInfo safely, even with other BackupEngine performing - // backups on the same directory - virtual void GetBackupInfo(std::vector* backup_info) = 0; - - // Returns info about corrupt backups in corrupt_backups - virtual void GetCorruptedBackups( - std::vector* corrupt_backup_ids) = 0; - - // Restoring DB from backup is NOT safe when there is another BackupEngine - // running that might call DeleteBackup() or PurgeOldBackups(). It is caller's - // responsibility to synchronize the operation, i.e. don't delete the backup - // when you're restoring from it - // See also the corresponding doc in BackupEngine - virtual Status RestoreDBFromBackup(const RestoreOptions& options, - BackupID backup_id, - const std::string& db_dir, - const std::string& wal_dir) = 0; - - // keep for backward compatibility. - virtual Status RestoreDBFromBackup( - BackupID backup_id, const std::string& db_dir, const std::string& wal_dir, - const RestoreOptions& options = RestoreOptions()) { - return RestoreDBFromBackup(options, backup_id, db_dir, wal_dir); - } - - // See the corresponding doc in BackupEngine - virtual Status RestoreDBFromLatestBackup(const RestoreOptions& options, - const std::string& db_dir, - const std::string& wal_dir) = 0; - - // keep for backward compatibility. - virtual Status RestoreDBFromLatestBackup( - const std::string& db_dir, const std::string& wal_dir, - const RestoreOptions& options = RestoreOptions()) { - return RestoreDBFromLatestBackup(options, db_dir, wal_dir); - } - - // If verify_with_checksum is true, this function - // inspects the current checksums and file sizes of backup files to see if - // they match our expectation. 
- // - // If verify_with_checksum is false, this function - // checks that each file exists and that the size of the file matches our - // expectation. It does not check file checksum. - // - // If this BackupEngine created the backup, it compares the files' current - // sizes (and current checksum) against the number of bytes written to - // them (and the checksum calculated) during creation. - // Otherwise, it compares the files' current sizes (and checksums) against - // their sizes (and checksums) when the BackupEngine was opened. - // - // Returns Status::OK() if all checks are good - virtual Status VerifyBackup(BackupID backup_id, - bool verify_with_checksum = false) = 0; -}; - -// A backup engine for creating new backups. -// BackupEngine is not extensible. -class BackupEngine { - public: - virtual ~BackupEngine() {} - - // BackupableDBOptions have to be the same as the ones used in previous - // BackupEngines for the same backup directory. - static Status Open(const BackupableDBOptions& options, Env* db_env, - BackupEngine** backup_engine_ptr); - - // keep for backward compatibility. - static Status Open(Env* db_env, const BackupableDBOptions& options, - BackupEngine** backup_engine_ptr) { - return BackupEngine::Open(options, db_env, backup_engine_ptr); - } - - // same as CreateNewBackup, but stores extra application metadata. - virtual Status CreateNewBackupWithMetadata( - const CreateBackupOptions& options, DB* db, - const std::string& app_metadata) = 0; - - // keep here for backward compatibility. 
- virtual Status CreateNewBackupWithMetadata( - DB* db, const std::string& app_metadata, bool flush_before_backup = false, - std::function progress_callback = []() {}) { - CreateBackupOptions options; - options.flush_before_backup = flush_before_backup; - options.progress_callback = progress_callback; - return CreateNewBackupWithMetadata(options, db, app_metadata); - } - - // Captures the state of the database in the latest backup - // NOT a thread safe call - virtual Status CreateNewBackup(const CreateBackupOptions& options, DB* db) { - return CreateNewBackupWithMetadata(options, db, ""); - } - - // keep here for backward compatibility. - virtual Status CreateNewBackup(DB* db, bool flush_before_backup = false, - std::function progress_callback = - []() {}) { - CreateBackupOptions options; - options.flush_before_backup = flush_before_backup; - options.progress_callback = progress_callback; - return CreateNewBackup(options, db); - } - - // Deletes old backups, keeping latest num_backups_to_keep alive. - // See also DeleteBackup. - virtual Status PurgeOldBackups(uint32_t num_backups_to_keep) = 0; - - // Deletes a specific backup. If this operation (or PurgeOldBackups) - // is not completed due to crash, power failure, etc. the state - // will be cleaned up the next time you call DeleteBackup, - // PurgeOldBackups, or GarbageCollect. - virtual Status DeleteBackup(BackupID backup_id) = 0; - - // Call this from another thread if you want to stop the backup - // that is currently happening. It will return immediatelly, will - // not wait for the backup to stop. - // The backup will stop ASAP and the call to CreateNewBackup will - // return Status::Incomplete(). It will not clean up after itself, but - // the state will remain consistent. The state will be cleaned up the - // next time you call CreateNewBackup or GarbageCollect. 
- virtual void StopBackup() = 0; - - // Returns info about backups in backup_info - virtual void GetBackupInfo(std::vector* backup_info) = 0; - - // Returns info about corrupt backups in corrupt_backups - virtual void GetCorruptedBackups( - std::vector* corrupt_backup_ids) = 0; - - // restore from backup with backup_id - // IMPORTANT -- if options_.share_table_files == true, - // options_.share_files_with_checksum == false, you restore DB from some - // backup that is not the latest, and you start creating new backups from the - // new DB, they will probably fail. - // - // Example: Let's say you have backups 1, 2, 3, 4, 5 and you restore 3. - // If you add new data to the DB and try creating a new backup now, the - // database will diverge from backups 4 and 5 and the new backup will fail. - // If you want to create new backup, you will first have to delete backups 4 - // and 5. - virtual Status RestoreDBFromBackup(const RestoreOptions& options, - BackupID backup_id, - const std::string& db_dir, - const std::string& wal_dir) = 0; - - // keep for backward compatibility. - virtual Status RestoreDBFromBackup( - BackupID backup_id, const std::string& db_dir, const std::string& wal_dir, - const RestoreOptions& options = RestoreOptions()) { - return RestoreDBFromBackup(options, backup_id, db_dir, wal_dir); - } - - // restore from the latest backup - virtual Status RestoreDBFromLatestBackup(const RestoreOptions& options, - const std::string& db_dir, - const std::string& wal_dir) = 0; - - // keep for backward compatibility. - virtual Status RestoreDBFromLatestBackup( - const std::string& db_dir, const std::string& wal_dir, - const RestoreOptions& options = RestoreOptions()) { - return RestoreDBFromLatestBackup(options, db_dir, wal_dir); - } - - // If verify_with_checksum is true, this function - // inspects the current checksums and file sizes of backup files to see if - // they match our expectation. 
- // - // If verify_with_checksum is false, this function - // checks that each file exists and that the size of the file matches our - // expectation. It does not check file checksum. - // - // Returns Status::OK() if all checks are good - virtual Status VerifyBackup(BackupID backup_id, - bool verify_with_checksum = false) = 0; - - // Will delete any files left over from incomplete creation or deletion of - // a backup. This is not normally needed as those operations also clean up - // after prior incomplete calls to the same kind of operation (create or - // delete). - // NOTE: This is not designed to delete arbitrary files added to the backup - // directory outside of BackupEngine, and clean-up is always subject to - // permissions on and availability of the underlying filesystem. - virtual Status GarbageCollect() = 0; -}; - -} // namespace ROCKSDB_NAMESPACE -#endif // ROCKSDB_LITE diff --git a/dist/darwin_amd64/include/rocksdb/utilities/checkpoint.h b/dist/darwin_amd64/include/rocksdb/utilities/checkpoint.h deleted file mode 100644 index 1b6a740..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/checkpoint.h +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// -// A checkpoint is an openable snapshot of a database at a point in time. 
- -#pragma once -#ifndef ROCKSDB_LITE - -#include -#include -#include "rocksdb/status.h" - -namespace ROCKSDB_NAMESPACE { - -class DB; -class ColumnFamilyHandle; -struct LiveFileMetaData; -struct ExportImportFilesMetaData; - -class Checkpoint { - public: - // Creates a Checkpoint object to be used for creating openable snapshots - static Status Create(DB* db, Checkpoint** checkpoint_ptr); - - // Builds an openable snapshot of RocksDB on the same disk, which - // accepts an output directory on the same disk, and under the directory - // (1) hard-linked SST files pointing to existing live SST files - // SST files will be copied if output directory is on a different filesystem - // (2) a copied manifest files and other files - // The directory should not already exist and will be created by this API. - // The directory will be an absolute path - // log_size_for_flush: if the total log file size is equal or larger than - // this value, then a flush is triggered for all the column families. The - // default value is 0, which means flush is always triggered. If you move - // away from the default, the checkpoint may not contain up-to-date data - // if WAL writing is not always enabled. - // Flush will always trigger if it is 2PC. - // sequence_number_ptr: if it is not nullptr, the value it points to will be - // set to the DB's sequence number. The default value of this parameter is - // nullptr. - virtual Status CreateCheckpoint(const std::string& checkpoint_dir, - uint64_t log_size_for_flush = 0, - uint64_t* sequence_number_ptr = nullptr); - - // Exports all live SST files of a specified Column Family onto export_dir, - // returning SST files information in metadata. - // - SST files will be created as hard links when the directory specified - // is in the same partition as the db directory, copied otherwise. - // - export_dir should not already exist and will be created by this API. - // - Always triggers a flush. 
- virtual Status ExportColumnFamily(ColumnFamilyHandle* handle, - const std::string& export_dir, - ExportImportFilesMetaData** metadata); - - virtual ~Checkpoint() {} -}; - -} // namespace ROCKSDB_NAMESPACE -#endif // !ROCKSDB_LITE diff --git a/dist/darwin_amd64/include/rocksdb/utilities/convenience.h b/dist/darwin_amd64/include/rocksdb/utilities/convenience.h deleted file mode 100644 index f61afd6..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/convenience.h +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -// This file was moved to rocksdb/convenience.h" - -#include "rocksdb/convenience.h" diff --git a/dist/darwin_amd64/include/rocksdb/utilities/db_ttl.h b/dist/darwin_amd64/include/rocksdb/utilities/db_ttl.h deleted file mode 100644 index d57e747..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/db_ttl.h +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once -#ifndef ROCKSDB_LITE - -#include -#include - -#include "rocksdb/db.h" -#include "rocksdb/utilities/stackable_db.h" - -namespace ROCKSDB_NAMESPACE { - -// Database with TTL support. -// -// USE-CASES: -// This API should be used to open the db when key-values inserted are -// meant to be removed from the db in a non-strict 'ttl' amount of time -// Therefore, this guarantees that key-values inserted will remain in the -// db for >= ttl amount of time and the db will make efforts to remove the -// key-values as soon as possible after ttl seconds of their insertion. 
-// -// BEHAVIOUR: -// TTL is accepted in seconds -// (int32_t)Timestamp(creation) is suffixed to values in Put internally -// Expired TTL values deleted in compaction only:(Timestamp+ttl=5 -// read_only=true opens in the usual read-only mode. Compactions will not be -// triggered(neither manual nor automatic), so no expired entries removed -// -// CONSTRAINTS: -// Not specifying/passing or non-positive TTL behaves like TTL = infinity -// -// !!!WARNING!!!: -// Calling DB::Open directly to re-open a db created by this API will get -// corrupt values(timestamp suffixed) and no ttl effect will be there -// during the second Open, so use this API consistently to open the db -// Be careful when passing ttl with a small positive value because the -// whole database may be deleted in a small amount of time - -class DBWithTTL : public StackableDB { - public: - virtual Status CreateColumnFamilyWithTtl( - const ColumnFamilyOptions& options, const std::string& column_family_name, - ColumnFamilyHandle** handle, int ttl) = 0; - - static Status Open(const Options& options, const std::string& dbname, - DBWithTTL** dbptr, int32_t ttl = 0, - bool read_only = false); - - static Status Open(const DBOptions& db_options, const std::string& dbname, - const std::vector& column_families, - std::vector* handles, - DBWithTTL** dbptr, const std::vector& ttls, - bool read_only = false); - - virtual void SetTtl(int32_t ttl) = 0; - - virtual void SetTtl(ColumnFamilyHandle* h, int32_t ttl) = 0; - - protected: - explicit DBWithTTL(DB* db) : StackableDB(db) {} -}; - -} // namespace ROCKSDB_NAMESPACE -#endif // ROCKSDB_LITE diff --git a/dist/darwin_amd64/include/rocksdb/utilities/debug.h b/dist/darwin_amd64/include/rocksdb/utilities/debug.h deleted file mode 100644 index a2b6adc..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/debug.h +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2017-present, Facebook, Inc. All rights reserved. 
-// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#ifndef ROCKSDB_LITE - -#include "rocksdb/db.h" -#include "rocksdb/types.h" - -namespace ROCKSDB_NAMESPACE { - -// Data associated with a particular version of a key. A database may internally -// store multiple versions of a same user key due to snapshots, compaction not -// happening yet, etc. -struct KeyVersion { - KeyVersion() : user_key(""), value(""), sequence(0), type(0) {} - - KeyVersion(const std::string& _user_key, const std::string& _value, - SequenceNumber _sequence, int _type) - : user_key(_user_key), value(_value), sequence(_sequence), type(_type) {} - - std::string user_key; - std::string value; - SequenceNumber sequence; - // TODO(ajkr): we should provide a helper function that converts the int to a - // string describing the type for easier debugging. - int type; -}; - -// Returns listing of all versions of keys in the provided user key range. -// The range is inclusive-inclusive, i.e., [`begin_key`, `end_key`], or -// `max_num_ikeys` has been reached. Since all those keys returned will be -// copied to memory, if the range covers too many keys, the memory usage -// may be huge. `max_num_ikeys` can be used to cap the memory usage. -// The result is inserted into the provided vector, `key_versions`. 
-Status GetAllKeyVersions(DB* db, Slice begin_key, Slice end_key, - size_t max_num_ikeys, - std::vector* key_versions); - -Status GetAllKeyVersions(DB* db, ColumnFamilyHandle* cfh, Slice begin_key, - Slice end_key, size_t max_num_ikeys, - std::vector* key_versions); - -} // namespace ROCKSDB_NAMESPACE - -#endif // ROCKSDB_LITE diff --git a/dist/darwin_amd64/include/rocksdb/utilities/env_librados.h b/dist/darwin_amd64/include/rocksdb/utilities/env_librados.h deleted file mode 100644 index 361217c..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/env_librados.h +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#include -#include - -#include "rocksdb/status.h" -#include "rocksdb/utilities/env_mirror.h" - -#include - -namespace ROCKSDB_NAMESPACE { -class LibradosWritableFile; - -class EnvLibrados : public EnvWrapper { - public: - // Create a brand new sequentially-readable file with the specified name. - // On success, stores a pointer to the new file in *result and returns OK. - // On failure stores nullptr in *result and returns non-OK. If the file does - // not exist, returns a non-OK status. - // - // The returned file will only be accessed by one thread at a time. - Status NewSequentialFile(const std::string& fname, - std::unique_ptr* result, - const EnvOptions& options) override; - - // Create a brand new random access read-only file with the - // specified name. On success, stores a pointer to the new file in - // *result and returns OK. On failure stores nullptr in *result and - // returns non-OK. If the file does not exist, returns a non-OK - // status. - // - // The returned file may be concurrently accessed by multiple threads. 
- Status NewRandomAccessFile(const std::string& fname, - std::unique_ptr* result, - const EnvOptions& options) override; - - // Create an object that writes to a new file with the specified - // name. Deletes any existing file with the same name and creates a - // new file. On success, stores a pointer to the new file in - // *result and returns OK. On failure stores nullptr in *result and - // returns non-OK. - // - // The returned file will only be accessed by one thread at a time. - Status NewWritableFile(const std::string& fname, - std::unique_ptr* result, - const EnvOptions& options) override; - - // Reuse an existing file by renaming it and opening it as writable. - Status ReuseWritableFile(const std::string& fname, - const std::string& old_fname, - std::unique_ptr* result, - const EnvOptions& options) override; - - // Create an object that represents a directory. Will fail if directory - // doesn't exist. If the directory exists, it will open the directory - // and create a new Directory object. - // - // On success, stores a pointer to the new Directory in - // *result and returns OK. On failure stores nullptr in *result and - // returns non-OK. - Status NewDirectory(const std::string& name, - std::unique_ptr* result) override; - - // Returns OK if the named file exists. - // NotFound if the named file does not exist, - // the calling process does not have permission to determine - // whether this file exists, or if the path is invalid. - // IOError if an IO Error was encountered - Status FileExists(const std::string& fname) override; - - // Store in *result the names of the children of the specified directory. - // The names are relative to "dir". - // Original contents of *results are dropped. - Status GetChildren(const std::string& dir, std::vector* result); - - // Delete the named file. - Status DeleteFile(const std::string& fname) override; - - // Create the specified directory. Returns error if directory exists. 
- Status CreateDir(const std::string& dirname) override; - - // Creates directory if missing. Return Ok if it exists, or successful in - // Creating. - Status CreateDirIfMissing(const std::string& dirname) override; - - // Delete the specified directory. - Status DeleteDir(const std::string& dirname) override; - - // Store the size of fname in *file_size. - Status GetFileSize(const std::string& fname, uint64_t* file_size) override; - - // Store the last modification time of fname in *file_mtime. - Status GetFileModificationTime(const std::string& fname, - uint64_t* file_mtime) override; - // Rename file src to target. - Status RenameFile(const std::string& src, const std::string& target) override; - // Hard Link file src to target. - Status LinkFile(const std::string& src, const std::string& target) override; - - // Lock the specified file. Used to prevent concurrent access to - // the same db by multiple processes. On failure, stores nullptr in - // *lock and returns non-OK. - // - // On success, stores a pointer to the object that represents the - // acquired lock in *lock and returns OK. The caller should call - // UnlockFile(*lock) to release the lock. If the process exits, - // the lock will be automatically released. - // - // If somebody else already holds the lock, finishes immediately - // with a failure. I.e., this call does not wait for existing locks - // to go away. - // - // May create the named file if it does not already exist. - Status LockFile(const std::string& fname, FileLock** lock); - - // Release the lock acquired by a previous successful call to LockFile. - // REQUIRES: lock was returned by a successful LockFile() call - // REQUIRES: lock has not already been unlocked. - Status UnlockFile(FileLock* lock); - - // Get full directory name for this db. 
- Status GetAbsolutePath(const std::string& db_path, std::string* output_path); - - // Generate unique id - std::string GenerateUniqueId(); - - // Get default EnvLibrados - static EnvLibrados* Default(); - - explicit EnvLibrados(const std::string& db_name, - const std::string& config_path, - const std::string& db_pool); - - explicit EnvLibrados( - const std::string& client_name, // first 3 parameters are - // for RADOS client init - const std::string& cluster_name, const uint64_t flags, - const std::string& db_name, const std::string& config_path, - const std::string& db_pool, const std::string& wal_dir, - const std::string& wal_pool, const uint64_t write_buffer_size); - ~EnvLibrados() { _rados.shutdown(); } - - private: - std::string _client_name; - std::string _cluster_name; - uint64_t _flags; - std::string _db_name; // get from user, readable string; Also used as db_id - // for db metadata - std::string _config_path; - librados::Rados _rados; // RADOS client - std::string _db_pool_name; - librados::IoCtx _db_pool_ioctx; // IoCtx for connecting db_pool - std::string _wal_dir; // WAL dir path - std::string _wal_pool_name; - librados::IoCtx _wal_pool_ioctx; // IoCtx for connecting wal_pool - uint64_t _write_buffer_size; // WritableFile buffer max size - - /* private function to communicate with rados */ - std::string _CreateFid(); - Status _GetFid(const std::string& fname, std::string& fid); - Status _GetFid(const std::string& fname, std::string& fid, int fid_len); - Status _RenameFid(const std::string& old_fname, const std::string& new_fname); - Status _AddFid(const std::string& fname, const std::string& fid); - Status _DelFid(const std::string& fname); - Status _GetSubFnames(const std::string& dirname, - std::vector* result); - librados::IoCtx* _GetIoctx(const std::string& prefix); - friend class LibradosWritableFile; -}; -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/utilities/env_mirror.h 
b/dist/darwin_amd64/include/rocksdb/utilities/env_mirror.h deleted file mode 100644 index 8e96ac4..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/env_mirror.h +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -// Copyright (c) 2015, Red Hat, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. -// -// MirrorEnv is an Env implementation that mirrors all file-related -// operations to two backing Env's (provided at construction time). -// Writes are mirrored. For read operations, we do the read from both -// backends and assert that the results match. -// -// This is useful when implementing a new Env and ensuring that the -// semantics and behavior are correct (in that they match that of an -// existing, stable Env, like the default POSIX one). 
- -#pragma once - -#ifndef ROCKSDB_LITE - -#include -#include -#include -#include "rocksdb/env.h" - -namespace ROCKSDB_NAMESPACE { - -class SequentialFileMirror; -class RandomAccessFileMirror; -class WritableFileMirror; - -class EnvMirror : public EnvWrapper { - Env *a_, *b_; - bool free_a_, free_b_; - - public: - EnvMirror(Env* a, Env* b, bool free_a = false, bool free_b = false) - : EnvWrapper(a), a_(a), b_(b), free_a_(free_a), free_b_(free_b) {} - ~EnvMirror() { - if (free_a_) delete a_; - if (free_b_) delete b_; - } - - Status NewSequentialFile(const std::string& f, - std::unique_ptr* r, - const EnvOptions& options) override; - Status NewRandomAccessFile(const std::string& f, - std::unique_ptr* r, - const EnvOptions& options) override; - Status NewWritableFile(const std::string& f, std::unique_ptr* r, - const EnvOptions& options) override; - Status ReuseWritableFile(const std::string& fname, - const std::string& old_fname, - std::unique_ptr* r, - const EnvOptions& options) override; - virtual Status NewDirectory(const std::string& name, - std::unique_ptr* result) override { - std::unique_ptr br; - Status as = a_->NewDirectory(name, result); - Status bs = b_->NewDirectory(name, &br); - assert(as == bs); - return as; - } - Status FileExists(const std::string& f) override { - Status as = a_->FileExists(f); - Status bs = b_->FileExists(f); - assert(as == bs); - return as; - } -#if defined(_MSC_VER) -#pragma warning(push) -// logical operation on address of string constant -#pragma warning(disable : 4130) -#endif - Status GetChildren(const std::string& dir, - std::vector* r) override { - std::vector ar, br; - Status as = a_->GetChildren(dir, &ar); - Status bs = b_->GetChildren(dir, &br); - assert(as == bs); - std::sort(ar.begin(), ar.end()); - std::sort(br.begin(), br.end()); - if (!as.ok() || ar != br) { - assert(0 == "getchildren results don't match"); - } - *r = ar; - return as; - } -#if defined(_MSC_VER) -#pragma warning(pop) -#endif - Status DeleteFile(const 
std::string& f) override { - Status as = a_->DeleteFile(f); - Status bs = b_->DeleteFile(f); - assert(as == bs); - return as; - } - Status CreateDir(const std::string& d) override { - Status as = a_->CreateDir(d); - Status bs = b_->CreateDir(d); - assert(as == bs); - return as; - } - Status CreateDirIfMissing(const std::string& d) override { - Status as = a_->CreateDirIfMissing(d); - Status bs = b_->CreateDirIfMissing(d); - assert(as == bs); - return as; - } - Status DeleteDir(const std::string& d) override { - Status as = a_->DeleteDir(d); - Status bs = b_->DeleteDir(d); - assert(as == bs); - return as; - } - Status GetFileSize(const std::string& f, uint64_t* s) override { - uint64_t asize, bsize; - Status as = a_->GetFileSize(f, &asize); - Status bs = b_->GetFileSize(f, &bsize); - assert(as == bs); - assert(!as.ok() || asize == bsize); - *s = asize; - return as; - } - - Status GetFileModificationTime(const std::string& fname, - uint64_t* file_mtime) override { - uint64_t amtime, bmtime; - Status as = a_->GetFileModificationTime(fname, &amtime); - Status bs = b_->GetFileModificationTime(fname, &bmtime); - assert(as == bs); - assert(!as.ok() || amtime - bmtime < 10000 || bmtime - amtime < 10000); - *file_mtime = amtime; - return as; - } - - Status RenameFile(const std::string& s, const std::string& t) override { - Status as = a_->RenameFile(s, t); - Status bs = b_->RenameFile(s, t); - assert(as == bs); - return as; - } - - Status LinkFile(const std::string& s, const std::string& t) override { - Status as = a_->LinkFile(s, t); - Status bs = b_->LinkFile(s, t); - assert(as == bs); - return as; - } - - class FileLockMirror : public FileLock { - public: - FileLock *a_, *b_; - FileLockMirror(FileLock* a, FileLock* b) : a_(a), b_(b) {} - }; - - Status LockFile(const std::string& f, FileLock** l) override { - FileLock *al, *bl; - Status as = a_->LockFile(f, &al); - Status bs = b_->LockFile(f, &bl); - assert(as == bs); - if (as.ok()) *l = new FileLockMirror(al, bl); - 
return as; - } - - Status UnlockFile(FileLock* l) override { - FileLockMirror* ml = static_cast(l); - Status as = a_->UnlockFile(ml->a_); - Status bs = b_->UnlockFile(ml->b_); - assert(as == bs); - delete ml; - return as; - } -}; - -} // namespace ROCKSDB_NAMESPACE - -#endif // ROCKSDB_LITE diff --git a/dist/darwin_amd64/include/rocksdb/utilities/info_log_finder.h b/dist/darwin_amd64/include/rocksdb/utilities/info_log_finder.h deleted file mode 100644 index 824f8a3..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/info_log_finder.h +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#include -#include - -#include "rocksdb/db.h" -#include "rocksdb/options.h" - -namespace ROCKSDB_NAMESPACE { - -// This function can be used to list the Information logs, -// given the db pointer. -Status GetInfoLogList(DB* db, std::vector* info_log_list); -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/utilities/ldb_cmd.h b/dist/darwin_amd64/include/rocksdb/utilities/ldb_cmd.h deleted file mode 100644 index c7f227f..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/ldb_cmd.h +++ /dev/null @@ -1,286 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). 
-// -#pragma once - -#ifndef ROCKSDB_LITE - -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "rocksdb/convenience.h" -#include "rocksdb/env.h" -#include "rocksdb/iterator.h" -#include "rocksdb/ldb_tool.h" -#include "rocksdb/options.h" -#include "rocksdb/slice.h" -#include "rocksdb/utilities/db_ttl.h" -#include "rocksdb/utilities/ldb_cmd_execute_result.h" - -namespace ROCKSDB_NAMESPACE { - -class LDBCommand { - public: - // Command-line arguments - static const std::string ARG_ENV_URI; - static const std::string ARG_DB; - static const std::string ARG_PATH; - static const std::string ARG_SECONDARY_PATH; - static const std::string ARG_HEX; - static const std::string ARG_KEY_HEX; - static const std::string ARG_VALUE_HEX; - static const std::string ARG_CF_NAME; - static const std::string ARG_TTL; - static const std::string ARG_TTL_START; - static const std::string ARG_TTL_END; - static const std::string ARG_TIMESTAMP; - static const std::string ARG_TRY_LOAD_OPTIONS; - static const std::string ARG_IGNORE_UNKNOWN_OPTIONS; - static const std::string ARG_FROM; - static const std::string ARG_TO; - static const std::string ARG_MAX_KEYS; - static const std::string ARG_BLOOM_BITS; - static const std::string ARG_FIX_PREFIX_LEN; - static const std::string ARG_COMPRESSION_TYPE; - static const std::string ARG_COMPRESSION_MAX_DICT_BYTES; - static const std::string ARG_BLOCK_SIZE; - static const std::string ARG_AUTO_COMPACTION; - static const std::string ARG_DB_WRITE_BUFFER_SIZE; - static const std::string ARG_WRITE_BUFFER_SIZE; - static const std::string ARG_FILE_SIZE; - static const std::string ARG_CREATE_IF_MISSING; - static const std::string ARG_NO_VALUE; - static const std::string ARG_DISABLE_CONSISTENCY_CHECKS; - - struct ParsedParams { - std::string cmd; - std::vector cmd_params; - std::map option_map; - std::vector flags; - }; - - static LDBCommand* SelectCommand(const ParsedParams& parsed_parms); - - static LDBCommand* 
InitFromCmdLineArgs( - const std::vector& args, const Options& options, - const LDBOptions& ldb_options, - const std::vector* column_families, - const std::function& selector = - SelectCommand); - - static LDBCommand* InitFromCmdLineArgs( - int argc, char const* const* argv, const Options& options, - const LDBOptions& ldb_options, - const std::vector* column_families); - - bool ValidateCmdLineOptions(); - - virtual void PrepareOptions(); - - virtual void OverrideBaseOptions(); - - virtual void SetDBOptions(Options options) { options_ = options; } - - virtual void SetColumnFamilies( - const std::vector* column_families) { - if (column_families != nullptr) { - column_families_ = *column_families; - } else { - column_families_.clear(); - } - } - - void SetLDBOptions(const LDBOptions& ldb_options) { - ldb_options_ = ldb_options; - } - - const std::map& TEST_GetOptionMap() { - return option_map_; - } - - const std::vector& TEST_GetFlags() { return flags_; } - - virtual bool NoDBOpen() { return false; } - - virtual ~LDBCommand() { CloseDB(); } - - /* Run the command, and return the execute result. */ - void Run(); - - virtual void DoCommand() = 0; - - LDBCommandExecuteResult GetExecuteState() { return exec_state_; } - - void ClearPreviousRunState() { exec_state_.Reset(); } - - // Consider using Slice::DecodeHex directly instead if you don't need the - // 0x prefix - static std::string HexToString(const std::string& str); - - // Consider using Slice::ToString(true) directly instead if - // you don't need the 0x prefix - static std::string StringToHex(const std::string& str); - - static const char* DELIM; - - protected: - LDBCommandExecuteResult exec_state_; - std::string env_uri_; - std::string db_path_; - // If empty, open DB as primary. If non-empty, open the DB as secondary - // with this secondary path. When running against a database opened by - // another process, ldb wll leave the source directory completely intact. 
- std::string secondary_path_; - std::string column_family_name_; - DB* db_; - DBWithTTL* db_ttl_; - std::map cf_handles_; - - /** - * true implies that this command can work if the db is opened in read-only - * mode. - */ - bool is_read_only_; - - /** If true, the key is input/output as hex in get/put/scan/delete etc. */ - bool is_key_hex_; - - /** If true, the value is input/output as hex in get/put/scan/delete etc. */ - bool is_value_hex_; - - /** If true, the value is treated as timestamp suffixed */ - bool is_db_ttl_; - - // If true, the kvs are output with their insert/modify timestamp in a ttl db - bool timestamp_; - - // If true, try to construct options from DB's option files. - bool try_load_options_; - - // The value passed to options.force_consistency_checks. - bool force_consistency_checks_; - - bool create_if_missing_; - - /** - * Map of options passed on the command-line. - */ - const std::map option_map_; - - /** - * Flags passed on the command-line. - */ - const std::vector flags_; - - /** List of command-line options valid for this command */ - const std::vector valid_cmd_line_options_; - - /** Shared pointer to underlying environment if applicable **/ - std::shared_ptr env_guard_; - - bool ParseKeyValue(const std::string& line, std::string* key, - std::string* value, bool is_key_hex, bool is_value_hex); - - LDBCommand(const std::map& options, - const std::vector& flags, bool is_read_only, - const std::vector& valid_cmd_line_options); - - void OpenDB(); - - void CloseDB(); - - ColumnFamilyHandle* GetCfHandle(); - - static std::string PrintKeyValue(const std::string& key, - const std::string& value, bool is_key_hex, - bool is_value_hex); - - static std::string PrintKeyValue(const std::string& key, - const std::string& value, bool is_hex); - - /** - * Return true if the specified flag is present in the specified flags vector - */ - static bool IsFlagPresent(const std::vector& flags, - const std::string& flag) { - return (std::find(flags.begin(), 
flags.end(), flag) != flags.end()); - } - - static std::string HelpRangeCmdArgs(); - - /** - * A helper function that returns a list of command line options - * used by this command. It includes the common options and the ones - * passed in. - */ - static std::vector BuildCmdLineOptions( - std::vector options); - - bool ParseIntOption(const std::map& options, - const std::string& option, int& value, - LDBCommandExecuteResult& exec_state); - - bool ParseStringOption(const std::map& options, - const std::string& option, std::string* value); - - /** - * Returns the value of the specified option as a boolean. - * default_val is used if the option is not found in options. - * Throws an exception if the value of the option is not - * "true" or "false" (case insensitive). - */ - bool ParseBooleanOption(const std::map& options, - const std::string& option, bool default_val); - - Options options_; - std::vector column_families_; - ConfigOptions config_options_; - LDBOptions ldb_options_; - - private: - /** - * Interpret command line options and flags to determine if the key - * should be input/output in hex. - */ - bool IsKeyHex(const std::map& options, - const std::vector& flags); - - /** - * Interpret command line options and flags to determine if the value - * should be input/output in hex. - */ - bool IsValueHex(const std::map& options, - const std::vector& flags); - - /** - * Converts val to a boolean. - * val must be either true or false (case insensitive). - * Otherwise an exception is thrown. - */ - bool StringToBool(std::string val); -}; - -class LDBCommandRunner { - public: - static void PrintHelp(const LDBOptions& ldb_options, const char* exec_name, - bool to_stderr = true); - - // Returns the status code to return. 0 is no error. 
- static int RunCommand( - int argc, char const* const* argv, Options options, - const LDBOptions& ldb_options, - const std::vector* column_families); -}; - -} // namespace ROCKSDB_NAMESPACE - -#endif // ROCKSDB_LITE diff --git a/dist/darwin_amd64/include/rocksdb/utilities/ldb_cmd_execute_result.h b/dist/darwin_amd64/include/rocksdb/utilities/ldb_cmd_execute_result.h deleted file mode 100644 index c837b47..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/ldb_cmd_execute_result.h +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// -#pragma once - -#ifdef FAILED -#undef FAILED -#endif - -namespace ROCKSDB_NAMESPACE { - -class LDBCommandExecuteResult { - public: - enum State { - EXEC_NOT_STARTED = 0, - EXEC_SUCCEED = 1, - EXEC_FAILED = 2, - }; - - LDBCommandExecuteResult() : state_(EXEC_NOT_STARTED), message_("") {} - - LDBCommandExecuteResult(State state, std::string& msg) - : state_(state), message_(msg) {} - - std::string ToString() { - std::string ret; - switch (state_) { - case EXEC_SUCCEED: - break; - case EXEC_FAILED: - ret.append("Failed: "); - break; - case EXEC_NOT_STARTED: - ret.append("Not started: "); - } - if (!message_.empty()) { - ret.append(message_); - } - return ret; - } - - void Reset() { - state_ = EXEC_NOT_STARTED; - message_ = ""; - } - - bool IsSucceed() { return state_ == EXEC_SUCCEED; } - - bool IsNotStarted() { return state_ == EXEC_NOT_STARTED; } - - bool IsFailed() { return state_ == EXEC_FAILED; } - - static LDBCommandExecuteResult Succeed(std::string msg) { - return LDBCommandExecuteResult(EXEC_SUCCEED, msg); - } - - static LDBCommandExecuteResult Failed(std::string msg) { - return LDBCommandExecuteResult(EXEC_FAILED, msg); - } - - private: - State state_; - std::string message_; - - bool 
operator==(const LDBCommandExecuteResult&); - bool operator!=(const LDBCommandExecuteResult&); -}; - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/utilities/leveldb_options.h b/dist/darwin_amd64/include/rocksdb/utilities/leveldb_options.h deleted file mode 100644 index 7e4a6fa..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/leveldb_options.h +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. - -#pragma once - -#include - -#include "rocksdb/compression_type.h" -#include "rocksdb/rocksdb_namespace.h" - -namespace ROCKSDB_NAMESPACE { - -class Cache; -class Comparator; -class Env; -class FilterPolicy; -class Logger; -struct Options; -class Snapshot; - -// Options to control the behavior of a database (passed to -// DB::Open). A LevelDBOptions object can be initialized as though -// it were a LevelDB Options object, and then it can be converted into -// a RocksDB Options object. -struct LevelDBOptions { - // ------------------- - // Parameters that affect behavior - - // Comparator used to define the order of keys in the table. - // Default: a comparator that uses lexicographic byte-wise ordering - // - // REQUIRES: The client must ensure that the comparator supplied - // here has the same name and orders keys *exactly* the same as the - // comparator provided to previous open calls on the same DB. - const Comparator* comparator; - - // If true, the database will be created if it is missing. 
- // Default: false - bool create_if_missing; - - // If true, an error is raised if the database already exists. - // Default: false - bool error_if_exists; - - // If true, the implementation will do aggressive checking of the - // data it is processing and will stop early if it detects any - // errors. This may have unforeseen ramifications: for example, a - // corruption of one DB entry may cause a large number of entries to - // become unreadable or for the entire DB to become unopenable. - // Default: false - bool paranoid_checks; - - // Use the specified object to interact with the environment, - // e.g. to read/write files, schedule background work, etc. - // Default: Env::Default() - Env* env; - - // Any internal progress/error information generated by the db will - // be written to info_log if it is non-NULL, or to a file stored - // in the same directory as the DB contents if info_log is NULL. - // Default: NULL - Logger* info_log; - - // ------------------- - // Parameters that affect performance - - // Amount of data to build up in memory (backed by an unsorted log - // on disk) before converting to a sorted on-disk file. - // - // Larger values increase performance, especially during bulk loads. - // Up to two write buffers may be held in memory at the same time, - // so you may wish to adjust this parameter to control memory usage. - // Also, a larger write buffer will result in a longer recovery time - // the next time the database is opened. - // - // Default: 4MB - size_t write_buffer_size; - - // Number of open files that can be used by the DB. You may need to - // increase this if your database has a large working set (budget - // one open file per 2MB of working set). - // - // Default: 1000 - int max_open_files; - - // Control over blocks (user data is stored in a set of blocks, and - // a block is the unit of reading from disk). - - // If non-NULL, use the specified cache for blocks. 
- // If NULL, leveldb will automatically create and use an 8MB internal cache. - // Default: NULL - Cache* block_cache; - - // Approximate size of user data packed per block. Note that the - // block size specified here corresponds to uncompressed data. The - // actual size of the unit read from disk may be smaller if - // compression is enabled. This parameter can be changed dynamically. - // - // Default: 4K - size_t block_size; - - // Number of keys between restart points for delta encoding of keys. - // This parameter can be changed dynamically. Most clients should - // leave this parameter alone. - // - // Default: 16 - int block_restart_interval; - - // Compress blocks using the specified compression algorithm. This - // parameter can be changed dynamically. - // - // Default: kSnappyCompression, which gives lightweight but fast - // compression. - // - // Typical speeds of kSnappyCompression on an Intel(R) Core(TM)2 2.4GHz: - // ~200-500MB/s compression - // ~400-800MB/s decompression - // Note that these speeds are significantly faster than most - // persistent storage speeds, and therefore it is typically never - // worth switching to kNoCompression. Even if the input data is - // incompressible, the kSnappyCompression implementation will - // efficiently detect that and will switch to uncompressed mode. - CompressionType compression; - - // If non-NULL, use the specified filter policy to reduce disk reads. - // Many applications will benefit from passing the result of - // NewBloomFilterPolicy() here. - // - // Default: NULL - const FilterPolicy* filter_policy; - - // Create a LevelDBOptions object with default values for all fields. - LevelDBOptions(); -}; - -// Converts a LevelDBOptions object into a RocksDB Options object. 
-Options ConvertOptions(const LevelDBOptions& leveldb_options); - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/utilities/lua/rocks_lua_custom_library.h b/dist/darwin_amd64/include/rocksdb/utilities/lua/rocks_lua_custom_library.h deleted file mode 100644 index f617da0..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/lua/rocks_lua_custom_library.h +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2016, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once -#ifdef LUA - -// lua headers -extern "C" { -#include -#include -#include -} - -namespace ROCKSDB_NAMESPACE { -namespace lua { -// A class that used to define custom C Library that is callable -// from Lua script -class RocksLuaCustomLibrary { - public: - virtual ~RocksLuaCustomLibrary() {} - // The name of the C library. This name will also be used as the table - // (namespace) in Lua that contains the C library. - virtual const char* Name() const = 0; - - // Returns a "static const struct luaL_Reg[]", which includes a list of - // C functions. Note that the last entry of this static array must be - // {nullptr, nullptr} as required by Lua. - // - // More details about how to implement Lua C libraries can be found - // in the official Lua document http://www.lua.org/pil/26.2.html - virtual const struct luaL_Reg* Lib() const = 0; - - // A function that will be called right after the library has been created - // and pushed on the top of the lua_State. This custom setup function - // allows developers to put additional table or constant values inside - // the same table / namespace. 
- virtual void CustomSetup(lua_State* /*L*/) const {} -}; -} // namespace lua -} // namespace ROCKSDB_NAMESPACE -#endif // LUA diff --git a/dist/darwin_amd64/include/rocksdb/utilities/lua/rocks_lua_util.h b/dist/darwin_amd64/include/rocksdb/utilities/lua/rocks_lua_util.h deleted file mode 100644 index 3427b65..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/lua/rocks_lua_util.h +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2016, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once -// lua headers -extern "C" { -#include -#include -#include -} - -#ifdef LUA -#include -#include - -#include "rocksdb/utilities/lua/rocks_lua_custom_library.h" - -namespace ROCKSDB_NAMESPACE { -namespace lua { -class LuaStateWrapper { - public: - explicit LuaStateWrapper(const std::string& lua_script) { - lua_state_ = luaL_newstate(); - Init(lua_script, {}); - } - LuaStateWrapper( - const std::string& lua_script, - const std::vector>& libraries) { - lua_state_ = luaL_newstate(); - Init(lua_script, libraries); - } - lua_State* GetLuaState() const { return lua_state_; } - ~LuaStateWrapper() { lua_close(lua_state_); } - - private: - void Init( - const std::string& lua_script, - const std::vector>& libraries) { - if (lua_state_) { - luaL_openlibs(lua_state_); - for (const auto& library : libraries) { - luaL_openlib(lua_state_, library->Name(), library->Lib(), 0); - library->CustomSetup(lua_state_); - } - luaL_dostring(lua_state_, lua_script.c_str()); - } - } - - lua_State* lua_state_; -}; -} // namespace lua -} // namespace ROCKSDB_NAMESPACE -#endif // LUA diff --git a/dist/darwin_amd64/include/rocksdb/utilities/memory_util.h b/dist/darwin_amd64/include/rocksdb/utilities/memory_util.h deleted file mode 100644 index 4f1606b..0000000 --- 
a/dist/darwin_amd64/include/rocksdb/utilities/memory_util.h +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#ifndef ROCKSDB_LITE - -#pragma once - -#include -#include -#include -#include - -#include "rocksdb/cache.h" -#include "rocksdb/db.h" - -namespace ROCKSDB_NAMESPACE { - -// Returns the current memory usage of the specified DB instances. -class MemoryUtil { - public: - enum UsageType : int { - // Memory usage of all the mem-tables. - kMemTableTotal = 0, - // Memory usage of those un-flushed mem-tables. - kMemTableUnFlushed = 1, - // Memory usage of all the table readers. - kTableReadersTotal = 2, - // Memory usage by Cache. - kCacheTotal = 3, - kNumUsageTypes = 4 - }; - - // Returns the approximate memory usage of different types in the input - // list of DBs and Cache set. For instance, in the output map - // usage_by_type, usage_by_type[kMemTableTotal] will store the memory - // usage of all the mem-tables from all the input rocksdb instances. - // - // Note that for memory usage inside Cache class, we will - // only report the usage of the input "cache_set" without - // including those Cache usage inside the input list "dbs" - // of DBs. - static Status GetApproximateMemoryUsageByType( - const std::vector& dbs, - const std::unordered_set cache_set, - std::map* usage_by_type); -}; -} // namespace ROCKSDB_NAMESPACE -#endif // !ROCKSDB_LITE diff --git a/dist/darwin_amd64/include/rocksdb/utilities/object_registry.h b/dist/darwin_amd64/include/rocksdb/utilities/object_registry.h deleted file mode 100644 index 538cb6a..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/object_registry.h +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright (c) 2016-present, Facebook, Inc. All rights reserved. 
-// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#ifndef ROCKSDB_LITE - -#include -#include -#include -#include -#include -#include -#include "rocksdb/status.h" - -namespace ROCKSDB_NAMESPACE { -class Logger; -// Returns a new T when called with a string. Populates the std::unique_ptr -// argument if granting ownership to caller. -template -using FactoryFunc = - std::function*, std::string*)>; - -class ObjectLibrary { - public: - // Base class for an Entry in the Registry. - class Entry { - public: - virtual ~Entry() {} - Entry(const std::string& name) : name_(std::move(name)) {} - - // Checks to see if the target matches this entry - virtual bool matches(const std::string& target) const { - return name_ == target; - } - const std::string& Name() const { return name_; } - - private: - const std::string name_; // The name of the Entry - }; // End class Entry - - // An Entry containing a FactoryFunc for creating new Objects - template - class FactoryEntry : public Entry { - public: - FactoryEntry(const std::string& name, FactoryFunc f) - : Entry(name), pattern_(std::move(name)), factory_(std::move(f)) {} - ~FactoryEntry() override {} - bool matches(const std::string& target) const override { - return std::regex_match(target, pattern_); - } - // Creates a new T object. - T* NewFactoryObject(const std::string& target, std::unique_ptr* guard, - std::string* msg) const { - return factory_(target, guard, msg); - } - - private: - std::regex pattern_; // The pattern for this entry - FactoryFunc factory_; - }; // End class FactoryEntry - public: - // Finds the entry matching the input name and type - const Entry* FindEntry(const std::string& type, - const std::string& name) const; - void Dump(Logger* logger) const; - - // Registers the factory with the library for the pattern. 
- // If the pattern matches, the factory may be used to create a new object. - template - const FactoryFunc& Register(const std::string& pattern, - const FactoryFunc& factory) { - std::unique_ptr entry(new FactoryEntry(pattern, factory)); - AddEntry(T::Type(), entry); - return factory; - } - // Returns the default ObjectLibrary - static std::shared_ptr& Default(); - - private: - // Adds the input entry to the list for the given type - void AddEntry(const std::string& type, std::unique_ptr& entry); - - // ** FactoryFunctions for this loader, organized by type - std::unordered_map>> entries_; -}; - -// The ObjectRegistry is used to register objects that can be created by a -// name/pattern at run-time where the specific implementation of the object may -// not be known in advance. -class ObjectRegistry { - public: - static std::shared_ptr NewInstance(); - - ObjectRegistry(); - - void AddLibrary(const std::shared_ptr& library) { - libraries_.emplace_back(library); - } - - // Creates a new T using the factory function that was registered with a - // pattern that matches the provided "target" string according to - // std::regex_match. - // - // If no registered functions match, returns nullptr. If multiple functions - // match, the factory function used is unspecified. - // - // Populates res_guard with result pointer if caller is granted ownership. - template - T* NewObject(const std::string& target, std::unique_ptr* guard, - std::string* errmsg) { - guard->reset(); - const auto* basic = FindEntry(T::Type(), target); - if (basic != nullptr) { - const auto* factory = - static_cast*>(basic); - return factory->NewFactoryObject(target, guard, errmsg); - } else { - *errmsg = std::string("Could not load ") + T::Type(); - return nullptr; - } - } - - // Creates a new unique T using the input factory functions. 
- // Returns OK if a new unique T was successfully created - // Returns NotSupported if the type/target could not be created - // Returns InvalidArgument if the factory return an unguarded object - // (meaning it cannot be managed by a unique ptr) - template - Status NewUniqueObject(const std::string& target, - std::unique_ptr* result) { - std::string errmsg; - T* ptr = NewObject(target, result, &errmsg); - if (ptr == nullptr) { - return Status::NotSupported(errmsg, target); - } else if (*result) { - return Status::OK(); - } else { - return Status::InvalidArgument(std::string("Cannot make a unique ") + - T::Type() + " from unguarded one ", - target); - } - } - - // Creates a new shared T using the input factory functions. - // Returns OK if a new shared T was successfully created - // Returns NotSupported if the type/target could not be created - // Returns InvalidArgument if the factory return an unguarded object - // (meaning it cannot be managed by a shared ptr) - template - Status NewSharedObject(const std::string& target, - std::shared_ptr* result) { - std::string errmsg; - std::unique_ptr guard; - T* ptr = NewObject(target, &guard, &errmsg); - if (ptr == nullptr) { - return Status::NotSupported(errmsg, target); - } else if (guard) { - result->reset(guard.release()); - return Status::OK(); - } else { - return Status::InvalidArgument(std::string("Cannot make a shared ") + - T::Type() + " from unguarded one ", - target); - } - } - - // Creates a new static T using the input factory functions. 
- // Returns OK if a new static T was successfully created - // Returns NotSupported if the type/target could not be created - // Returns InvalidArgument if the factory return a guarded object - // (meaning it is managed by a unique ptr) - template - Status NewStaticObject(const std::string& target, T** result) { - std::string errmsg; - std::unique_ptr guard; - T* ptr = NewObject(target, &guard, &errmsg); - if (ptr == nullptr) { - return Status::NotSupported(errmsg, target); - } else if (guard.get()) { - return Status::InvalidArgument(std::string("Cannot make a static ") + - T::Type() + " from a guarded one ", - target); - } else { - *result = ptr; - return Status::OK(); - } - } - - // Dump the contents of the registry to the logger - void Dump(Logger* logger) const; - - private: - const ObjectLibrary::Entry* FindEntry(const std::string& type, - const std::string& name) const; - - // The set of libraries to search for factories for this registry. - // The libraries are searched in reverse order (back to front) when - // searching for entries. - std::vector> libraries_; -}; -} // namespace ROCKSDB_NAMESPACE -#endif // ROCKSDB_LITE diff --git a/dist/darwin_amd64/include/rocksdb/utilities/optimistic_transaction_db.h b/dist/darwin_amd64/include/rocksdb/utilities/optimistic_transaction_db.h deleted file mode 100644 index 5356df7..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/optimistic_transaction_db.h +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once -#ifndef ROCKSDB_LITE - -#include -#include - -#include "rocksdb/comparator.h" -#include "rocksdb/db.h" -#include "rocksdb/utilities/stackable_db.h" - -namespace ROCKSDB_NAMESPACE { - -class Transaction; - -// Database with Transaction support. 
-// -// See optimistic_transaction.h and examples/transaction_example.cc - -// Options to use when starting an Optimistic Transaction -struct OptimisticTransactionOptions { - // Setting set_snapshot=true is the same as calling SetSnapshot(). - bool set_snapshot = false; - - // Should be set if the DB has a non-default comparator. - // See comment in WriteBatchWithIndex constructor. - const Comparator* cmp = BytewiseComparator(); -}; - -enum class OccValidationPolicy { - // Validate serially at commit stage, AFTER entering the write-group. - // Isolation validation is processed single-threaded(since in the - // write-group). - // May suffer from high mutex contention, as per this link: - // https://github.com/facebook/rocksdb/issues/4402 - kValidateSerial = 0, - // Validate parallelly before commit stage, BEFORE entering the write-group to - // reduce mutex contention. Each txn acquires locks for its write-set - // records in some well-defined order. - kValidateParallel = 1 -}; - -struct OptimisticTransactionDBOptions { - OccValidationPolicy validate_policy = OccValidationPolicy::kValidateParallel; - - // works only if validate_policy == OccValidationPolicy::kValidateParallel - uint32_t occ_lock_buckets = (1 << 20); -}; - -class OptimisticTransactionDB : public StackableDB { - public: - // Open an OptimisticTransactionDB similar to DB::Open(). - static Status Open(const Options& options, const std::string& dbname, - OptimisticTransactionDB** dbptr); - - static Status Open(const DBOptions& db_options, const std::string& dbname, - const std::vector& column_families, - std::vector* handles, - OptimisticTransactionDB** dbptr); - - static Status Open(const DBOptions& db_options, - const OptimisticTransactionDBOptions& occ_options, - const std::string& dbname, - const std::vector& column_families, - std::vector* handles, - OptimisticTransactionDB** dbptr); - - virtual ~OptimisticTransactionDB() {} - - // Starts a new Transaction. 
- // - // Caller is responsible for deleting the returned transaction when no - // longer needed. - // - // If old_txn is not null, BeginTransaction will reuse this Transaction - // handle instead of allocating a new one. This is an optimization to avoid - // extra allocations when repeatedly creating transactions. - virtual Transaction* BeginTransaction( - const WriteOptions& write_options, - const OptimisticTransactionOptions& txn_options = - OptimisticTransactionOptions(), - Transaction* old_txn = nullptr) = 0; - - OptimisticTransactionDB(const OptimisticTransactionDB&) = delete; - void operator=(const OptimisticTransactionDB&) = delete; - - protected: - // To Create an OptimisticTransactionDB, call Open() - explicit OptimisticTransactionDB(DB* db) : StackableDB(db) {} -}; - -} // namespace ROCKSDB_NAMESPACE - -#endif // ROCKSDB_LITE diff --git a/dist/darwin_amd64/include/rocksdb/utilities/option_change_migration.h b/dist/darwin_amd64/include/rocksdb/utilities/option_change_migration.h deleted file mode 100644 index cb1d0d1..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/option_change_migration.h +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#include -#include "rocksdb/options.h" -#include "rocksdb/status.h" - -namespace ROCKSDB_NAMESPACE { -// Try to migrate DB created with old_opts to be use new_opts. -// Multiple column families is not supported. -// It is best-effort. No guarantee to succeed. -// A full compaction may be executed. 
-Status OptionChangeMigration(std::string dbname, const Options& old_opts, - const Options& new_opts); -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/utilities/options_type.h b/dist/darwin_amd64/include/rocksdb/utilities/options_type.h deleted file mode 100644 index 2bd081a..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/options_type.h +++ /dev/null @@ -1,763 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#include -#include -#include - -#include "rocksdb/convenience.h" -#include "rocksdb/rocksdb_namespace.h" -#include "rocksdb/status.h" - -namespace ROCKSDB_NAMESPACE { -class OptionTypeInfo; - -// The underlying "class/type" of the option. -// This enum is used to determine how the option should -// be converted to/from strings and compared. -enum class OptionType { - kBoolean, - kInt, - kInt32T, - kInt64T, - kUInt, - kUInt32T, - kUInt64T, - kSizeT, - kString, - kDouble, - kCompactionStyle, - kCompactionPri, - kSliceTransform, - kCompressionType, - kComparator, - kCompactionFilter, - kCompactionFilterFactory, - kCompactionStopStyle, - kMergeOperator, - kMemTableRepFactory, - kFilterPolicy, - kFlushBlockPolicyFactory, - kChecksumType, - kEncodingType, - kEnv, - kEnum, - kStruct, - kVector, - kConfigurable, - kUnknown, -}; - -enum class OptionVerificationType { - kNormal, - kByName, // The option is pointer typed so we can only verify - // based on it's name. - kByNameAllowNull, // Same as kByName, but it also allows the case - // where one of them is a nullptr. - kByNameAllowFromNull, // Same as kByName, but it also allows the case - // where the old option is nullptr. - kDeprecated, // The option is no longer used in rocksdb. 
The RocksDB - // OptionsParser will still accept this option if it - // happen to exists in some Options file. However, - // the parser will not include it in serialization - // and verification processes. - kAlias, // This option represents is a name/shortcut for - // another option and should not be written or verified - // independently -}; - -// A set of modifier flags used to alter how an option is evaluated or -// processed. These flags can be combined together (e.g. kMutable | kShared). -// The kCompare flags can be used to control if/when options are compared. -// If kCompareNever is set, two related options would never be compared (always -// equal) If kCompareExact is set, the options will only be compared if the -// sanity mode -// is exact -// kMutable means the option can be changed after it is prepared -// kShared means the option is contained in a std::shared_ptr -// kUnique means the option is contained in a std::uniqued_ptr -// kRawPointer means the option is a raw pointer value. -// kAllowNull means that an option is allowed to be null for verification -// purposes. -// kDontSerialize means this option should not be serialized and included in -// the string representation. -// kDontPrepare means do not call PrepareOptions for this pointer value. 
-enum class OptionTypeFlags : uint32_t { - kNone = 0x00, // No flags - kCompareDefault = 0x0, - kCompareNever = ConfigOptions::kSanityLevelNone, - kCompareLoose = ConfigOptions::kSanityLevelLooselyCompatible, - kCompareExact = ConfigOptions::kSanityLevelExactMatch, - - kMutable = 0x0100, // Option is mutable - kRawPointer = 0x0200, // The option is stored as a raw pointer - kShared = 0x0400, // The option is stored as a shared_ptr - kUnique = 0x0800, // The option is stored as a unique_ptr - kAllowNull = 0x1000, // The option can be null - kDontSerialize = 0x2000, // Don't serialize the option - kDontPrepare = 0x4000, // Don't prepare or sanitize this option -}; - -inline OptionTypeFlags operator|(const OptionTypeFlags &a, - const OptionTypeFlags &b) { - return static_cast(static_cast(a) | - static_cast(b)); -} - -inline OptionTypeFlags operator&(const OptionTypeFlags &a, - const OptionTypeFlags &b) { - return static_cast(static_cast(a) & - static_cast(b)); -} - -// Converts an string into its enumerated value. -// @param type_map Mapping between strings and enum values -// @param type The string representation of the enum -// @param value Returns the enum value represented by the string -// @return true if the string was found in the enum map, false otherwise. -template -bool ParseEnum(const std::unordered_map& type_map, - const std::string& type, T* value) { - auto iter = type_map.find(type); - if (iter != type_map.end()) { - *value = iter->second; - return true; - } - return false; -} - -// Converts an enum into its string representation. -// @param type_map Mapping between strings and enum values -// @param type The enum -// @param value Returned as the string representation of the enum -// @return true if the enum was found in the enum map, false otherwise. 
-template -bool SerializeEnum(const std::unordered_map& type_map, - const T& type, std::string* value) { - for (const auto& pair : type_map) { - if (pair.second == type) { - *value = pair.first; - return true; - } - } - return false; -} - -template -Status ParseVector(const ConfigOptions& config_options, - const OptionTypeInfo& elem_info, char separator, - const std::string& name, const std::string& value, - std::vector* result); - -template -Status SerializeVector(const ConfigOptions& config_options, - const OptionTypeInfo& elem_info, char separator, - const std::string& name, const std::vector& vec, - std::string* value); -template -bool VectorsAreEqual(const ConfigOptions& config_options, - const OptionTypeInfo& elem_info, const std::string& name, - const std::vector& vec1, const std::vector& vec2, - std::string* mismatch); - -// Function for converting a option string value into its underlying -// representation in "addr" -// On success, Status::OK is returned and addr is set to the parsed form -// On failure, a non-OK status is returned -// @param opts The ConfigOptions controlling how the value is parsed -// @param name The name of the options being parsed -// @param value The string representation of the option -// @param addr Pointer to the object -using ParseFunc = std::function; - -// Function for converting an option "addr" into its string representation. -// On success, Status::OK is returned and value is the serialized form. -// On failure, a non-OK status is returned -// @param opts The ConfigOptions controlling how the values are serialized -// @param name The name of the options being serialized -// @param addr Pointer to the value being serialized -// @param value The result of the serialization. 
-using SerializeFunc = std::function; - -// Function for comparing two option values -// If they are not equal, updates "mismatch" with the name of the bad option -// @param opts The ConfigOptions controlling how the values are compared -// @param name The name of the options being compared -// @param addr1 The first address to compare -// @param addr2 The address to compare to -// @param mismatch If the values are not equal, the name of the option that -// first differs -using EqualsFunc = std::function; - -// A struct for storing constant option information such as option name, -// option type, and offset. -class OptionTypeInfo { - public: - // A simple "normal", non-mutable Type "type" at offset - OptionTypeInfo(int offset, OptionType type) - : offset_(offset), - parse_func_(nullptr), - serialize_func_(nullptr), - equals_func_(nullptr), - type_(type), - verification_(OptionVerificationType::kNormal), - flags_(OptionTypeFlags::kNone) {} - - OptionTypeInfo(int offset, OptionType type, - OptionVerificationType verification, OptionTypeFlags flags) - : offset_(offset), - parse_func_(nullptr), - serialize_func_(nullptr), - equals_func_(nullptr), - type_(type), - verification_(verification), - flags_(flags) {} - - OptionTypeInfo(int offset, OptionType type, - OptionVerificationType verification, OptionTypeFlags flags, - const ParseFunc& parse_func) - : offset_(offset), - parse_func_(parse_func), - serialize_func_(nullptr), - equals_func_(nullptr), - type_(type), - verification_(verification), - flags_(flags) {} - - OptionTypeInfo(int offset, OptionType type, - OptionVerificationType verification, OptionTypeFlags flags, - const ParseFunc& parse_func, - const SerializeFunc& serialize_func, - const EqualsFunc& equals_func) - : offset_(offset), - parse_func_(parse_func), - serialize_func_(serialize_func), - equals_func_(equals_func), - type_(type), - verification_(verification), - flags_(flags) {} - - // Creates an OptionTypeInfo for an enum type. 
Enums use an additional - // map to convert the enums to/from their string representation. - // To create an OptionTypeInfo that is an Enum, one should: - // - Create a static map of string values to the corresponding enum value - // - Call this method passing the static map in as a parameter. - // Note that it is not necessary to add a new OptionType or make any - // other changes -- the returned object handles parsing, serialiation, and - // comparisons. - // - // @param offset The offset in the option object for this enum - // @param map The string to enum mapping for this enum - template - static OptionTypeInfo Enum( - int offset, const std::unordered_map* const map) { - return OptionTypeInfo( - offset, OptionType::kEnum, OptionVerificationType::kNormal, - OptionTypeFlags::kNone, - // Uses the map argument to convert the input string into - // its corresponding enum value. If value is found in the map, - // addr is updated to the corresponding map entry. - // @return OK if the value is found in the map - // @return InvalidArgument if the value is not found in the map - [map](const ConfigOptions&, const std::string& name, - const std::string& value, char* addr) { - if (map == nullptr) { - return Status::NotSupported("No enum mapping ", name); - } else if (ParseEnum(*map, value, reinterpret_cast(addr))) { - return Status::OK(); - } else { - return Status::InvalidArgument("No mapping for enum ", name); - } - }, - // Uses the map argument to convert the input enum into - // its corresponding string value. If enum value is found in the map, - // value is updated to the corresponding string value in the map. 
- // @return OK if the enum is found in the map - // @return InvalidArgument if the enum is not found in the map - [map](const ConfigOptions&, const std::string& name, const char* addr, - std::string* value) { - if (map == nullptr) { - return Status::NotSupported("No enum mapping ", name); - } else if (SerializeEnum(*map, (*reinterpret_cast(addr)), - value)) { - return Status::OK(); - } else { - return Status::InvalidArgument("No mapping for enum ", name); - } - }, - // Casts addr1 and addr2 to the enum type and returns true if - // they are equal, false otherwise. - [](const ConfigOptions&, const std::string&, const char* addr1, - const char* addr2, std::string*) { - return (*reinterpret_cast(addr1) == - *reinterpret_cast(addr2)); - }); - } // End OptionTypeInfo::Enum - - // Creates an OptionTypeInfo for a Struct type. Structs have a - // map of string-OptionTypeInfo associated with them that describes how - // to process the object for parsing, serializing, and matching. - // Structs also have a struct_name, which is the name of the object - // as registered in the parent map. - // When processing a struct, the option name can be specified as: - // - Meaning to process the entire struct. - // - Meaning to process the single field - // - Process the single fields - // The CompactionOptionsFIFO, CompactionOptionsUniversal, and LRUCacheOptions - // are all examples of Struct options. - // - // To create an OptionTypeInfo that is a Struct, one should: - // - Create a static map of string-OptionTypeInfo corresponding to the - // properties of the object that can be set via the options. - // - Call this method passing the name and map in as parameters. - // Note that it is not necessary to add a new OptionType or make any - // other changes -- the returned object handles parsing, serialization, and - // comparisons. 
- // - // @param offset The offset in the option object for this enum - // @param map The string to enum mapping for this enum - static OptionTypeInfo Struct( - const std::string& struct_name, - const std::unordered_map* struct_map, - int offset, OptionVerificationType verification, OptionTypeFlags flags) { - return OptionTypeInfo( - offset, OptionType::kStruct, verification, flags, - // Parses the struct and updates the fields at addr - [struct_name, struct_map](const ConfigOptions& opts, - const std::string& name, - const std::string& value, char* addr) { - return ParseStruct(opts, struct_name, struct_map, name, value, addr); - }, - // Serializes the struct options into value - [struct_name, struct_map](const ConfigOptions& opts, - const std::string& name, const char* addr, - std::string* value) { - return SerializeStruct(opts, struct_name, struct_map, name, addr, - value); - }, - // Compares the struct fields of addr1 and addr2 for equality - [struct_name, struct_map](const ConfigOptions& opts, - const std::string& name, const char* addr1, - const char* addr2, std::string* mismatch) { - return StructsAreEqual(opts, struct_name, struct_map, name, addr1, - addr2, mismatch); - }); - } - static OptionTypeInfo Struct( - const std::string& struct_name, - const std::unordered_map* struct_map, - int offset, OptionVerificationType verification, OptionTypeFlags flags, - const ParseFunc& parse_func) { - return OptionTypeInfo( - offset, OptionType::kStruct, verification, flags, parse_func, - [struct_name, struct_map](const ConfigOptions& opts, - const std::string& name, const char* addr, - std::string* value) { - return SerializeStruct(opts, struct_name, struct_map, name, addr, - value); - }, - [struct_name, struct_map](const ConfigOptions& opts, - const std::string& name, const char* addr1, - const char* addr2, std::string* mismatch) { - return StructsAreEqual(opts, struct_name, struct_map, name, addr1, - addr2, mismatch); - }); - } - - template - static OptionTypeInfo 
Vector(int _offset, - OptionVerificationType _verification, - OptionTypeFlags _flags, - const OptionTypeInfo& elem_info, - char separator = ':') { - return OptionTypeInfo( - _offset, OptionType::kVector, _verification, _flags, - [elem_info, separator](const ConfigOptions& opts, - const std::string& name, - const std::string& value, char* addr) { - auto result = reinterpret_cast*>(addr); - return ParseVector(opts, elem_info, separator, name, value, - result); - }, - [elem_info, separator](const ConfigOptions& opts, - const std::string& name, const char* addr, - std::string* value) { - const auto& vec = *(reinterpret_cast*>(addr)); - return SerializeVector(opts, elem_info, separator, name, vec, - value); - }, - [elem_info](const ConfigOptions& opts, const std::string& name, - const char* addr1, const char* addr2, - std::string* mismatch) { - const auto& vec1 = *(reinterpret_cast*>(addr1)); - const auto& vec2 = *(reinterpret_cast*>(addr2)); - return VectorsAreEqual(opts, elem_info, name, vec1, vec2, - mismatch); - }); - } - - bool IsEnabled(OptionTypeFlags otf) const { return (flags_ & otf) == otf; } - - bool IsMutable() const { return IsEnabled(OptionTypeFlags::kMutable); } - - bool IsDeprecated() const { - return IsEnabled(OptionVerificationType::kDeprecated); - } - - // Returns true if the option is marked as an Alias. - // Aliases are valid options that are parsed but are not converted to strings - // or compared. - bool IsAlias() const { return IsEnabled(OptionVerificationType::kAlias); } - - bool IsEnabled(OptionVerificationType ovf) const { - return verification_ == ovf; - } - - // Returns the sanity level for comparing the option. - // If the options should not be compared, returns None - // If the option has a compare flag, returns it. 
- // Otherwise, returns "exact" - ConfigOptions::SanityLevel GetSanityLevel() const { - if (IsDeprecated() || IsAlias()) { - return ConfigOptions::SanityLevel::kSanityLevelNone; - } else { - auto match = (flags_ & OptionTypeFlags::kCompareExact); - if (match == OptionTypeFlags::kCompareDefault) { - return ConfigOptions::SanityLevel::kSanityLevelExactMatch; - } else { - return (ConfigOptions::SanityLevel)match; - } - } - } - - // Returns true if the option should be serialized. - // Options should be serialized if the are not deprecated, aliases, - // or marked as "Don't Serialize". - bool ShouldSerialize() const { - if (IsDeprecated() || IsAlias()) { - return false; - } else if (IsEnabled(OptionTypeFlags::kDontSerialize)) { - return false; - } else { - return true; - } - } - - // Returns true if the option is allowed to be null. - // Options can be null if the verification type is allow from null - // or if the flags specify allow null. - bool CanBeNull() const { - return (IsEnabled(OptionTypeFlags::kAllowNull) || - IsEnabled(OptionVerificationType::kByNameAllowFromNull)); - } - - bool IsSharedPtr() const { return IsEnabled(OptionTypeFlags::kShared); } - - bool IsUniquePtr() const { return IsEnabled(OptionTypeFlags::kUnique); } - - bool IsRawPtr() const { return IsEnabled(OptionTypeFlags::kRawPointer); } - - bool IsByName() const { - return (verification_ == OptionVerificationType::kByName || - verification_ == OptionVerificationType::kByNameAllowNull || - verification_ == OptionVerificationType::kByNameAllowFromNull); - } - - bool IsStruct() const { return (type_ == OptionType::kStruct); } - - bool IsConfigurable() const { return (type_ == OptionType::kConfigurable); } - - // Returns the underlying pointer for the type at base_addr - // The value returned is the underlying "raw" pointer, offset from base. 
- template - const T* AsRawPointer(const void* const base_addr) const { - if (base_addr == nullptr) { - return nullptr; - } - const auto opt_addr = reinterpret_cast(base_addr) + offset_; - if (IsUniquePtr()) { - const std::unique_ptr* ptr = - reinterpret_cast*>(opt_addr); - return ptr->get(); - } else if (IsSharedPtr()) { - const std::shared_ptr* ptr = - reinterpret_cast*>(opt_addr); - return ptr->get(); - } else if (IsRawPtr()) { - const T* const* ptr = reinterpret_cast(opt_addr); - return *ptr; - } else { - return reinterpret_cast(opt_addr); - } - } - - // Returns the underlying pointer for the type at base_addr - // The value returned is the underlying "raw" pointer, offset from base. - template - T* AsRawPointer(void* base_addr) const { - if (base_addr == nullptr) { - return nullptr; - } - auto opt_addr = reinterpret_cast(base_addr) + offset_; - if (IsUniquePtr()) { - std::unique_ptr* ptr = reinterpret_cast*>(opt_addr); - return ptr->get(); - } else if (IsSharedPtr()) { - std::shared_ptr* ptr = reinterpret_cast*>(opt_addr); - return ptr->get(); - } else if (IsRawPtr()) { - T** ptr = reinterpret_cast(opt_addr); - return *ptr; - } else { - return reinterpret_cast(opt_addr); - } - } - - // Parses the option in "opt_value" according to the rules of this class - // and updates the value at "opt_ptr". - // On success, Status::OK() is returned. On failure: - // NotFound means the opt_name is not valid for this option - // NotSupported means we do not know how to parse the value for this option - // InvalidArgument means the opt_value is not valid for this option. - Status Parse(const ConfigOptions& config_options, const std::string& opt_name, - const std::string& opt_value, void* const opt_ptr) const; - - // Serializes the option in "opt_addr" according to the rules of this class - // into the value at "opt_value". 
- Status Serialize(const ConfigOptions& config_options, - const std::string& opt_name, const void* const opt_ptr, - std::string* opt_value) const; - - // Compares the "addr1" and "addr2" values according to the rules of this - // class and returns true if they match. On a failed match, mismatch is the - // name of the option that failed to match. - bool AreEqual(const ConfigOptions& config_options, - const std::string& opt_name, const void* const addr1, - const void* const addr2, std::string* mismatch) const; - - // Used to override the match rules for "ByName" options. - bool AreEqualByName(const ConfigOptions& config_options, - const std::string& opt_name, const void* const this_ptr, - const void* const that_ptr) const; - bool AreEqualByName(const ConfigOptions& config_options, - const std::string& opt_name, const void* const this_ptr, - const std::string& that_value) const; - - // Parses the input value according to the map for the struct at opt_addr - // struct_name is the name of the struct option as registered - // opt_name is the name of the option being evaluated. This may - // be the whole struct or a sub-element of it, based on struct_name and - // opt_name. - static Status ParseStruct( - const ConfigOptions& config_options, const std::string& struct_name, - const std::unordered_map* map, - const std::string& opt_name, const std::string& value, char* opt_addr); - - // Serializes the input addr according to the map for the struct to value. - // struct_name is the name of the struct option as registered - // opt_name is the name of the option being evaluated. This may - // be the whole struct or a sub-element of it - static Status SerializeStruct( - const ConfigOptions& config_options, const std::string& struct_name, - const std::unordered_map* map, - const std::string& opt_name, const char* opt_addr, std::string* value); - - // Compares the input offsets according to the map for the struct and returns - // true if they are equivalent, false otherwise. 
- // struct_name is the name of the struct option as registered - // opt_name is the name of the option being evaluated. This may - // be the whole struct or a sub-element of it - static bool StructsAreEqual( - const ConfigOptions& config_options, const std::string& struct_name, - const std::unordered_map* map, - const std::string& opt_name, const char* this_offset, - const char* that_offset, std::string* mismatch); - - // Finds the entry for the opt_name in the opt_map, returning - // nullptr if not found. - // If found, elem_name will be the name of option to find. - // This may be opt_name, or a substring of opt_name. - // For "simple" options, opt_name will be equal to elem_name. Given the - // opt_name "opt", elem_name will equal "opt". - // For "embedded" options (like structs), elem_name may be opt_name - // or a field within the opt_name. For example, given the struct "struct", - // and opt_name of "struct.field", elem_name will be "field" - static const OptionTypeInfo* Find( - const std::string& opt_name, - const std::unordered_map& opt_map, - std::string* elem_name); - - // Returns the next token marked by the delimiter from "opts" after start in - // token and updates end to point to where that token stops. Delimiters inside - // of braces are ignored. Returns OK if a token is found and an error if the - // input opts string is mis-formatted. - // Given "a=AA;b=BB;" start=2 and delimiter=";", token is "AA" and end points - // to "b" Given "{a=A;b=B}", the token would be "a=A;b=B" - // - // @param opts The string in which to find the next token - // @param delimiter The delimiter between tokens - // @param start The position in opts to start looking for the token - // @parem ed Returns the end position in opts of the token - // @param token Returns the token - // @returns OK if a token was found - // @return InvalidArgument if the braces mismatch - // (e.g. 
"{a={b=c;}" ) -- missing closing brace - // @return InvalidArgument if an expected delimiter is not found - // e.g. "{a=b}c=d;" -- missing delimiter before "c" - static Status NextToken(const std::string& opts, char delimiter, size_t start, - size_t* end, std::string* token); - - private: - int offset_; - - // The optional function to convert a string to its representation - ParseFunc parse_func_; - - // The optional function to convert a value to its string representation - SerializeFunc serialize_func_; - - // The optional function to match two option values - EqualsFunc equals_func_; - - OptionType type_; - OptionVerificationType verification_; - OptionTypeFlags flags_; -}; - -// Parses the input value into elements of the result vector. This method -// will break the input value into the individual tokens (based on the -// separator), where each of those tokens will be parsed based on the rules of -// elem_info. The result vector will be populated with elements based on the -// input tokens. For example, if the value=1:2:3:4:5 and elem_info parses -// integers, the result vector will contain the integers 1,2,3,4,5 -// @param config_options Controls how the option value is parsed. -// @param elem_info Controls how individual tokens in value are parsed -// @param separator Character separating tokens in values (':' in the above -// example) -// @param name The name associated with this vector option -// @param value The input string to parse into tokens -// @param result Returns the results of parsing value into its elements. 
-// @return OK if the value was successfully parse -// @return InvalidArgument if the value is improperly formed or if the token -// could not be parsed -// @return NotFound If the tokenized value contains unknown options for -// its type -template -Status ParseVector(const ConfigOptions& config_options, - const OptionTypeInfo& elem_info, char separator, - const std::string& name, const std::string& value, - std::vector* result) { - result->clear(); - Status status; - - for (size_t start = 0, end = 0; - status.ok() && start < value.size() && end != std::string::npos; - start = end + 1) { - std::string token; - status = OptionTypeInfo::NextToken(value, separator, start, &end, &token); - if (status.ok()) { - T elem; - status = elem_info.Parse(config_options, name, token, - reinterpret_cast(&elem)); - if (status.ok()) { - result->emplace_back(elem); - } - } - } - return status; -} - -// Serializes the input vector into its output value. Elements are -// separated by the separator character. This element will convert all of the -// elements in vec into their serialized form, using elem_info to perform the -// serialization. -// For example, if the vec contains the integers 1,2,3,4,5 and elem_info -// serializes the output would be 1:2:3:4:5 for separator ":". -// @param config_options Controls how the option value is serialized. 
-// @param elem_info Controls how individual tokens in value are serialized -// @param separator Character separating tokens in value (':' in the above -// example) -// @param name The name associated with this vector option -// @param vec The input vector to serialize -// @param value The output string of serialized options -// @return OK if the value was successfully parse -// @return InvalidArgument if the value is improperly formed or if the token -// could not be parsed -// @return NotFound If the tokenized value contains unknown options for -// its type -template -Status SerializeVector(const ConfigOptions& config_options, - const OptionTypeInfo& elem_info, char separator, - const std::string& name, const std::vector& vec, - std::string* value) { - std::string result; - ConfigOptions embedded = config_options; - embedded.delimiter = ";"; - for (size_t i = 0; i < vec.size(); ++i) { - std::string elem_str; - Status s = elem_info.Serialize( - embedded, name, reinterpret_cast(&vec[i]), &elem_str); - if (!s.ok()) { - return s; - } else { - if (i > 0) { - result += separator; - } - // If the element contains embedded separators, put it inside of brackets - if (result.find(separator) != std::string::npos) { - result += "{" + elem_str + "}"; - } else { - result += elem_str; - } - } - } - if (result.find("=") != std::string::npos) { - *value = "{" + result + "}"; - } else { - *value = result; - } - return Status::OK(); -} - -// Compares the input vectors vec1 and vec2 for equality -// If the vectors are the same size, elements of the vectors are compared one by -// one using elem_info to perform the comparison. -// -// @param config_options Controls how the vectors are compared. -// @param elem_info Controls how individual elements in the vectors are compared -// @param name The name associated with this vector option -// @param vec1,vec2 The vectors to compare. 
-// @param mismatch If the vectors are not equivalent, mismatch will point to -// the first -// element of the comparison tht did not match. -// @return true If vec1 and vec2 are "equal", false otherwise -template -bool VectorsAreEqual(const ConfigOptions& config_options, - const OptionTypeInfo& elem_info, const std::string& name, - const std::vector& vec1, const std::vector& vec2, - std::string* mismatch) { - if (vec1.size() != vec2.size()) { - *mismatch = name; - return false; - } else { - for (size_t i = 0; i < vec1.size(); ++i) { - if (!elem_info.AreEqual( - config_options, name, reinterpret_cast(&vec1[i]), - reinterpret_cast(&vec2[i]), mismatch)) { - return false; - } - } - return true; - } -} -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/utilities/options_util.h b/dist/darwin_amd64/include/rocksdb/utilities/options_util.h deleted file mode 100644 index 681b42c..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/options_util.h +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -// This file contains utility functions for RocksDB Options. -#pragma once - -#ifndef ROCKSDB_LITE - -#include -#include - -#include "rocksdb/convenience.h" -#include "rocksdb/db.h" -#include "rocksdb/env.h" -#include "rocksdb/options.h" -#include "rocksdb/status.h" - -namespace ROCKSDB_NAMESPACE { -struct ConfigOptions; -// Constructs the DBOptions and ColumnFamilyDescriptors by loading the -// latest RocksDB options file stored in the specified rocksdb database. -// -// Note that the all the pointer options (except table_factory, which will -// be described in more details below) will be initialized with the default -// values. Developers can further initialize them after this function call. 
-// Below is an example list of pointer options which will be initialized -// -// * env -// * memtable_factory -// * compaction_filter_factory -// * prefix_extractor -// * comparator -// * merge_operator -// * compaction_filter -// -// User can also choose to load customized comparator, env, and/or -// merge_operator through object registry: -// * comparator needs to be registered through Registrar -// * env needs to be registered through Registrar -// * merge operator needs to be registered through -// Registrar>. -// -// For table_factory, this function further supports deserializing -// BlockBasedTableFactory and its BlockBasedTableOptions except the -// pointer options of BlockBasedTableOptions (flush_block_policy_factory, -// block_cache, and block_cache_compressed), which will be initialized with -// default values. Developers can further specify these three options by -// casting the return value of TableFactory::GetOptions() to -// BlockBasedTableOptions and making necessary changes. -// -// ignore_unknown_options can be set to true if you want to ignore options -// that are from a newer version of the db, esentially for forward -// compatibility. -// -// config_options contains a set of options that controls the processing -// of the options. The LoadLatestOptions(ConfigOptions...) should be preferred; -// the alternative signature may be deprecated in a future release. The -// equivalent functionality can be achieved by setting the corresponding options -// in the ConfigOptions parameter. -// -// examples/options_file_example.cc demonstrates how to use this function -// to open a RocksDB instance. -// -// @return the function returns an OK status when it went successfully. If -// the specified "dbpath" does not contain any option file, then a -// Status::NotFound will be returned. A return value other than -// Status::OK or Status::NotFound indicates there're some error related -// to the options file itself. 
-// -// @see LoadOptionsFromFile -Status LoadLatestOptions(const std::string& dbpath, Env* env, - DBOptions* db_options, - std::vector* cf_descs, - bool ignore_unknown_options = false, - std::shared_ptr* cache = {}); -Status LoadLatestOptions(const ConfigOptions& config_options, - const std::string& dbpath, DBOptions* db_options, - std::vector* cf_descs, - std::shared_ptr* cache = {}); - -// Similar to LoadLatestOptions, this function constructs the DBOptions -// and ColumnFamilyDescriptors based on the specified RocksDB Options file. -// -// The LoadOptionsFile(ConfigOptions...) should be preferred; -// the alternative signature may be deprecated in a future release. The -// equivalent functionality can be achieved by setting the corresponding -// options in the ConfigOptions parameter. -// -// @see LoadLatestOptions -Status LoadOptionsFromFile(const std::string& options_file_name, Env* env, - DBOptions* db_options, - std::vector* cf_descs, - bool ignore_unknown_options = false, - std::shared_ptr* cache = {}); -Status LoadOptionsFromFile(const ConfigOptions& config_options, - const std::string& options_file_name, - DBOptions* db_options, - std::vector* cf_descs, - std::shared_ptr* cache = {}); - -// Returns the latest options file name under the specified db path. -Status GetLatestOptionsFileName(const std::string& dbpath, Env* env, - std::string* options_file_name); - -// Returns Status::OK if the input DBOptions and ColumnFamilyDescriptors -// are compatible with the latest options stored in the specified DB path. -// -// If the return status is non-ok, it means the specified RocksDB instance -// might not be correctly opened with the input set of options. 
Currently, -// changing one of the following options will fail the compatibility check: -// -// * comparator -// * prefix_extractor -// * table_factory -// * merge_operator -Status CheckOptionsCompatibility( - const std::string& dbpath, Env* env, const DBOptions& db_options, - const std::vector& cf_descs, - bool ignore_unknown_options = false); -Status CheckOptionsCompatibility( - const ConfigOptions& config_options, const std::string& dbpath, - const DBOptions& db_options, - const std::vector& cf_descs); - -} // namespace ROCKSDB_NAMESPACE -#endif // !ROCKSDB_LITE diff --git a/dist/darwin_amd64/include/rocksdb/utilities/sim_cache.h b/dist/darwin_amd64/include/rocksdb/utilities/sim_cache.h deleted file mode 100644 index ba6f1d7..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/sim_cache.h +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#include -#include -#include -#include "rocksdb/cache.h" -#include "rocksdb/env.h" -#include "rocksdb/slice.h" -#include "rocksdb/statistics.h" -#include "rocksdb/status.h" - -namespace ROCKSDB_NAMESPACE { - -class SimCache; - -// For instrumentation purpose, use NewSimCache instead of NewLRUCache API -// NewSimCache is a wrapper function returning a SimCache instance that can -// have additional interface provided in Simcache class besides Cache interface -// to predict block cache hit rate without actually allocating the memory. It -// can help users tune their current block cache size, and determine how -// efficient they are using the memory. 
-// -// Since GetSimCapacity() returns the capacity for simulutation, it differs from -// actual memory usage, which can be estimated as: -// sim_capacity * entry_size / (entry_size + block_size), -// where 76 <= entry_size <= 104, -// BlockBasedTableOptions.block_size = 4096 by default but is configurable, -// Therefore, generally the actual memory overhead of SimCache is Less than -// sim_capacity * 2% -extern std::shared_ptr NewSimCache(std::shared_ptr cache, - size_t sim_capacity, - int num_shard_bits); - -extern std::shared_ptr NewSimCache(std::shared_ptr sim_cache, - std::shared_ptr cache, - int num_shard_bits); - -class SimCache : public Cache { - public: - SimCache() {} - - ~SimCache() override {} - - const char* Name() const override { return "SimCache"; } - - // returns the maximum configured capacity of the simcache for simulation - virtual size_t GetSimCapacity() const = 0; - - // simcache doesn't provide internal handler reference to user, so always - // PinnedUsage = 0 and the behavior will be not exactly consistent the - // with real cache. - // returns the memory size for the entries residing in the simcache. - virtual size_t GetSimUsage() const = 0; - - // sets the maximum configured capacity of the simcache. When the new - // capacity is less than the old capacity and the existing usage is - // greater than new capacity, the implementation will purge old entries - // to fit new capapicty. 
- virtual void SetSimCapacity(size_t capacity) = 0; - - // returns the lookup times of simcache - virtual uint64_t get_miss_counter() const = 0; - // returns the hit times of simcache - virtual uint64_t get_hit_counter() const = 0; - // reset the lookup and hit counters - virtual void reset_counter() = 0; - // String representation of the statistics of the simcache - virtual std::string ToString() const = 0; - - // Start storing logs of the cache activity (Add/Lookup) into - // a file located at activity_log_file, max_logging_size option can be used to - // stop logging to the file automatically after reaching a specific size in - // bytes, a values of 0 disable this feature - virtual Status StartActivityLogging(const std::string& activity_log_file, - Env* env, - uint64_t max_logging_size = 0) = 0; - - // Stop cache activity logging if any - virtual void StopActivityLogging() = 0; - - // Status of cache logging happening in background - virtual Status GetActivityLoggingStatus() = 0; - - private: - SimCache(const SimCache&); - SimCache& operator=(const SimCache&); -}; - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/utilities/stackable_db.h b/dist/darwin_amd64/include/rocksdb/utilities/stackable_db.h deleted file mode 100644 index 93c9e9a..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/stackable_db.h +++ /dev/null @@ -1,481 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. - -#pragma once -#include -#include -#include -#include "rocksdb/db.h" - -#ifdef _WIN32 -// Windows API macro interference -#undef DeleteFile -#endif - -namespace ROCKSDB_NAMESPACE { - -// This class contains APIs to stack rocksdb wrappers.Eg. 
Stack TTL over base d -class StackableDB : public DB { - public: - // StackableDB take sole ownership of the underlying db. - explicit StackableDB(DB* db) : db_(db) {} - - // StackableDB take shared ownership of the underlying db. - explicit StackableDB(std::shared_ptr db) - : db_(db.get()), shared_db_ptr_(db) {} - - ~StackableDB() { - if (shared_db_ptr_ == nullptr) { - delete db_; - } else { - assert(shared_db_ptr_.get() == db_); - } - db_ = nullptr; - } - - virtual Status Close() override { return db_->Close(); } - - virtual DB* GetBaseDB() { return db_; } - - virtual DB* GetRootDB() override { return db_->GetRootDB(); } - - virtual Status CreateColumnFamily(const ColumnFamilyOptions& options, - const std::string& column_family_name, - ColumnFamilyHandle** handle) override { - return db_->CreateColumnFamily(options, column_family_name, handle); - } - - virtual Status CreateColumnFamilies( - const ColumnFamilyOptions& options, - const std::vector& column_family_names, - std::vector* handles) override { - return db_->CreateColumnFamilies(options, column_family_names, handles); - } - - virtual Status CreateColumnFamilies( - const std::vector& column_families, - std::vector* handles) override { - return db_->CreateColumnFamilies(column_families, handles); - } - - virtual Status DropColumnFamily(ColumnFamilyHandle* column_family) override { - return db_->DropColumnFamily(column_family); - } - - virtual Status DropColumnFamilies( - const std::vector& column_families) override { - return db_->DropColumnFamilies(column_families); - } - - virtual Status DestroyColumnFamilyHandle( - ColumnFamilyHandle* column_family) override { - return db_->DestroyColumnFamilyHandle(column_family); - } - - using DB::Put; - virtual Status Put(const WriteOptions& options, - ColumnFamilyHandle* column_family, const Slice& key, - const Slice& val) override { - return db_->Put(options, column_family, key, val); - } - - using DB::Get; - virtual Status Get(const ReadOptions& options, - 
ColumnFamilyHandle* column_family, const Slice& key, - PinnableSlice* value) override { - return db_->Get(options, column_family, key, value); - } - - using DB::GetMergeOperands; - virtual Status GetMergeOperands( - const ReadOptions& options, ColumnFamilyHandle* column_family, - const Slice& key, PinnableSlice* slice, - GetMergeOperandsOptions* get_merge_operands_options, - int* number_of_operands) override { - return db_->GetMergeOperands(options, column_family, key, slice, - get_merge_operands_options, - number_of_operands); - } - - using DB::MultiGet; - virtual std::vector MultiGet( - const ReadOptions& options, - const std::vector& column_family, - const std::vector& keys, - std::vector* values) override { - return db_->MultiGet(options, column_family, keys, values); - } - - virtual void MultiGet(const ReadOptions& options, - ColumnFamilyHandle* column_family, - const size_t num_keys, const Slice* keys, - PinnableSlice* values, Status* statuses, - const bool sorted_input = false) override { - return db_->MultiGet(options, column_family, num_keys, keys, - values, statuses, sorted_input); - } - - using DB::IngestExternalFile; - virtual Status IngestExternalFile( - ColumnFamilyHandle* column_family, - const std::vector& external_files, - const IngestExternalFileOptions& options) override { - return db_->IngestExternalFile(column_family, external_files, options); - } - - using DB::IngestExternalFiles; - virtual Status IngestExternalFiles( - const std::vector& args) override { - return db_->IngestExternalFiles(args); - } - - using DB::CreateColumnFamilyWithImport; - virtual Status CreateColumnFamilyWithImport( - const ColumnFamilyOptions& options, const std::string& column_family_name, - const ImportColumnFamilyOptions& import_options, - const ExportImportFilesMetaData& metadata, - ColumnFamilyHandle** handle) override { - return db_->CreateColumnFamilyWithImport(options, column_family_name, - import_options, metadata, handle); - } - - virtual Status 
VerifyChecksum() override { return db_->VerifyChecksum(); } - - virtual Status VerifyChecksum(const ReadOptions& options) override { - return db_->VerifyChecksum(options); - } - - using DB::KeyMayExist; - virtual bool KeyMayExist(const ReadOptions& options, - ColumnFamilyHandle* column_family, const Slice& key, - std::string* value, - bool* value_found = nullptr) override { - return db_->KeyMayExist(options, column_family, key, value, value_found); - } - - using DB::Delete; - virtual Status Delete(const WriteOptions& wopts, - ColumnFamilyHandle* column_family, - const Slice& key) override { - return db_->Delete(wopts, column_family, key); - } - - using DB::SingleDelete; - virtual Status SingleDelete(const WriteOptions& wopts, - ColumnFamilyHandle* column_family, - const Slice& key) override { - return db_->SingleDelete(wopts, column_family, key); - } - - using DB::Merge; - virtual Status Merge(const WriteOptions& options, - ColumnFamilyHandle* column_family, const Slice& key, - const Slice& value) override { - return db_->Merge(options, column_family, key, value); - } - - virtual Status Write(const WriteOptions& opts, WriteBatch* updates) override { - return db_->Write(opts, updates); - } - - using DB::NewIterator; - virtual Iterator* NewIterator(const ReadOptions& opts, - ColumnFamilyHandle* column_family) override { - return db_->NewIterator(opts, column_family); - } - - virtual Status NewIterators( - const ReadOptions& options, - const std::vector& column_families, - std::vector* iterators) override { - return db_->NewIterators(options, column_families, iterators); - } - - virtual const Snapshot* GetSnapshot() override { return db_->GetSnapshot(); } - - virtual void ReleaseSnapshot(const Snapshot* snapshot) override { - return db_->ReleaseSnapshot(snapshot); - } - - using DB::GetMapProperty; - using DB::GetProperty; - virtual bool GetProperty(ColumnFamilyHandle* column_family, - const Slice& property, std::string* value) override { - return 
db_->GetProperty(column_family, property, value); - } - virtual bool GetMapProperty( - ColumnFamilyHandle* column_family, const Slice& property, - std::map* value) override { - return db_->GetMapProperty(column_family, property, value); - } - - using DB::GetIntProperty; - virtual bool GetIntProperty(ColumnFamilyHandle* column_family, - const Slice& property, uint64_t* value) override { - return db_->GetIntProperty(column_family, property, value); - } - - using DB::GetAggregatedIntProperty; - virtual bool GetAggregatedIntProperty(const Slice& property, - uint64_t* value) override { - return db_->GetAggregatedIntProperty(property, value); - } - - using DB::GetApproximateSizes; - virtual Status GetApproximateSizes(const SizeApproximationOptions& options, - ColumnFamilyHandle* column_family, - const Range* r, int n, - uint64_t* sizes) override { - return db_->GetApproximateSizes(options, column_family, r, n, sizes); - } - - using DB::GetApproximateMemTableStats; - virtual void GetApproximateMemTableStats(ColumnFamilyHandle* column_family, - const Range& range, - uint64_t* const count, - uint64_t* const size) override { - return db_->GetApproximateMemTableStats(column_family, range, count, size); - } - - using DB::CompactRange; - virtual Status CompactRange(const CompactRangeOptions& options, - ColumnFamilyHandle* column_family, - const Slice* begin, const Slice* end) override { - return db_->CompactRange(options, column_family, begin, end); - } - - using DB::CompactFiles; - virtual Status CompactFiles( - const CompactionOptions& compact_options, - ColumnFamilyHandle* column_family, - const std::vector& input_file_names, const int output_level, - const int output_path_id = -1, - std::vector* const output_file_names = nullptr, - CompactionJobInfo* compaction_job_info = nullptr) override { - return db_->CompactFiles(compact_options, column_family, input_file_names, - output_level, output_path_id, output_file_names, - compaction_job_info); - } - - virtual Status 
PauseBackgroundWork() override { - return db_->PauseBackgroundWork(); - } - virtual Status ContinueBackgroundWork() override { - return db_->ContinueBackgroundWork(); - } - - virtual Status EnableAutoCompaction( - const std::vector& column_family_handles) override { - return db_->EnableAutoCompaction(column_family_handles); - } - - virtual void EnableManualCompaction() override { - return db_->EnableManualCompaction(); - } - virtual void DisableManualCompaction() override { - return db_->DisableManualCompaction(); - } - - using DB::NumberLevels; - virtual int NumberLevels(ColumnFamilyHandle* column_family) override { - return db_->NumberLevels(column_family); - } - - using DB::MaxMemCompactionLevel; - virtual int MaxMemCompactionLevel( - ColumnFamilyHandle* column_family) override { - return db_->MaxMemCompactionLevel(column_family); - } - - using DB::Level0StopWriteTrigger; - virtual int Level0StopWriteTrigger( - ColumnFamilyHandle* column_family) override { - return db_->Level0StopWriteTrigger(column_family); - } - - virtual const std::string& GetName() const override { return db_->GetName(); } - - virtual Env* GetEnv() const override { return db_->GetEnv(); } - - virtual FileSystem* GetFileSystem() const override { - return db_->GetFileSystem(); - } - - using DB::GetOptions; - virtual Options GetOptions(ColumnFamilyHandle* column_family) const override { - return db_->GetOptions(column_family); - } - - using DB::GetDBOptions; - virtual DBOptions GetDBOptions() const override { - return db_->GetDBOptions(); - } - - using DB::Flush; - virtual Status Flush(const FlushOptions& fopts, - ColumnFamilyHandle* column_family) override { - return db_->Flush(fopts, column_family); - } - virtual Status Flush( - const FlushOptions& fopts, - const std::vector& column_families) override { - return db_->Flush(fopts, column_families); - } - - virtual Status SyncWAL() override { return db_->SyncWAL(); } - - virtual Status FlushWAL(bool sync) override { return db_->FlushWAL(sync); 
} - - virtual Status LockWAL() override { return db_->LockWAL(); } - - virtual Status UnlockWAL() override { return db_->UnlockWAL(); } - -#ifndef ROCKSDB_LITE - - virtual Status DisableFileDeletions() override { - return db_->DisableFileDeletions(); - } - - virtual Status EnableFileDeletions(bool force) override { - return db_->EnableFileDeletions(force); - } - - virtual void GetLiveFilesMetaData( - std::vector* metadata) override { - db_->GetLiveFilesMetaData(metadata); - } - - virtual Status GetLiveFilesChecksumInfo( - FileChecksumList* checksum_list) override { - return db_->GetLiveFilesChecksumInfo(checksum_list); - } - - virtual void GetColumnFamilyMetaData(ColumnFamilyHandle* column_family, - ColumnFamilyMetaData* cf_meta) override { - db_->GetColumnFamilyMetaData(column_family, cf_meta); - } - - using DB::StartBlockCacheTrace; - Status StartBlockCacheTrace( - const TraceOptions& options, - std::unique_ptr&& trace_writer) override { - return db_->StartBlockCacheTrace(options, std::move(trace_writer)); - } - - using DB::EndBlockCacheTrace; - Status EndBlockCacheTrace() override { return db_->EndBlockCacheTrace(); } - -#endif // ROCKSDB_LITE - - virtual Status GetLiveFiles(std::vector& vec, uint64_t* mfs, - bool flush_memtable = true) override { - return db_->GetLiveFiles(vec, mfs, flush_memtable); - } - - virtual SequenceNumber GetLatestSequenceNumber() const override { - return db_->GetLatestSequenceNumber(); - } - - virtual bool SetPreserveDeletesSequenceNumber( - SequenceNumber seqnum) override { - return db_->SetPreserveDeletesSequenceNumber(seqnum); - } - - virtual Status GetSortedWalFiles(VectorLogPtr& files) override { - return db_->GetSortedWalFiles(files); - } - - virtual Status GetCurrentWalFile( - std::unique_ptr* current_log_file) override { - return db_->GetCurrentWalFile(current_log_file); - } - - virtual Status GetCreationTimeOfOldestFile( - uint64_t* creation_time) override { - return db_->GetCreationTimeOfOldestFile(creation_time); - } - - // 
WARNING: This API is planned for removal in RocksDB 7.0 since it does not - // operate at the proper level of abstraction for a key-value store, and its - // contract/restrictions are poorly documented. For example, it returns non-OK - // `Status` for non-bottommost files and files undergoing compaction. Since we - // do not plan to maintain it, the contract will likely remain underspecified - // until its removal. Any user is encouraged to read the implementation - // carefully and migrate away from it when possible. - virtual Status DeleteFile(std::string name) override { - return db_->DeleteFile(name); - } - - virtual Status GetDbIdentity(std::string& identity) const override { - return db_->GetDbIdentity(identity); - } - - virtual Status GetDbSessionId(std::string& session_id) const override { - return db_->GetDbSessionId(session_id); - } - - using DB::SetOptions; - virtual Status SetOptions(ColumnFamilyHandle* column_family_handle, - const std::unordered_map& - new_options) override { - return db_->SetOptions(column_family_handle, new_options); - } - - virtual Status SetDBOptions( - const std::unordered_map& new_options) - override { - return db_->SetDBOptions(new_options); - } - - using DB::ResetStats; - virtual Status ResetStats() override { return db_->ResetStats(); } - - using DB::GetPropertiesOfAllTables; - virtual Status GetPropertiesOfAllTables( - ColumnFamilyHandle* column_family, - TablePropertiesCollection* props) override { - return db_->GetPropertiesOfAllTables(column_family, props); - } - - using DB::GetPropertiesOfTablesInRange; - virtual Status GetPropertiesOfTablesInRange( - ColumnFamilyHandle* column_family, const Range* range, std::size_t n, - TablePropertiesCollection* props) override { - return db_->GetPropertiesOfTablesInRange(column_family, range, n, props); - } - - virtual Status GetUpdatesSince( - SequenceNumber seq_number, std::unique_ptr* iter, - const TransactionLogIterator::ReadOptions& read_options) override { - return 
db_->GetUpdatesSince(seq_number, iter, read_options); - } - - virtual Status SuggestCompactRange(ColumnFamilyHandle* column_family, - const Slice* begin, - const Slice* end) override { - return db_->SuggestCompactRange(column_family, begin, end); - } - - virtual Status PromoteL0(ColumnFamilyHandle* column_family, - int target_level) override { - return db_->PromoteL0(column_family, target_level); - } - - virtual ColumnFamilyHandle* DefaultColumnFamily() const override { - return db_->DefaultColumnFamily(); - } - -#ifndef ROCKSDB_LITE - Status TryCatchUpWithPrimary() override { - return db_->TryCatchUpWithPrimary(); - } -#endif // ROCKSDB_LITE - - protected: - DB* db_; - std::shared_ptr shared_db_ptr_; -}; - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/utilities/table_properties_collectors.h b/dist/darwin_amd64/include/rocksdb/utilities/table_properties_collectors.h deleted file mode 100644 index 0f5612b..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/table_properties_collectors.h +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once -#ifndef ROCKSDB_LITE -#include -#include - -#include "rocksdb/table_properties.h" - -namespace ROCKSDB_NAMESPACE { - -// A factory of a table property collector that marks a SST -// file as need-compaction when it observe at least "D" deletion -// entries in any "N" consecutive entries or the ratio of tombstone -// entries in the whole file >= the specified deletion ratio. 
-class CompactOnDeletionCollectorFactory - : public TablePropertiesCollectorFactory { - public: - ~CompactOnDeletionCollectorFactory() {} - - TablePropertiesCollector* CreateTablePropertiesCollector( - TablePropertiesCollectorFactory::Context context) override; - - // Change the value of sliding_window_size "N" - // Setting it to 0 disables the delete triggered compaction - void SetWindowSize(size_t sliding_window_size) { - sliding_window_size_.store(sliding_window_size); - } - - // Change the value of deletion_trigger "D" - void SetDeletionTrigger(size_t deletion_trigger) { - deletion_trigger_.store(deletion_trigger); - } - - // Change deletion ratio. - // @param deletion_ratio, if <= 0 or > 1, disable triggering compaction - // based on deletion ratio. - void SetDeletionRatio(double deletion_ratio) { - deletion_ratio_.store(deletion_ratio); - } - - const char* Name() const override { - return "CompactOnDeletionCollector"; - } - - std::string ToString() const override; - - private: - friend std::shared_ptr - NewCompactOnDeletionCollectorFactory(size_t sliding_window_size, - size_t deletion_trigger, - double deletion_ratio); - // A factory of a table property collector that marks a SST - // file as need-compaction when it observe at least "D" deletion - // entries in any "N" consecutive entries, or the ratio of tombstone - // entries >= deletion_ratio. - // - // @param sliding_window_size "N" - // @param deletion_trigger "D" - // @param deletion_ratio, if <= 0 or > 1, disable triggering compaction - // based on deletion ratio. 
- CompactOnDeletionCollectorFactory(size_t sliding_window_size, - size_t deletion_trigger, - double deletion_ratio) - : sliding_window_size_(sliding_window_size), - deletion_trigger_(deletion_trigger), - deletion_ratio_(deletion_ratio) {} - - std::atomic sliding_window_size_; - std::atomic deletion_trigger_; - std::atomic deletion_ratio_; -}; - -// Creates a factory of a table property collector that marks a SST -// file as need-compaction when it observe at least "D" deletion -// entries in any "N" consecutive entries, or the ratio of tombstone -// entries >= deletion_ratio. -// -// @param sliding_window_size "N". Note that this number will be -// round up to the smallest multiple of 128 that is no less -// than the specified size. -// @param deletion_trigger "D". Note that even when "N" is changed, -// the specified number for "D" will not be changed. -// @param deletion_ratio, if <= 0 or > 1, disable triggering compaction -// based on deletion ratio. Disabled by default. -extern std::shared_ptr -NewCompactOnDeletionCollectorFactory(size_t sliding_window_size, - size_t deletion_trigger, - double deletion_ratio = 0); -} // namespace ROCKSDB_NAMESPACE - -#endif // !ROCKSDB_LITE diff --git a/dist/darwin_amd64/include/rocksdb/utilities/transaction.h b/dist/darwin_amd64/include/rocksdb/utilities/transaction.h deleted file mode 100644 index 6ebdbcc..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/transaction.h +++ /dev/null @@ -1,543 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). 
- -#pragma once - -#ifndef ROCKSDB_LITE - -#include -#include - -#include "rocksdb/comparator.h" -#include "rocksdb/db.h" -#include "rocksdb/status.h" - -namespace ROCKSDB_NAMESPACE { - -class Iterator; -class TransactionDB; -class WriteBatchWithIndex; - -using TransactionName = std::string; - -using TransactionID = uint64_t; - -// Provides notification to the caller of SetSnapshotOnNextOperation when -// the actual snapshot gets created -class TransactionNotifier { - public: - virtual ~TransactionNotifier() {} - - // Implement this method to receive notification when a snapshot is - // requested via SetSnapshotOnNextOperation. - virtual void SnapshotCreated(const Snapshot* newSnapshot) = 0; -}; - -// Provides BEGIN/COMMIT/ROLLBACK transactions. -// -// To use transactions, you must first create either an OptimisticTransactionDB -// or a TransactionDB. See examples/[optimistic_]transaction_example.cc for -// more information. -// -// To create a transaction, use [Optimistic]TransactionDB::BeginTransaction(). -// -// It is up to the caller to synchronize access to this object. -// -// See examples/transaction_example.cc for some simple examples. -// -// TODO(agiardullo): Not yet implemented -// -PerfContext statistics -// -Support for using Transactions with DBWithTTL -class Transaction { - public: - // No copying allowed - Transaction(const Transaction&) = delete; - void operator=(const Transaction&) = delete; - - virtual ~Transaction() {} - - // If a transaction has a snapshot set, the transaction will ensure that - // any keys successfully written(or fetched via GetForUpdate()) have not - // been modified outside of this transaction since the time the snapshot was - // set. - // If a snapshot has not been set, the transaction guarantees that keys have - // not been modified since the time each key was first written (or fetched via - // GetForUpdate()). 
- // - // Using SetSnapshot() will provide stricter isolation guarantees at the - // expense of potentially more transaction failures due to conflicts with - // other writes. - // - // Calling SetSnapshot() has no effect on keys written before this function - // has been called. - // - // SetSnapshot() may be called multiple times if you would like to change - // the snapshot used for different operations in this transaction. - // - // Calling SetSnapshot will not affect the version of Data returned by Get() - // methods. See Transaction::Get() for more details. - virtual void SetSnapshot() = 0; - - // Similar to SetSnapshot(), but will not change the current snapshot - // until Put/Merge/Delete/GetForUpdate/MultigetForUpdate is called. - // By calling this function, the transaction will essentially call - // SetSnapshot() for you right before performing the next write/GetForUpdate. - // - // Calling SetSnapshotOnNextOperation() will not affect what snapshot is - // returned by GetSnapshot() until the next write/GetForUpdate is executed. - // - // When the snapshot is created the notifier's SnapshotCreated method will - // be called so that the caller can get access to the snapshot. - // - // This is an optimization to reduce the likelihood of conflicts that - // could occur in between the time SetSnapshot() is called and the first - // write/GetForUpdate operation. Eg, this prevents the following - // race-condition: - // - // txn1->SetSnapshot(); - // txn2->Put("A", ...); - // txn2->Commit(); - // txn1->GetForUpdate(opts, "A", ...); // FAIL! - virtual void SetSnapshotOnNextOperation( - std::shared_ptr notifier = nullptr) = 0; - - // Returns the Snapshot created by the last call to SetSnapshot(). - // - // REQUIRED: The returned Snapshot is only valid up until the next time - // SetSnapshot()/SetSnapshotOnNextSavePoint() is called, ClearSnapshot() - // is called, or the Transaction is deleted. 
- virtual const Snapshot* GetSnapshot() const = 0; - - // Clears the current snapshot (i.e. no snapshot will be 'set') - // - // This removes any snapshot that currently exists or is set to be created - // on the next update operation (SetSnapshotOnNextOperation). - // - // Calling ClearSnapshot() has no effect on keys written before this function - // has been called. - // - // If a reference to a snapshot was retrieved via GetSnapshot(), it will no - // longer be valid and should be discarded after a call to ClearSnapshot(). - virtual void ClearSnapshot() = 0; - - // Prepare the current transaction for 2PC - virtual Status Prepare() = 0; - - // Write all batched keys to the db atomically. - // - // Returns OK on success. - // - // May return any error status that could be returned by DB:Write(). - // - // If this transaction was created by an OptimisticTransactionDB(), - // Status::Busy() may be returned if the transaction could not guarantee - // that there are no write conflicts. Status::TryAgain() may be returned - // if the memtable history size is not large enough - // (See max_write_buffer_size_to_maintain). - // - // If this transaction was created by a TransactionDB(), Status::Expired() - // may be returned if this transaction has lived for longer than - // TransactionOptions.expiration. Status::TxnNotPrepared() may be returned if - // TransactionOptions.skip_prepare is false and Prepare is not called on this - // transaction before Commit. - virtual Status Commit() = 0; - - // Discard all batched writes in this transaction. - virtual Status Rollback() = 0; - - // Records the state of the transaction for future calls to - // RollbackToSavePoint(). May be called multiple times to set multiple save - // points. - virtual void SetSavePoint() = 0; - - // Undo all operations in this transaction (Put, Merge, Delete, PutLogData) - // since the most recent call to SetSavePoint() and removes the most recent - // SetSavePoint(). 
- // If there is no previous call to SetSavePoint(), returns Status::NotFound() - virtual Status RollbackToSavePoint() = 0; - - // Pop the most recent save point. - // If there is no previous call to SetSavePoint(), Status::NotFound() - // will be returned. - // Otherwise returns Status::OK(). - virtual Status PopSavePoint() = 0; - - // This function is similar to DB::Get() except it will also read pending - // changes in this transaction. Currently, this function will return - // Status::MergeInProgress if the most recent write to the queried key in - // this batch is a Merge. - // - // If read_options.snapshot is not set, the current version of the key will - // be read. Calling SetSnapshot() does not affect the version of the data - // returned. - // - // Note that setting read_options.snapshot will affect what is read from the - // DB but will NOT change which keys are read from this transaction (the keys - // in this transaction do not yet belong to any snapshot and will be fetched - // regardless). 
- virtual Status Get(const ReadOptions& options, - ColumnFamilyHandle* column_family, const Slice& key, - std::string* value) = 0; - - // An overload of the above method that receives a PinnableSlice - // For backward compatibility a default implementation is provided - virtual Status Get(const ReadOptions& options, - ColumnFamilyHandle* column_family, const Slice& key, - PinnableSlice* pinnable_val) { - assert(pinnable_val != nullptr); - auto s = Get(options, column_family, key, pinnable_val->GetSelf()); - pinnable_val->PinSelf(); - return s; - } - - virtual Status Get(const ReadOptions& options, const Slice& key, - std::string* value) = 0; - virtual Status Get(const ReadOptions& options, const Slice& key, - PinnableSlice* pinnable_val) { - assert(pinnable_val != nullptr); - auto s = Get(options, key, pinnable_val->GetSelf()); - pinnable_val->PinSelf(); - return s; - } - - virtual std::vector MultiGet( - const ReadOptions& options, - const std::vector& column_family, - const std::vector& keys, std::vector* values) = 0; - - virtual std::vector MultiGet(const ReadOptions& options, - const std::vector& keys, - std::vector* values) = 0; - - // Batched version of MultiGet - see DBImpl::MultiGet(). Sub-classes are - // expected to override this with an implementation that calls - // DBImpl::MultiGet() - virtual void MultiGet(const ReadOptions& options, - ColumnFamilyHandle* column_family, - const size_t num_keys, const Slice* keys, - PinnableSlice* values, Status* statuses, - const bool /*sorted_input*/ = false) { - for (size_t i = 0; i < num_keys; ++i) { - statuses[i] = Get(options, column_family, keys[i], &values[i]); - } - } - - // Read this key and ensure that this transaction will only - // be able to be committed if this key is not written outside this - // transaction after it has first been read (or after the snapshot if a - // snapshot is set in this transaction and do_validate is true). 
If - // do_validate is false, ReadOptions::snapshot is expected to be nullptr so - // that GetForUpdate returns the latest committed value. The transaction - // behavior is the same regardless of whether the key exists or not. - // - // Note: Currently, this function will return Status::MergeInProgress - // if the most recent write to the queried key in this batch is a Merge. - // - // The values returned by this function are similar to Transaction::Get(). - // If value==nullptr, then this function will not read any data, but will - // still ensure that this key cannot be written to by outside of this - // transaction. - // - // If this transaction was created by an OptimisticTransaction, GetForUpdate() - // could cause commit() to fail. Otherwise, it could return any error - // that could be returned by DB::Get(). - // - // If this transaction was created by a TransactionDB, it can return - // Status::OK() on success, - // Status::Busy() if there is a write conflict, - // Status::TimedOut() if a lock could not be acquired, - // Status::TryAgain() if the memtable history size is not large enough - // (See max_write_buffer_size_to_maintain) - // Status::MergeInProgress() if merge operations cannot be resolved. - // or other errors if this key could not be read. 
- virtual Status GetForUpdate(const ReadOptions& options, - ColumnFamilyHandle* column_family, - const Slice& key, std::string* value, - bool exclusive = true, - const bool do_validate = true) = 0; - - // An overload of the above method that receives a PinnableSlice - // For backward compatibility a default implementation is provided - virtual Status GetForUpdate(const ReadOptions& options, - ColumnFamilyHandle* column_family, - const Slice& key, PinnableSlice* pinnable_val, - bool exclusive = true, - const bool do_validate = true) { - if (pinnable_val == nullptr) { - std::string* null_str = nullptr; - return GetForUpdate(options, column_family, key, null_str, exclusive, - do_validate); - } else { - auto s = GetForUpdate(options, column_family, key, - pinnable_val->GetSelf(), exclusive, do_validate); - pinnable_val->PinSelf(); - return s; - } - } - - virtual Status GetForUpdate(const ReadOptions& options, const Slice& key, - std::string* value, bool exclusive = true, - const bool do_validate = true) = 0; - - virtual std::vector MultiGetForUpdate( - const ReadOptions& options, - const std::vector& column_family, - const std::vector& keys, std::vector* values) = 0; - - virtual std::vector MultiGetForUpdate( - const ReadOptions& options, const std::vector& keys, - std::vector* values) = 0; - - // Returns an iterator that will iterate on all keys in the default - // column family including both keys in the DB and uncommitted keys in this - // transaction. - // - // Setting read_options.snapshot will affect what is read from the - // DB but will NOT change which keys are read from this transaction (the keys - // in this transaction do not yet belong to any snapshot and will be fetched - // regardless). - // - // Caller is responsible for deleting the returned Iterator. - // - // The returned iterator is only valid until Commit(), Rollback(), or - // RollbackToSavePoint() is called. 
- virtual Iterator* GetIterator(const ReadOptions& read_options) = 0; - - virtual Iterator* GetIterator(const ReadOptions& read_options, - ColumnFamilyHandle* column_family) = 0; - - // Put, Merge, Delete, and SingleDelete behave similarly to the corresponding - // functions in WriteBatch, but will also do conflict checking on the - // keys being written. - // - // assume_tracked=true expects the key be already tracked. More - // specifically, it means the the key was previous tracked in the same - // savepoint, with the same exclusive flag, and at a lower sequence number. - // If valid then it skips ValidateSnapshot. Returns error otherwise. - // - // If this Transaction was created on an OptimisticTransactionDB, these - // functions should always return Status::OK(). - // - // If this Transaction was created on a TransactionDB, the status returned - // can be: - // Status::OK() on success, - // Status::Busy() if there is a write conflict, - // Status::TimedOut() if a lock could not be acquired, - // Status::TryAgain() if the memtable history size is not large enough - // (See max_write_buffer_size_to_maintain) - // or other errors on unexpected failures. 
- virtual Status Put(ColumnFamilyHandle* column_family, const Slice& key, - const Slice& value, const bool assume_tracked = false) = 0; - virtual Status Put(const Slice& key, const Slice& value) = 0; - virtual Status Put(ColumnFamilyHandle* column_family, const SliceParts& key, - const SliceParts& value, - const bool assume_tracked = false) = 0; - virtual Status Put(const SliceParts& key, const SliceParts& value) = 0; - - virtual Status Merge(ColumnFamilyHandle* column_family, const Slice& key, - const Slice& value, - const bool assume_tracked = false) = 0; - virtual Status Merge(const Slice& key, const Slice& value) = 0; - - virtual Status Delete(ColumnFamilyHandle* column_family, const Slice& key, - const bool assume_tracked = false) = 0; - virtual Status Delete(const Slice& key) = 0; - virtual Status Delete(ColumnFamilyHandle* column_family, - const SliceParts& key, - const bool assume_tracked = false) = 0; - virtual Status Delete(const SliceParts& key) = 0; - - virtual Status SingleDelete(ColumnFamilyHandle* column_family, - const Slice& key, - const bool assume_tracked = false) = 0; - virtual Status SingleDelete(const Slice& key) = 0; - virtual Status SingleDelete(ColumnFamilyHandle* column_family, - const SliceParts& key, - const bool assume_tracked = false) = 0; - virtual Status SingleDelete(const SliceParts& key) = 0; - - // PutUntracked() will write a Put to the batch of operations to be committed - // in this transaction. This write will only happen if this transaction - // gets committed successfully. But unlike Transaction::Put(), - // no conflict checking will be done for this key. - // - // If this Transaction was created on a PessimisticTransactionDB, this - // function will still acquire locks necessary to make sure this write doesn't - // cause conflicts in other transactions and may return Status::Busy(). 
- virtual Status PutUntracked(ColumnFamilyHandle* column_family, - const Slice& key, const Slice& value) = 0; - virtual Status PutUntracked(const Slice& key, const Slice& value) = 0; - virtual Status PutUntracked(ColumnFamilyHandle* column_family, - const SliceParts& key, - const SliceParts& value) = 0; - virtual Status PutUntracked(const SliceParts& key, - const SliceParts& value) = 0; - - virtual Status MergeUntracked(ColumnFamilyHandle* column_family, - const Slice& key, const Slice& value) = 0; - virtual Status MergeUntracked(const Slice& key, const Slice& value) = 0; - - virtual Status DeleteUntracked(ColumnFamilyHandle* column_family, - const Slice& key) = 0; - - virtual Status DeleteUntracked(const Slice& key) = 0; - virtual Status DeleteUntracked(ColumnFamilyHandle* column_family, - const SliceParts& key) = 0; - virtual Status DeleteUntracked(const SliceParts& key) = 0; - virtual Status SingleDeleteUntracked(ColumnFamilyHandle* column_family, - const Slice& key) = 0; - - virtual Status SingleDeleteUntracked(const Slice& key) = 0; - - // Similar to WriteBatch::PutLogData - virtual void PutLogData(const Slice& blob) = 0; - - // By default, all Put/Merge/Delete operations will be indexed in the - // transaction so that Get/GetForUpdate/GetIterator can search for these - // keys. - // - // If the caller does not want to fetch the keys about to be written, - // they may want to avoid indexing as a performance optimization. - // Calling DisableIndexing() will turn off indexing for all future - // Put/Merge/Delete operations until EnableIndexing() is called. - // - // If a key is Put/Merge/Deleted after DisableIndexing is called and then - // is fetched via Get/GetForUpdate/GetIterator, the result of the fetch is - // undefined. - virtual void DisableIndexing() = 0; - virtual void EnableIndexing() = 0; - - // Returns the number of distinct Keys being tracked by this transaction. 
- // If this transaction was created by a TransactionDB, this is the number of - // keys that are currently locked by this transaction. - // If this transaction was created by an OptimisticTransactionDB, this is the - // number of keys that need to be checked for conflicts at commit time. - virtual uint64_t GetNumKeys() const = 0; - - // Returns the number of Puts/Deletes/Merges that have been applied to this - // transaction so far. - virtual uint64_t GetNumPuts() const = 0; - virtual uint64_t GetNumDeletes() const = 0; - virtual uint64_t GetNumMerges() const = 0; - - // Returns the elapsed time in milliseconds since this Transaction began. - virtual uint64_t GetElapsedTime() const = 0; - - // Fetch the underlying write batch that contains all pending changes to be - // committed. - // - // Note: You should not write or delete anything from the batch directly and - // should only use the functions in the Transaction class to - // write to this transaction. - virtual WriteBatchWithIndex* GetWriteBatch() = 0; - - // Change the value of TransactionOptions.lock_timeout (in milliseconds) for - // this transaction. - // Has no effect on OptimisticTransactions. - virtual void SetLockTimeout(int64_t timeout) = 0; - - // Return the WriteOptions that will be used during Commit() - virtual WriteOptions* GetWriteOptions() = 0; - - // Reset the WriteOptions that will be used during Commit(). - virtual void SetWriteOptions(const WriteOptions& write_options) = 0; - - // If this key was previously fetched in this transaction using - // GetForUpdate/MultigetForUpdate(), calling UndoGetForUpdate will tell - // the transaction that it no longer needs to do any conflict checking - // for this key. - // - // If a key has been fetched N times via GetForUpdate/MultigetForUpdate(), - // then UndoGetForUpdate will only have an effect if it is also called N - // times. If this key has been written to in this transaction, - // UndoGetForUpdate() will have no effect. 
- // - // If SetSavePoint() has been called after the GetForUpdate(), - // UndoGetForUpdate() will not have any effect. - // - // If this Transaction was created by an OptimisticTransactionDB, - // calling UndoGetForUpdate can affect whether this key is conflict checked - // at commit time. - // If this Transaction was created by a TransactionDB, - // calling UndoGetForUpdate may release any held locks for this key. - virtual void UndoGetForUpdate(ColumnFamilyHandle* column_family, - const Slice& key) = 0; - virtual void UndoGetForUpdate(const Slice& key) = 0; - - virtual Status RebuildFromWriteBatch(WriteBatch* src_batch) = 0; - - virtual WriteBatch* GetCommitTimeWriteBatch() = 0; - - virtual void SetLogNumber(uint64_t log) { log_number_ = log; } - - virtual uint64_t GetLogNumber() const { return log_number_; } - - virtual Status SetName(const TransactionName& name) = 0; - - virtual TransactionName GetName() const { return name_; } - - virtual TransactionID GetID() const { return 0; } - - virtual bool IsDeadlockDetect() const { return false; } - - virtual std::vector GetWaitingTxns( - uint32_t* /*column_family_id*/, std::string* /*key*/) const { - assert(false); - return std::vector(); - } - - enum TransactionState { - STARTED = 0, - AWAITING_PREPARE = 1, - PREPARED = 2, - AWAITING_COMMIT = 3, - COMMITTED = 4, - COMMITED = COMMITTED, // old misspelled name - AWAITING_ROLLBACK = 5, - ROLLEDBACK = 6, - LOCKS_STOLEN = 7, - }; - - TransactionState GetState() const { return txn_state_; } - void SetState(TransactionState state) { txn_state_ = state; } - - // NOTE: Experimental feature - // The globally unique id with which the transaction is identified. This id - // might or might not be set depending on the implementation. Similarly the - // implementation decides the point in lifetime of a transaction at which it - // assigns the id. Although currently it is the case, the id is not guaranteed - // to remain the same across restarts. 
- uint64_t GetId() { return id_; } - - protected: - explicit Transaction(const TransactionDB* /*db*/) {} - Transaction() : log_number_(0), txn_state_(STARTED) {} - - // the log in which the prepared section for this txn resides - // (for two phase commit) - uint64_t log_number_; - TransactionName name_; - - // Execution status of the transaction. - std::atomic txn_state_; - - uint64_t id_ = 0; - virtual void SetId(uint64_t id) { - assert(id_ == 0); - id_ = id; - } - - virtual uint64_t GetLastLogNumber() const { return log_number_; } - - private: - friend class PessimisticTransactionDB; - friend class WriteUnpreparedTxnDB; - friend class TransactionTest_TwoPhaseLogRollingTest_Test; - friend class TransactionTest_TwoPhaseLogRollingTest2_Test; -}; - -} // namespace ROCKSDB_NAMESPACE - -#endif // ROCKSDB_LITE diff --git a/dist/darwin_amd64/include/rocksdb/utilities/transaction_db.h b/dist/darwin_amd64/include/rocksdb/utilities/transaction_db.h deleted file mode 100644 index 8967b7e..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/transaction_db.h +++ /dev/null @@ -1,313 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once -#ifndef ROCKSDB_LITE - -#include -#include -#include - -#include "rocksdb/comparator.h" -#include "rocksdb/db.h" -#include "rocksdb/utilities/stackable_db.h" -#include "rocksdb/utilities/transaction.h" - -// Database with Transaction support. 
-// -// See transaction.h and examples/transaction_example.cc - -namespace ROCKSDB_NAMESPACE { - -class TransactionDBMutexFactory; - -enum TxnDBWritePolicy { - WRITE_COMMITTED = 0, // write only the committed data - WRITE_PREPARED, // write data after the prepare phase of 2pc - WRITE_UNPREPARED // write data before the prepare phase of 2pc -}; - -const uint32_t kInitialMaxDeadlocks = 5; - -struct TransactionDBOptions { - // Specifies the maximum number of keys that can be locked at the same time - // per column family. - // If the number of locked keys is greater than max_num_locks, transaction - // writes (or GetForUpdate) will return an error. - // If this value is not positive, no limit will be enforced. - int64_t max_num_locks = -1; - - // Stores the number of latest deadlocks to track - uint32_t max_num_deadlocks = kInitialMaxDeadlocks; - - // Increasing this value will increase the concurrency by dividing the lock - // table (per column family) into more sub-tables, each with their own - // separate - // mutex. - size_t num_stripes = 16; - - // If positive, specifies the default wait timeout in milliseconds when - // a transaction attempts to lock a key if not specified by - // TransactionOptions::lock_timeout. - // - // If 0, no waiting is done if a lock cannot instantly be acquired. - // If negative, there is no timeout. Not using a timeout is not recommended - // as it can lead to deadlocks. Currently, there is no deadlock-detection to - // recover - // from a deadlock. - int64_t transaction_lock_timeout = 1000; // 1 second - - // If positive, specifies the wait timeout in milliseconds when writing a key - // OUTSIDE of a transaction (ie by calling DB::Put(),Merge(),Delete(),Write() - // directly). - // If 0, no waiting is done if a lock cannot instantly be acquired. - // If negative, there is no timeout and will block indefinitely when acquiring - // a lock. - // - // Not using a timeout can lead to deadlocks. 
Currently, there - // is no deadlock-detection to recover from a deadlock. While DB writes - // cannot deadlock with other DB writes, they can deadlock with a transaction. - // A negative timeout should only be used if all transactions have a small - // expiration set. - int64_t default_lock_timeout = 1000; // 1 second - - // If set, the TransactionDB will use this implementation of a mutex and - // condition variable for all transaction locking instead of the default - // mutex/condvar implementation. - std::shared_ptr custom_mutex_factory; - - // The policy for when to write the data into the DB. The default policy is to - // write only the committed data (WRITE_COMMITTED). The data could be written - // before the commit phase. The DB then needs to provide the mechanisms to - // tell apart committed from uncommitted data. - TxnDBWritePolicy write_policy = TxnDBWritePolicy::WRITE_COMMITTED; - - // TODO(myabandeh): remove this option - // Note: this is a temporary option as a hot fix in rollback of writeprepared - // txns in myrocks. MyRocks uses merge operands for autoinc column id without - // however obtaining locks. This breaks the assumption behind the rollback - // logic in myrocks. This hack of simply not rolling back merge operands works - // for the special way that myrocks uses this operands. - bool rollback_merge_operands = false; - - // If true, the TransactionDB implementation might skip concurrency control - // unless it is overridden by TransactionOptions or - // TransactionDBWriteOptimizations. This can be used in conjuction with - // DBOptions::unordered_write when the TransactionDB is used solely for write - // ordering rather than concurrency control. - bool skip_concurrency_control = false; - - // This option is only valid for write unprepared. If a write batch exceeds - // this threshold, then the transaction will implicitly flush the currently - // pending writes into the database. A value of 0 or less means no limit. 
- int64_t default_write_batch_flush_threshold = 0; - - private: - // 128 entries - size_t wp_snapshot_cache_bits = static_cast(7); - // 8m entry, 64MB size - size_t wp_commit_cache_bits = static_cast(23); - - // For testing, whether transaction name should be auto-generated or not. This - // is useful for write unprepared which requires named transactions. - bool autogenerate_name = false; - - friend class WritePreparedTxnDB; - friend class WriteUnpreparedTxn; - friend class WritePreparedTransactionTestBase; - friend class TransactionTestBase; - friend class MySQLStyleTransactionTest; -}; - -struct TransactionOptions { - // Setting set_snapshot=true is the same as calling - // Transaction::SetSnapshot(). - bool set_snapshot = false; - - // Setting to true means that before acquiring locks, this transaction will - // check if doing so will cause a deadlock. If so, it will return with - // Status::Busy. The user should retry their transaction. - bool deadlock_detect = false; - - // If set, it states that the CommitTimeWriteBatch represents the latest state - // of the application, has only one sub-batch, i.e., no duplicate keys, and - // meant to be used later during recovery. It enables an optimization to - // postpone updating the memtable with CommitTimeWriteBatch to only - // SwitchMemtable or recovery. - bool use_only_the_last_commit_time_batch_for_recovery = false; - - // TODO(agiardullo): TransactionDB does not yet support comparators that allow - // two non-equal keys to be equivalent. Ie, cmp->Compare(a,b) should only - // return 0 if - // a.compare(b) returns 0. - - // If positive, specifies the wait timeout in milliseconds when - // a transaction attempts to lock a key. - // - // If 0, no waiting is done if a lock cannot instantly be acquired. - // If negative, TransactionDBOptions::transaction_lock_timeout will be used. - int64_t lock_timeout = -1; - - // Expiration duration in milliseconds. 
If non-negative, transactions that - // last longer than this many milliseconds will fail to commit. If not set, - // a forgotten transaction that is never committed, rolled back, or deleted - // will never relinquish any locks it holds. This could prevent keys from - // being written by other writers. - int64_t expiration = -1; - - // The number of traversals to make during deadlock detection. - int64_t deadlock_detect_depth = 50; - - // The maximum number of bytes used for the write batch. 0 means no limit. - size_t max_write_batch_size = 0; - - // Skip Concurrency Control. This could be as an optimization if the - // application knows that the transaction would not have any conflict with - // concurrent transactions. It could also be used during recovery if (i) - // application guarantees no conflict between prepared transactions in the WAL - // (ii) application guarantees that recovered transactions will be rolled - // back/commit before new transactions start. - // Default: false - bool skip_concurrency_control = false; - - // In pessimistic transaction, if this is true, then you can skip Prepare - // before Commit, otherwise, you must Prepare before Commit. - bool skip_prepare = true; - - // See TransactionDBOptions::default_write_batch_flush_threshold for - // description. If a negative value is specified, then the default value from - // TransactionDBOptions is used. - int64_t write_batch_flush_threshold = -1; -}; - -// The per-write optimizations that do not involve transactions. TransactionDB -// implementation might or might not make use of the specified optimizations. -struct TransactionDBWriteOptimizations { - // If it is true it means that the application guarantees that the - // key-set in the write batch do not conflict with any concurrent transaction - // and hence the concurrency control mechanism could be skipped for this - // write. 
- bool skip_concurrency_control = false; - // If true, the application guarantees that there is no duplicate in the write batch and any employed mechanism to handle - // duplicate keys could be skipped. - bool skip_duplicate_key_check = false; -}; - -struct KeyLockInfo { - std::string key; - std::vector ids; - bool exclusive; -}; - -struct DeadlockInfo { - TransactionID m_txn_id; - uint32_t m_cf_id; - bool m_exclusive; - std::string m_waiting_key; -}; - -struct DeadlockPath { - std::vector path; - bool limit_exceeded; - int64_t deadlock_time; - - explicit DeadlockPath(std::vector path_entry, - const int64_t& dl_time) - : path(path_entry), limit_exceeded(false), deadlock_time(dl_time) {} - - // empty path, limit exceeded constructor and default constructor - explicit DeadlockPath(const int64_t& dl_time = 0, bool limit = false) - : path(0), limit_exceeded(limit), deadlock_time(dl_time) {} - - bool empty() { return path.empty() && !limit_exceeded; } -}; - -class TransactionDB : public StackableDB { - public: - // Optimized version of ::Write that receives more optimization request such - // as skip_concurrency_control. - using StackableDB::Write; - virtual Status Write(const WriteOptions& opts, - const TransactionDBWriteOptimizations&, - WriteBatch* updates) { - // The default implementation ignores TransactionDBWriteOptimizations and - // falls back to the un-optimized version of ::Write - return Write(opts, updates); - } - // Open a TransactionDB similar to DB::Open(). - // Internally call PrepareWrap() and WrapDB() - // If the return status is not ok, then dbptr is set to nullptr. 
- static Status Open(const Options& options, - const TransactionDBOptions& txn_db_options, - const std::string& dbname, TransactionDB** dbptr); - - static Status Open(const DBOptions& db_options, - const TransactionDBOptions& txn_db_options, - const std::string& dbname, - const std::vector& column_families, - std::vector* handles, - TransactionDB** dbptr); - // Note: PrepareWrap() may change parameters, make copies before the - // invocation if needed. - static void PrepareWrap(DBOptions* db_options, - std::vector* column_families, - std::vector* compaction_enabled_cf_indices); - // If the return status is not ok, then dbptr will bet set to nullptr. The - // input db parameter might or might not be deleted as a result of the - // failure. If it is properly deleted it will be set to nullptr. If the return - // status is ok, the ownership of db is transferred to dbptr. - static Status WrapDB(DB* db, const TransactionDBOptions& txn_db_options, - const std::vector& compaction_enabled_cf_indices, - const std::vector& handles, - TransactionDB** dbptr); - // If the return status is not ok, then dbptr will bet set to nullptr. The - // input db parameter might or might not be deleted as a result of the - // failure. If it is properly deleted it will be set to nullptr. If the return - // status is ok, the ownership of db is transferred to dbptr. - static Status WrapStackableDB( - StackableDB* db, const TransactionDBOptions& txn_db_options, - const std::vector& compaction_enabled_cf_indices, - const std::vector& handles, TransactionDB** dbptr); - // Since the destructor in StackableDB is virtual, this destructor is virtual - // too. The root db will be deleted by the base's destructor. - ~TransactionDB() override {} - - // Starts a new Transaction. - // - // Caller is responsible for deleting the returned transaction when no - // longer needed. - // - // If old_txn is not null, BeginTransaction will reuse this Transaction - // handle instead of allocating a new one. 
This is an optimization to avoid - // extra allocations when repeatedly creating transactions. - virtual Transaction* BeginTransaction( - const WriteOptions& write_options, - const TransactionOptions& txn_options = TransactionOptions(), - Transaction* old_txn = nullptr) = 0; - - virtual Transaction* GetTransactionByName(const TransactionName& name) = 0; - virtual void GetAllPreparedTransactions(std::vector* trans) = 0; - - // Returns set of all locks held. - // - // The mapping is column family id -> KeyLockInfo - virtual std::unordered_multimap - GetLockStatusData() = 0; - virtual std::vector GetDeadlockInfoBuffer() = 0; - virtual void SetDeadlockInfoBufferSize(uint32_t target_size) = 0; - - protected: - // To Create an TransactionDB, call Open() - // The ownership of db is transferred to the base StackableDB - explicit TransactionDB(DB* db) : StackableDB(db) {} - // No copying allowed - TransactionDB(const TransactionDB&) = delete; - void operator=(const TransactionDB&) = delete; -}; - -} // namespace ROCKSDB_NAMESPACE - -#endif // ROCKSDB_LITE diff --git a/dist/darwin_amd64/include/rocksdb/utilities/transaction_db_mutex.h b/dist/darwin_amd64/include/rocksdb/utilities/transaction_db_mutex.h deleted file mode 100644 index 96a42ad..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/transaction_db_mutex.h +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once -#ifndef ROCKSDB_LITE - -#include - -#include "rocksdb/status.h" - -namespace ROCKSDB_NAMESPACE { - -// TransactionDBMutex and TransactionDBCondVar APIs allows applications to -// implement custom mutexes and condition variables to be used by a -// TransactionDB when locking keys. 
-// -// To open a TransactionDB with a custom TransactionDBMutexFactory, set -// TransactionDBOptions.custom_mutex_factory. - -class TransactionDBMutex { - public: - virtual ~TransactionDBMutex() {} - - // Attempt to acquire lock. Return OK on success, or other Status on failure. - // If returned status is OK, TransactionDB will eventually call UnLock(). - virtual Status Lock() = 0; - - // Attempt to acquire lock. If timeout is non-negative, operation may be - // failed after this many microseconds. - // Returns OK on success, - // TimedOut if timed out, - // or other Status on failure. - // If returned status is OK, TransactionDB will eventually call UnLock(). - virtual Status TryLockFor(int64_t timeout_time) = 0; - - // Unlock Mutex that was successfully locked by Lock() or TryLockUntil() - virtual void UnLock() = 0; -}; - -class TransactionDBCondVar { - public: - virtual ~TransactionDBCondVar() {} - - // Block current thread until condition variable is notified by a call to - // Notify() or NotifyAll(). Wait() will be called with mutex locked. - // Returns OK if notified. - // Returns non-OK if TransactionDB should stop waiting and fail the operation. - // May return OK spuriously even if not notified. - virtual Status Wait(std::shared_ptr mutex) = 0; - - // Block current thread until condition variable is notified by a call to - // Notify() or NotifyAll(), or if the timeout is reached. - // Wait() will be called with mutex locked. - // - // If timeout is non-negative, operation should be failed after this many - // microseconds. - // If implementing a custom version of this class, the implementation may - // choose to ignore the timeout. - // - // Returns OK if notified. - // Returns TimedOut if timeout is reached. - // Returns other status if TransactionDB should otherwis stop waiting and - // fail the operation. - // May return OK spuriously even if not notified. 
- virtual Status WaitFor(std::shared_ptr mutex, - int64_t timeout_time) = 0; - - // If any threads are waiting on *this, unblock at least one of the - // waiting threads. - virtual void Notify() = 0; - - // Unblocks all threads waiting on *this. - virtual void NotifyAll() = 0; -}; - -// Factory class that can allocate mutexes and condition variables. -class TransactionDBMutexFactory { - public: - // Create a TransactionDBMutex object. - virtual std::shared_ptr AllocateMutex() = 0; - - // Create a TransactionDBCondVar object. - virtual std::shared_ptr AllocateCondVar() = 0; - - virtual ~TransactionDBMutexFactory() {} -}; - -} // namespace ROCKSDB_NAMESPACE - -#endif // ROCKSDB_LITE diff --git a/dist/darwin_amd64/include/rocksdb/utilities/utility_db.h b/dist/darwin_amd64/include/rocksdb/utilities/utility_db.h deleted file mode 100644 index cf2e581..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/utility_db.h +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. - -#pragma once -#ifndef ROCKSDB_LITE -#include -#include - -#include "rocksdb/db.h" -#include "rocksdb/utilities/db_ttl.h" -#include "rocksdb/utilities/stackable_db.h" - -namespace ROCKSDB_NAMESPACE { - -// Please don't use this class. It's deprecated -class UtilityDB { - public: - // This function is here only for backwards compatibility. 
Please use the - // functions defined in DBWithTTl (rocksdb/utilities/db_ttl.h) - // (deprecated) -#if defined(__GNUC__) || defined(__clang__) - __attribute__((deprecated)) -#elif _WIN32 - __declspec(deprecated) -#endif - static Status - OpenTtlDB(const Options& options, const std::string& name, - StackableDB** dbptr, int32_t ttl = 0, bool read_only = false); -}; - -} // namespace ROCKSDB_NAMESPACE -#endif // ROCKSDB_LITE diff --git a/dist/darwin_amd64/include/rocksdb/utilities/write_batch_with_index.h b/dist/darwin_amd64/include/rocksdb/utilities/write_batch_with_index.h deleted file mode 100644 index 424aa15..0000000 --- a/dist/darwin_amd64/include/rocksdb/utilities/write_batch_with_index.h +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. -// -// A WriteBatchWithIndex with a binary searchable index built for all the keys -// inserted. -#pragma once - -#ifndef ROCKSDB_LITE - -#include -#include -#include - -#include "rocksdb/comparator.h" -#include "rocksdb/iterator.h" -#include "rocksdb/slice.h" -#include "rocksdb/status.h" -#include "rocksdb/write_batch.h" -#include "rocksdb/write_batch_base.h" - -namespace ROCKSDB_NAMESPACE { - -class ColumnFamilyHandle; -class Comparator; -class DB; -class ReadCallback; -struct ReadOptions; -struct DBOptions; - -enum WriteType { - kPutRecord, - kMergeRecord, - kDeleteRecord, - kSingleDeleteRecord, - kDeleteRangeRecord, - kLogDataRecord, - kXIDRecord, -}; - -// an entry for Put, Merge, Delete, or SingleDelete entry for write batches. -// Used in WBWIIterator. 
-struct WriteEntry { - WriteType type; - Slice key; - Slice value; -}; - -// Iterator of one column family out of a WriteBatchWithIndex. -class WBWIIterator { - public: - virtual ~WBWIIterator() {} - - virtual bool Valid() const = 0; - - virtual void SeekToFirst() = 0; - - virtual void SeekToLast() = 0; - - virtual void Seek(const Slice& key) = 0; - - virtual void SeekForPrev(const Slice& key) = 0; - - virtual void Next() = 0; - - virtual void Prev() = 0; - - // the return WriteEntry is only valid until the next mutation of - // WriteBatchWithIndex - virtual WriteEntry Entry() const = 0; - - virtual Status status() const = 0; -}; - -// A WriteBatchWithIndex with a binary searchable index built for all the keys -// inserted. -// In Put(), Merge() Delete(), or SingleDelete(), the same function of the -// wrapped will be called. At the same time, indexes will be built. -// By calling GetWriteBatch(), a user will get the WriteBatch for the data -// they inserted, which can be used for DB::Write(). -// A user can call NewIterator() to create an iterator. -class WriteBatchWithIndex : public WriteBatchBase { - public: - // backup_index_comparator: the backup comparator used to compare keys - // within the same column family, if column family is not given in the - // interface, or we can't find a column family from the column family handle - // passed in, backup_index_comparator will be used for the column family. - // reserved_bytes: reserved bytes in underlying WriteBatch - // max_bytes: maximum size of underlying WriteBatch in bytes - // overwrite_key: if true, overwrite the key in the index when inserting - // the same key as previously, so iterator will never - // show two entries with the same key. 
- explicit WriteBatchWithIndex( - const Comparator* backup_index_comparator = BytewiseComparator(), - size_t reserved_bytes = 0, bool overwrite_key = false, - size_t max_bytes = 0); - - ~WriteBatchWithIndex() override; - WriteBatchWithIndex(WriteBatchWithIndex&&); - WriteBatchWithIndex& operator=(WriteBatchWithIndex&&); - - using WriteBatchBase::Put; - Status Put(ColumnFamilyHandle* column_family, const Slice& key, - const Slice& value) override; - - Status Put(const Slice& key, const Slice& value) override; - - using WriteBatchBase::Merge; - Status Merge(ColumnFamilyHandle* column_family, const Slice& key, - const Slice& value) override; - - Status Merge(const Slice& key, const Slice& value) override; - - using WriteBatchBase::Delete; - Status Delete(ColumnFamilyHandle* column_family, const Slice& key) override; - Status Delete(const Slice& key) override; - - using WriteBatchBase::SingleDelete; - Status SingleDelete(ColumnFamilyHandle* column_family, - const Slice& key) override; - Status SingleDelete(const Slice& key) override; - - using WriteBatchBase::DeleteRange; - Status DeleteRange(ColumnFamilyHandle* /* column_family */, - const Slice& /* begin_key */, - const Slice& /* end_key */) override { - return Status::NotSupported( - "DeleteRange unsupported in WriteBatchWithIndex"); - } - Status DeleteRange(const Slice& /* begin_key */, - const Slice& /* end_key */) override { - return Status::NotSupported( - "DeleteRange unsupported in WriteBatchWithIndex"); - } - - using WriteBatchBase::PutLogData; - Status PutLogData(const Slice& blob) override; - - using WriteBatchBase::Clear; - void Clear() override; - - using WriteBatchBase::GetWriteBatch; - WriteBatch* GetWriteBatch() override; - - // Create an iterator of a column family. User can call iterator.Seek() to - // search to the next entry of or after a key. Keys will be iterated in the - // order given by index_comparator. 
For multiple updates on the same key, - // each update will be returned as a separate entry, in the order of update - // time. - // - // The returned iterator should be deleted by the caller. - WBWIIterator* NewIterator(ColumnFamilyHandle* column_family); - // Create an iterator of the default column family. - WBWIIterator* NewIterator(); - - // Will create a new Iterator that will use WBWIIterator as a delta and - // base_iterator as base. - // - // This function is only supported if the WriteBatchWithIndex was - // constructed with overwrite_key=true. - // - // The returned iterator should be deleted by the caller. - // The base_iterator is now 'owned' by the returned iterator. Deleting the - // returned iterator will also delete the base_iterator. - // - // Updating write batch with the current key of the iterator is not safe. - // We strongly recommand users not to do it. It will invalidate the current - // key() and value() of the iterator. This invalidation happens even before - // the write batch update finishes. The state may recover after Next() is - // called. - Iterator* NewIteratorWithBase(ColumnFamilyHandle* column_family, - Iterator* base_iterator, - const ReadOptions* opts = nullptr); - // default column family - Iterator* NewIteratorWithBase(Iterator* base_iterator); - - // Similar to DB::Get() but will only read the key from this batch. - // If the batch does not have enough data to resolve Merge operations, - // MergeInProgress status may be returned. - Status GetFromBatch(ColumnFamilyHandle* column_family, - const DBOptions& options, const Slice& key, - std::string* value); - - // Similar to previous function but does not require a column_family. - // Note: An InvalidArgument status will be returned if there are any Merge - // operators for this key. Use previous method instead. 
- Status GetFromBatch(const DBOptions& options, const Slice& key, - std::string* value) { - return GetFromBatch(nullptr, options, key, value); - } - - // Similar to DB::Get() but will also read writes from this batch. - // - // This function will query both this batch and the DB and then merge - // the results using the DB's merge operator (if the batch contains any - // merge requests). - // - // Setting read_options.snapshot will affect what is read from the DB - // but will NOT change which keys are read from the batch (the keys in - // this batch do not yet belong to any snapshot and will be fetched - // regardless). - Status GetFromBatchAndDB(DB* db, const ReadOptions& read_options, - const Slice& key, std::string* value); - - // An overload of the above method that receives a PinnableSlice - Status GetFromBatchAndDB(DB* db, const ReadOptions& read_options, - const Slice& key, PinnableSlice* value); - - Status GetFromBatchAndDB(DB* db, const ReadOptions& read_options, - ColumnFamilyHandle* column_family, const Slice& key, - std::string* value); - - // An overload of the above method that receives a PinnableSlice - Status GetFromBatchAndDB(DB* db, const ReadOptions& read_options, - ColumnFamilyHandle* column_family, const Slice& key, - PinnableSlice* value); - - void MultiGetFromBatchAndDB(DB* db, const ReadOptions& read_options, - ColumnFamilyHandle* column_family, - const size_t num_keys, const Slice* keys, - PinnableSlice* values, Status* statuses, - bool sorted_input); - - // Records the state of the batch for future calls to RollbackToSavePoint(). - // May be called multiple times to set multiple save points. - void SetSavePoint() override; - - // Remove all entries in this batch (Put, Merge, Delete, SingleDelete, - // PutLogData) since the most recent call to SetSavePoint() and removes the - // most recent save point. - // If there is no previous call to SetSavePoint(), behaves the same as - // Clear(). 
- // - // Calling RollbackToSavePoint invalidates any open iterators on this batch. - // - // Returns Status::OK() on success, - // Status::NotFound() if no previous call to SetSavePoint(), - // or other Status on corruption. - Status RollbackToSavePoint() override; - - // Pop the most recent save point. - // If there is no previous call to SetSavePoint(), Status::NotFound() - // will be returned. - // Otherwise returns Status::OK(). - Status PopSavePoint() override; - - void SetMaxBytes(size_t max_bytes) override; - size_t GetDataSize() const; - - private: - friend class PessimisticTransactionDB; - friend class WritePreparedTxn; - friend class WriteUnpreparedTxn; - friend class WriteBatchWithIndex_SubBatchCnt_Test; - // Returns the number of sub-batches inside the write batch. A sub-batch - // starts right before inserting a key that is a duplicate of a key in the - // last sub-batch. - size_t SubBatchCnt(); - - Status GetFromBatchAndDB(DB* db, const ReadOptions& read_options, - ColumnFamilyHandle* column_family, const Slice& key, - PinnableSlice* value, ReadCallback* callback); - void MultiGetFromBatchAndDB(DB* db, const ReadOptions& read_options, - ColumnFamilyHandle* column_family, - const size_t num_keys, const Slice* keys, - PinnableSlice* values, Status* statuses, - bool sorted_input, ReadCallback* callback); - struct Rep; - std::unique_ptr rep; -}; - -} // namespace ROCKSDB_NAMESPACE - -#endif // !ROCKSDB_LITE diff --git a/dist/darwin_amd64/include/rocksdb/version.h b/dist/darwin_amd64/include/rocksdb/version.h deleted file mode 100644 index c2631d0..0000000 --- a/dist/darwin_amd64/include/rocksdb/version.h +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). 
-#pragma once - -#define ROCKSDB_MAJOR 6 -#define ROCKSDB_MINOR 14 -#define ROCKSDB_PATCH 6 - -// Do not use these. We made the mistake of declaring macros starting with -// double underscore. Now we have to live with our choice. We'll deprecate these -// at some point -#define __ROCKSDB_MAJOR__ ROCKSDB_MAJOR -#define __ROCKSDB_MINOR__ ROCKSDB_MINOR -#define __ROCKSDB_PATCH__ ROCKSDB_PATCH diff --git a/dist/darwin_amd64/include/rocksdb/wal_filter.h b/dist/darwin_amd64/include/rocksdb/wal_filter.h deleted file mode 100644 index 98eddc2..0000000 --- a/dist/darwin_amd64/include/rocksdb/wal_filter.h +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#include -#include - -#include "rocksdb/rocksdb_namespace.h" - -namespace ROCKSDB_NAMESPACE { - -class WriteBatch; - -// WALFilter allows an application to inspect write-ahead-log (WAL) -// records or modify their processing on recovery. -// Please see the details below. -class WalFilter { - public: - enum class WalProcessingOption { - // Continue processing as usual - kContinueProcessing = 0, - // Ignore the current record but continue processing of log(s) - kIgnoreCurrentRecord = 1, - // Stop replay of logs and discard logs - // Logs won't be replayed on subsequent recovery - kStopReplay = 2, - // Corrupted record detected by filter - kCorruptedRecord = 3, - // Marker for enum count - kWalProcessingOptionMax = 4 - }; - - virtual ~WalFilter() {} - - // Provide ColumnFamily->LogNumber map to filter - // so that filter can determine whether a log number applies to a given - // column family (i.e. that log hasn't been flushed to SST already for the - // column family). 
- // We also pass in name->id map as only name is known during - // recovery (as handles are opened post-recovery). - // while write batch callbacks happen in terms of column family id. - // - // @params cf_lognumber_map column_family_id to lognumber map - // @params cf_name_id_map column_family_name to column_family_id map - - virtual void ColumnFamilyLogNumberMap( - const std::map& /*cf_lognumber_map*/, - const std::map& /*cf_name_id_map*/) {} - - // LogRecord is invoked for each log record encountered for all the logs - // during replay on logs on recovery. This method can be used to: - // * inspect the record (using the batch parameter) - // * ignoring current record - // (by returning WalProcessingOption::kIgnoreCurrentRecord) - // * reporting corrupted record - // (by returning WalProcessingOption::kCorruptedRecord) - // * stop log replay - // (by returning kStop replay) - please note that this implies - // discarding the logs from current record onwards. - // - // @params log_number log_number of the current log. - // Filter might use this to determine if the log - // record is applicable to a certain column family. - // @params log_file_name log file name - only for informational purposes - // @params batch batch encountered in the log during recovery - // @params new_batch new_batch to populate if filter wants to change - // the batch (for example to filter some records out, - // or alter some records). - // Please note that the new batch MUST NOT contain - // more records than original, else recovery would - // be failed. - // @params batch_changed Whether batch was changed by the filter. - // It must be set to true if new_batch was populated, - // else new_batch has no effect. - // @returns Processing option for the current record. - // Please see WalProcessingOption enum above for - // details. 
- virtual WalProcessingOption LogRecordFound( - unsigned long long /*log_number*/, const std::string& /*log_file_name*/, - const WriteBatch& batch, WriteBatch* new_batch, bool* batch_changed) { - // Default implementation falls back to older function for compatibility - return LogRecord(batch, new_batch, batch_changed); - } - - // Please see the comments for LogRecord above. This function is for - // compatibility only and contains a subset of parameters. - // New code should use the function above. - virtual WalProcessingOption LogRecord(const WriteBatch& /*batch*/, - WriteBatch* /*new_batch*/, - bool* /*batch_changed*/) const { - return WalProcessingOption::kContinueProcessing; - } - - // Returns a name that identifies this WAL filter. - // The name will be printed to LOG file on start up for diagnosis. - virtual const char* Name() const = 0; -}; - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/write_batch.h b/dist/darwin_amd64/include/rocksdb/write_batch.h deleted file mode 100644 index 51fd4d8..0000000 --- a/dist/darwin_amd64/include/rocksdb/write_batch.h +++ /dev/null @@ -1,377 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. -// -// WriteBatch holds a collection of updates to apply atomically to a DB. -// -// The updates are applied in the order in which they are added -// to the WriteBatch. 
For example, the value of "key" will be "v3" -// after the following batch is written: -// -// batch.Put("key", "v1"); -// batch.Delete("key"); -// batch.Put("key", "v2"); -// batch.Put("key", "v3"); -// -// Multiple threads can invoke const methods on a WriteBatch without -// external synchronization, but if any of the threads may call a -// non-const method, all threads accessing the same WriteBatch must use -// external synchronization. - -#pragma once - -#include -#include -#include -#include -#include -#include "rocksdb/status.h" -#include "rocksdb/write_batch_base.h" - -namespace ROCKSDB_NAMESPACE { - -class Slice; -class ColumnFamilyHandle; -struct SavePoints; -struct SliceParts; - -struct SavePoint { - size_t size; // size of rep_ - int count; // count of elements in rep_ - uint32_t content_flags; - - SavePoint() : size(0), count(0), content_flags(0) {} - - SavePoint(size_t _size, int _count, uint32_t _flags) - : size(_size), count(_count), content_flags(_flags) {} - - void clear() { - size = 0; - count = 0; - content_flags = 0; - } - - bool is_cleared() const { return (size | count | content_flags) == 0; } -}; - -class WriteBatch : public WriteBatchBase { - public: - explicit WriteBatch(size_t reserved_bytes = 0, size_t max_bytes = 0); - explicit WriteBatch(size_t reserved_bytes, size_t max_bytes, size_t ts_sz); - ~WriteBatch() override; - - using WriteBatchBase::Put; - // Store the mapping "key->value" in the database. - Status Put(ColumnFamilyHandle* column_family, const Slice& key, - const Slice& value) override; - Status Put(const Slice& key, const Slice& value) override { - return Put(nullptr, key, value); - } - - // Variant of Put() that gathers output like writev(2). The key and value - // that will be written to the database are concatenations of arrays of - // slices. 
- Status Put(ColumnFamilyHandle* column_family, const SliceParts& key, - const SliceParts& value) override; - Status Put(const SliceParts& key, const SliceParts& value) override { - return Put(nullptr, key, value); - } - - using WriteBatchBase::Delete; - // If the database contains a mapping for "key", erase it. Else do nothing. - Status Delete(ColumnFamilyHandle* column_family, const Slice& key) override; - Status Delete(const Slice& key) override { return Delete(nullptr, key); } - - // variant that takes SliceParts - Status Delete(ColumnFamilyHandle* column_family, - const SliceParts& key) override; - Status Delete(const SliceParts& key) override { return Delete(nullptr, key); } - - using WriteBatchBase::SingleDelete; - // WriteBatch implementation of DB::SingleDelete(). See db.h. - Status SingleDelete(ColumnFamilyHandle* column_family, - const Slice& key) override; - Status SingleDelete(const Slice& key) override { - return SingleDelete(nullptr, key); - } - - // variant that takes SliceParts - Status SingleDelete(ColumnFamilyHandle* column_family, - const SliceParts& key) override; - Status SingleDelete(const SliceParts& key) override { - return SingleDelete(nullptr, key); - } - - using WriteBatchBase::DeleteRange; - // WriteBatch implementation of DB::DeleteRange(). See db.h. - Status DeleteRange(ColumnFamilyHandle* column_family, const Slice& begin_key, - const Slice& end_key) override; - Status DeleteRange(const Slice& begin_key, const Slice& end_key) override { - return DeleteRange(nullptr, begin_key, end_key); - } - - // variant that takes SliceParts - Status DeleteRange(ColumnFamilyHandle* column_family, - const SliceParts& begin_key, - const SliceParts& end_key) override; - Status DeleteRange(const SliceParts& begin_key, - const SliceParts& end_key) override { - return DeleteRange(nullptr, begin_key, end_key); - } - - using WriteBatchBase::Merge; - // Merge "value" with the existing value of "key" in the database. 
- // "key->merge(existing, value)" - Status Merge(ColumnFamilyHandle* column_family, const Slice& key, - const Slice& value) override; - Status Merge(const Slice& key, const Slice& value) override { - return Merge(nullptr, key, value); - } - - // variant that takes SliceParts - Status Merge(ColumnFamilyHandle* column_family, const SliceParts& key, - const SliceParts& value) override; - Status Merge(const SliceParts& key, const SliceParts& value) override { - return Merge(nullptr, key, value); - } - - using WriteBatchBase::PutLogData; - // Append a blob of arbitrary size to the records in this batch. The blob will - // be stored in the transaction log but not in any other file. In particular, - // it will not be persisted to the SST files. When iterating over this - // WriteBatch, WriteBatch::Handler::LogData will be called with the contents - // of the blob as it is encountered. Blobs, puts, deletes, and merges will be - // encountered in the same order in which they were inserted. The blob will - // NOT consume sequence number(s) and will NOT increase the count of the batch - // - // Example application: add timestamps to the transaction log for use in - // replication. - Status PutLogData(const Slice& blob) override; - - using WriteBatchBase::Clear; - // Clear all updates buffered in this batch. - void Clear() override; - - // Records the state of the batch for future calls to RollbackToSavePoint(). - // May be called multiple times to set multiple save points. - void SetSavePoint() override; - - // Remove all entries in this batch (Put, Merge, Delete, PutLogData) since the - // most recent call to SetSavePoint() and removes the most recent save point. - // If there is no previous call to SetSavePoint(), Status::NotFound() - // will be returned. - // Otherwise returns Status::OK(). - Status RollbackToSavePoint() override; - - // Pop the most recent save point. - // If there is no previous call to SetSavePoint(), Status::NotFound() - // will be returned. 
- // Otherwise returns Status::OK(). - Status PopSavePoint() override; - - // Support for iterating over the contents of a batch. - class Handler { - public: - virtual ~Handler(); - // All handler functions in this class provide default implementations so - // we won't break existing clients of Handler on a source code level when - // adding a new member function. - - // default implementation will just call Put without column family for - // backwards compatibility. If the column family is not default, - // the function is noop - virtual Status PutCF(uint32_t column_family_id, const Slice& key, - const Slice& value) { - if (column_family_id == 0) { - // Put() historically doesn't return status. We didn't want to be - // backwards incompatible so we didn't change the return status - // (this is a public API). We do an ordinary get and return Status::OK() - Put(key, value); - return Status::OK(); - } - return Status::InvalidArgument( - "non-default column family and PutCF not implemented"); - } - virtual void Put(const Slice& /*key*/, const Slice& /*value*/) {} - - virtual Status DeleteCF(uint32_t column_family_id, const Slice& key) { - if (column_family_id == 0) { - Delete(key); - return Status::OK(); - } - return Status::InvalidArgument( - "non-default column family and DeleteCF not implemented"); - } - virtual void Delete(const Slice& /*key*/) {} - - virtual Status SingleDeleteCF(uint32_t column_family_id, const Slice& key) { - if (column_family_id == 0) { - SingleDelete(key); - return Status::OK(); - } - return Status::InvalidArgument( - "non-default column family and SingleDeleteCF not implemented"); - } - virtual void SingleDelete(const Slice& /*key*/) {} - - virtual Status DeleteRangeCF(uint32_t /*column_family_id*/, - const Slice& /*begin_key*/, - const Slice& /*end_key*/) { - return Status::InvalidArgument("DeleteRangeCF not implemented"); - } - - virtual Status MergeCF(uint32_t column_family_id, const Slice& key, - const Slice& value) { - if 
(column_family_id == 0) { - Merge(key, value); - return Status::OK(); - } - return Status::InvalidArgument( - "non-default column family and MergeCF not implemented"); - } - virtual void Merge(const Slice& /*key*/, const Slice& /*value*/) {} - - virtual Status PutBlobIndexCF(uint32_t /*column_family_id*/, - const Slice& /*key*/, - const Slice& /*value*/) { - return Status::InvalidArgument("PutBlobIndexCF not implemented"); - } - - // The default implementation of LogData does nothing. - virtual void LogData(const Slice& blob); - - virtual Status MarkBeginPrepare(bool = false) { - return Status::InvalidArgument("MarkBeginPrepare() handler not defined."); - } - - virtual Status MarkEndPrepare(const Slice& /*xid*/) { - return Status::InvalidArgument("MarkEndPrepare() handler not defined."); - } - - virtual Status MarkNoop(bool /*empty_batch*/) { - return Status::InvalidArgument("MarkNoop() handler not defined."); - } - - virtual Status MarkRollback(const Slice& /*xid*/) { - return Status::InvalidArgument( - "MarkRollbackPrepare() handler not defined."); - } - - virtual Status MarkCommit(const Slice& /*xid*/) { - return Status::InvalidArgument("MarkCommit() handler not defined."); - } - - // Continue is called by WriteBatch::Iterate. If it returns false, - // iteration is halted. Otherwise, it continues iterating. The default - // implementation always returns true. - virtual bool Continue(); - - protected: - friend class WriteBatchInternal; - virtual bool WriteAfterCommit() const { return true; } - virtual bool WriteBeforePrepare() const { return false; } - }; - Status Iterate(Handler* handler) const; - - // Retrieve the serialized version of this batch. - const std::string& Data() const { return rep_; } - - // Retrieve data size of the batch. 
- size_t GetDataSize() const { return rep_.size(); } - - // Returns the number of updates in the batch - uint32_t Count() const; - - // Returns true if PutCF will be called during Iterate - bool HasPut() const; - - // Returns true if DeleteCF will be called during Iterate - bool HasDelete() const; - - // Returns true if SingleDeleteCF will be called during Iterate - bool HasSingleDelete() const; - - // Returns true if DeleteRangeCF will be called during Iterate - bool HasDeleteRange() const; - - // Returns true if MergeCF will be called during Iterate - bool HasMerge() const; - - // Returns true if MarkBeginPrepare will be called during Iterate - bool HasBeginPrepare() const; - - // Returns true if MarkEndPrepare will be called during Iterate - bool HasEndPrepare() const; - - // Returns trie if MarkCommit will be called during Iterate - bool HasCommit() const; - - // Returns trie if MarkRollback will be called during Iterate - bool HasRollback() const; - - // Assign timestamp to write batch - Status AssignTimestamp(const Slice& ts); - - // Assign timestamps to write batch - Status AssignTimestamps(const std::vector& ts_list); - - using WriteBatchBase::GetWriteBatch; - WriteBatch* GetWriteBatch() override { return this; } - - // Constructor with a serialized string object - explicit WriteBatch(const std::string& rep); - explicit WriteBatch(std::string&& rep); - - WriteBatch(const WriteBatch& src); - WriteBatch(WriteBatch&& src) noexcept; - WriteBatch& operator=(const WriteBatch& src); - WriteBatch& operator=(WriteBatch&& src); - - // marks this point in the WriteBatch as the last record to - // be inserted into the WAL, provided the WAL is enabled - void MarkWalTerminationPoint(); - const SavePoint& GetWalTerminationPoint() const { return wal_term_point_; } - - void SetMaxBytes(size_t max_bytes) override { max_bytes_ = max_bytes; } - - private: - friend class WriteBatchInternal; - friend class LocalSavePoint; - // TODO(myabandeh): this is needed for a hack to 
collapse the write batch and - // remove duplicate keys. Remove it when the hack is replaced with a proper - // solution. - friend class WriteBatchWithIndex; - std::unique_ptr save_points_; - - // When sending a WriteBatch through WriteImpl we might want to - // specify that only the first x records of the batch be written to - // the WAL. - SavePoint wal_term_point_; - - // For HasXYZ. Mutable to allow lazy computation of results - mutable std::atomic content_flags_; - - // Performs deferred computation of content_flags if necessary - uint32_t ComputeContentFlags() const; - - // Maximum size of rep_. - size_t max_bytes_; - - // Is the content of the batch the application's latest state that meant only - // to be used for recovery? Refer to - // TransactionOptions::use_only_the_last_commit_time_batch_for_recovery for - // more details. - bool is_latest_persistent_state_ = false; - - protected: - std::string rep_; // See comment in write_batch.cc for the format of rep_ - const size_t timestamp_size_; - - // Intentionally copyable -}; - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/write_batch_base.h b/dist/darwin_amd64/include/rocksdb/write_batch_base.h deleted file mode 100644 index 19ff877..0000000 --- a/dist/darwin_amd64/include/rocksdb/write_batch_base.h +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. 
- -#pragma once - -#include - -#include "rocksdb/rocksdb_namespace.h" - -namespace ROCKSDB_NAMESPACE { - -class Slice; -class Status; -class ColumnFamilyHandle; -class WriteBatch; -struct SliceParts; - -// Abstract base class that defines the basic interface for a write batch. -// See WriteBatch for a basic implementation and WrithBatchWithIndex for an -// indexed implementation. -class WriteBatchBase { - public: - virtual ~WriteBatchBase() {} - - // Store the mapping "key->value" in the database. - virtual Status Put(ColumnFamilyHandle* column_family, const Slice& key, - const Slice& value) = 0; - virtual Status Put(const Slice& key, const Slice& value) = 0; - - // Variant of Put() that gathers output like writev(2). The key and value - // that will be written to the database are concatenations of arrays of - // slices. - virtual Status Put(ColumnFamilyHandle* column_family, const SliceParts& key, - const SliceParts& value); - virtual Status Put(const SliceParts& key, const SliceParts& value); - - // Merge "value" with the existing value of "key" in the database. - // "key->merge(existing, value)" - virtual Status Merge(ColumnFamilyHandle* column_family, const Slice& key, - const Slice& value) = 0; - virtual Status Merge(const Slice& key, const Slice& value) = 0; - - // variant that takes SliceParts - virtual Status Merge(ColumnFamilyHandle* column_family, const SliceParts& key, - const SliceParts& value); - virtual Status Merge(const SliceParts& key, const SliceParts& value); - - // If the database contains a mapping for "key", erase it. Else do nothing. - virtual Status Delete(ColumnFamilyHandle* column_family, - const Slice& key) = 0; - virtual Status Delete(const Slice& key) = 0; - - // variant that takes SliceParts - virtual Status Delete(ColumnFamilyHandle* column_family, - const SliceParts& key); - virtual Status Delete(const SliceParts& key); - - // If the database contains a mapping for "key", erase it. Expects that the - // key was not overwritten. 
Else do nothing. - virtual Status SingleDelete(ColumnFamilyHandle* column_family, - const Slice& key) = 0; - virtual Status SingleDelete(const Slice& key) = 0; - - // variant that takes SliceParts - virtual Status SingleDelete(ColumnFamilyHandle* column_family, - const SliceParts& key); - virtual Status SingleDelete(const SliceParts& key); - - // If the database contains mappings in the range ["begin_key", "end_key"), - // erase them. Else do nothing. - virtual Status DeleteRange(ColumnFamilyHandle* column_family, - const Slice& begin_key, const Slice& end_key) = 0; - virtual Status DeleteRange(const Slice& begin_key, const Slice& end_key) = 0; - - // variant that takes SliceParts - virtual Status DeleteRange(ColumnFamilyHandle* column_family, - const SliceParts& begin_key, - const SliceParts& end_key); - virtual Status DeleteRange(const SliceParts& begin_key, - const SliceParts& end_key); - - // Append a blob of arbitrary size to the records in this batch. The blob will - // be stored in the transaction log but not in any other file. In particular, - // it will not be persisted to the SST files. When iterating over this - // WriteBatch, WriteBatch::Handler::LogData will be called with the contents - // of the blob as it is encountered. Blobs, puts, deletes, and merges will be - // encountered in the same order in which they were inserted. The blob will - // NOT consume sequence number(s) and will NOT increase the count of the batch - // - // Example application: add timestamps to the transaction log for use in - // replication. - virtual Status PutLogData(const Slice& blob) = 0; - - // Clear all updates buffered in this batch. - virtual void Clear() = 0; - - // Covert this batch into a WriteBatch. This is an abstracted way of - // converting any WriteBatchBase(eg WriteBatchWithIndex) into a basic - // WriteBatch. - virtual WriteBatch* GetWriteBatch() = 0; - - // Records the state of the batch for future calls to RollbackToSavePoint(). 
- // May be called multiple times to set multiple save points. - virtual void SetSavePoint() = 0; - - // Remove all entries in this batch (Put, Merge, Delete, PutLogData) since the - // most recent call to SetSavePoint() and removes the most recent save point. - // If there is no previous call to SetSavePoint(), behaves the same as - // Clear(). - virtual Status RollbackToSavePoint() = 0; - - // Pop the most recent save point. - // If there is no previous call to SetSavePoint(), Status::NotFound() - // will be returned. - // Otherwise returns Status::OK(). - virtual Status PopSavePoint() = 0; - - // Sets the maximum size of the write batch in bytes. 0 means no limit. - virtual void SetMaxBytes(size_t max_bytes) = 0; -}; - -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/write_buffer_manager.h b/dist/darwin_amd64/include/rocksdb/write_buffer_manager.h deleted file mode 100644 index ae1c98c..0000000 --- a/dist/darwin_amd64/include/rocksdb/write_buffer_manager.h +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. -// -// WriteBufferManager is for managing memory allocation for one or more -// MemTables. - -#pragma once - -#include -#include -#include "rocksdb/cache.h" - -namespace ROCKSDB_NAMESPACE { - -class WriteBufferManager { - public: - // _buffer_size = 0 indicates no limit. Memory won't be capped. - // memory_usage() won't be valid and ShouldFlush() will always return true. 
- // if `cache` is provided, we'll put dummy entries in the cache and cost - // the memory allocated to the cache. It can be used even if _buffer_size = 0. - explicit WriteBufferManager(size_t _buffer_size, - std::shared_ptr cache = {}); - // No copying allowed - WriteBufferManager(const WriteBufferManager&) = delete; - WriteBufferManager& operator=(const WriteBufferManager&) = delete; - - ~WriteBufferManager(); - - bool enabled() const { return buffer_size_ != 0; } - - bool cost_to_cache() const { return cache_rep_ != nullptr; } - - // Only valid if enabled() - size_t memory_usage() const { - return memory_used_.load(std::memory_order_relaxed); - } - size_t mutable_memtable_memory_usage() const { - return memory_active_.load(std::memory_order_relaxed); - } - size_t buffer_size() const { return buffer_size_; } - - // Should only be called from write thread - bool ShouldFlush() const { - if (enabled()) { - if (mutable_memtable_memory_usage() > mutable_limit_) { - return true; - } - if (memory_usage() >= buffer_size_ && - mutable_memtable_memory_usage() >= buffer_size_ / 2) { - // If the memory exceeds the buffer size, we trigger more aggressive - // flush. But if already more than half memory is being flushed, - // triggering more flush may not help. We will hold it instead. - return true; - } - } - return false; - } - - void ReserveMem(size_t mem) { - if (cache_rep_ != nullptr) { - ReserveMemWithCache(mem); - } else if (enabled()) { - memory_used_.fetch_add(mem, std::memory_order_relaxed); - } - if (enabled()) { - memory_active_.fetch_add(mem, std::memory_order_relaxed); - } - } - // We are in the process of freeing `mem` bytes, so it is not considered - // when checking the soft limit. 
- void ScheduleFreeMem(size_t mem) { - if (enabled()) { - memory_active_.fetch_sub(mem, std::memory_order_relaxed); - } - } - void FreeMem(size_t mem) { - if (cache_rep_ != nullptr) { - FreeMemWithCache(mem); - } else if (enabled()) { - memory_used_.fetch_sub(mem, std::memory_order_relaxed); - } - } - - private: - const size_t buffer_size_; - const size_t mutable_limit_; - std::atomic memory_used_; - // Memory that hasn't been scheduled to free. - std::atomic memory_active_; - struct CacheRep; - std::unique_ptr cache_rep_; - - void ReserveMemWithCache(size_t mem); - void FreeMemWithCache(size_t mem); -}; -} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/snappy-c.h b/dist/darwin_amd64/include/snappy-c.h deleted file mode 100644 index 32aa0c6..0000000 --- a/dist/darwin_amd64/include/snappy-c.h +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Copyright 2011 Martin Gieseking . - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * Plain C interface (a wrapper around the C++ implementation). - */ - -#ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_C_H_ -#define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_C_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#include - -/* - * Return values; see the documentation for each function to know - * what each can return. - */ -typedef enum { - SNAPPY_OK = 0, - SNAPPY_INVALID_INPUT = 1, - SNAPPY_BUFFER_TOO_SMALL = 2 -} snappy_status; - -/* - * Takes the data stored in "input[0..input_length-1]" and stores - * it in the array pointed to by "compressed". - * - * signals the space available in "compressed". - * If it is not at least equal to "snappy_max_compressed_length(input_length)", - * SNAPPY_BUFFER_TOO_SMALL is returned. After successful compression, - * contains the true length of the compressed output, - * and SNAPPY_OK is returned. - * - * Example: - * size_t output_length = snappy_max_compressed_length(input_length); - * char* output = (char*)malloc(output_length); - * if (snappy_compress(input, input_length, output, &output_length) - * == SNAPPY_OK) { - * ... Process(output, output_length) ... 
- * } - * free(output); - */ -snappy_status snappy_compress(const char* input, - size_t input_length, - char* compressed, - size_t* compressed_length); - -/* - * Given data in "compressed[0..compressed_length-1]" generated by - * calling the snappy_compress routine, this routine stores - * the uncompressed data to - * uncompressed[0..uncompressed_length-1]. - * Returns failure (a value not equal to SNAPPY_OK) if the message - * is corrupted and could not be decrypted. - * - * signals the space available in "uncompressed". - * If it is not at least equal to the value returned by - * snappy_uncompressed_length for this stream, SNAPPY_BUFFER_TOO_SMALL - * is returned. After successful decompression, - * contains the true length of the decompressed output. - * - * Example: - * size_t output_length; - * if (snappy_uncompressed_length(input, input_length, &output_length) - * != SNAPPY_OK) { - * ... fail ... - * } - * char* output = (char*)malloc(output_length); - * if (snappy_uncompress(input, input_length, output, &output_length) - * == SNAPPY_OK) { - * ... Process(output, output_length) ... - * } - * free(output); - */ -snappy_status snappy_uncompress(const char* compressed, - size_t compressed_length, - char* uncompressed, - size_t* uncompressed_length); - -/* - * Returns the maximal size of the compressed representation of - * input data that is "source_length" bytes in length. - */ -size_t snappy_max_compressed_length(size_t source_length); - -/* - * REQUIRES: "compressed[]" was produced by snappy_compress() - * Returns SNAPPY_OK and stores the length of the uncompressed data in - * *result normally. Returns SNAPPY_INVALID_INPUT on parsing error. - * This operation takes O(1) time. - */ -snappy_status snappy_uncompressed_length(const char* compressed, - size_t compressed_length, - size_t* result); - -/* - * Check if the contents of "compressed[]" can be uncompressed successfully. 
- * Does not return the uncompressed data; if so, returns SNAPPY_OK, - * or if not, returns SNAPPY_INVALID_INPUT. - * Takes time proportional to compressed_length, but is usually at least a - * factor of four faster than actual decompression. - */ -snappy_status snappy_validate_compressed_buffer(const char* compressed, - size_t compressed_length); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif /* THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_C_H_ */ diff --git a/dist/darwin_amd64/include/snappy-sinksource.h b/dist/darwin_amd64/include/snappy-sinksource.h deleted file mode 100644 index 8afcdaa..0000000 --- a/dist/darwin_amd64/include/snappy-sinksource.h +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2011 Google Inc. All Rights Reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef THIRD_PARTY_SNAPPY_SNAPPY_SINKSOURCE_H_ -#define THIRD_PARTY_SNAPPY_SNAPPY_SINKSOURCE_H_ - -#include - -namespace snappy { - -// A Sink is an interface that consumes a sequence of bytes. -class Sink { - public: - Sink() { } - virtual ~Sink(); - - // Append "bytes[0,n-1]" to this. - virtual void Append(const char* bytes, size_t n) = 0; - - // Returns a writable buffer of the specified length for appending. - // May return a pointer to the caller-owned scratch buffer which - // must have at least the indicated length. The returned buffer is - // only valid until the next operation on this Sink. - // - // After writing at most "length" bytes, call Append() with the - // pointer returned from this function and the number of bytes - // written. Many Append() implementations will avoid copying - // bytes if this function returned an internal buffer. - // - // If a non-scratch buffer is returned, the caller may only pass a - // prefix of it to Append(). That is, it is not correct to pass an - // interior pointer of the returned array to Append(). - // - // The default implementation always returns the scratch buffer. - virtual char* GetAppendBuffer(size_t length, char* scratch); - - // For higher performance, Sink implementations can provide custom - // AppendAndTakeOwnership() and GetAppendBufferVariable() methods. - // These methods can reduce the number of copies done during - // compression/decompression. 
- - // Append "bytes[0,n-1] to the sink. Takes ownership of "bytes" - // and calls the deleter function as (*deleter)(deleter_arg, bytes, n) - // to free the buffer. deleter function must be non NULL. - // - // The default implementation just calls Append and frees "bytes". - // Other implementations may avoid a copy while appending the buffer. - virtual void AppendAndTakeOwnership( - char* bytes, size_t n, void (*deleter)(void*, const char*, size_t), - void *deleter_arg); - - // Returns a writable buffer for appending and writes the buffer's capacity to - // *allocated_size. Guarantees *allocated_size >= min_size. - // May return a pointer to the caller-owned scratch buffer which must have - // scratch_size >= min_size. - // - // The returned buffer is only valid until the next operation - // on this ByteSink. - // - // After writing at most *allocated_size bytes, call Append() with the - // pointer returned from this function and the number of bytes written. - // Many Append() implementations will avoid copying bytes if this function - // returned an internal buffer. - // - // If the sink implementation allocates or reallocates an internal buffer, - // it should use the desired_size_hint if appropriate. If a caller cannot - // provide a reasonable guess at the desired capacity, it should set - // desired_size_hint = 0. - // - // If a non-scratch buffer is returned, the caller may only pass - // a prefix to it to Append(). That is, it is not correct to pass an - // interior pointer to Append(). - // - // The default implementation always returns the scratch buffer. 
- virtual char* GetAppendBufferVariable( - size_t min_size, size_t desired_size_hint, char* scratch, - size_t scratch_size, size_t* allocated_size); - - private: - // No copying - Sink(const Sink&); - void operator=(const Sink&); -}; - -// A Source is an interface that yields a sequence of bytes -class Source { - public: - Source() { } - virtual ~Source(); - - // Return the number of bytes left to read from the source - virtual size_t Available() const = 0; - - // Peek at the next flat region of the source. Does not reposition - // the source. The returned region is empty iff Available()==0. - // - // Returns a pointer to the beginning of the region and store its - // length in *len. - // - // The returned region is valid until the next call to Skip() or - // until this object is destroyed, whichever occurs first. - // - // The returned region may be larger than Available() (for example - // if this ByteSource is a view on a substring of a larger source). - // The caller is responsible for ensuring that it only reads the - // Available() bytes. - virtual const char* Peek(size_t* len) = 0; - - // Skip the next n bytes. Invalidates any buffer returned by - // a previous call to Peek(). - // REQUIRES: Available() >= n - virtual void Skip(size_t n) = 0; - - private: - // No copying - Source(const Source&); - void operator=(const Source&); -}; - -// A Source implementation that yields the contents of a flat array -class ByteArraySource : public Source { - public: - ByteArraySource(const char* p, size_t n) : ptr_(p), left_(n) { } - virtual ~ByteArraySource(); - virtual size_t Available() const; - virtual const char* Peek(size_t* len); - virtual void Skip(size_t n); - private: - const char* ptr_; - size_t left_; -}; - -// A Sink implementation that writes to a flat array without any bound checks. 
-class UncheckedByteArraySink : public Sink { - public: - explicit UncheckedByteArraySink(char* dest) : dest_(dest) { } - virtual ~UncheckedByteArraySink(); - virtual void Append(const char* data, size_t n); - virtual char* GetAppendBuffer(size_t len, char* scratch); - virtual char* GetAppendBufferVariable( - size_t min_size, size_t desired_size_hint, char* scratch, - size_t scratch_size, size_t* allocated_size); - virtual void AppendAndTakeOwnership( - char* bytes, size_t n, void (*deleter)(void*, const char*, size_t), - void *deleter_arg); - - // Return the current output pointer so that a caller can see how - // many bytes were produced. - // Note: this is not a Sink method. - char* CurrentDestination() const { return dest_; } - private: - char* dest_; -}; - -} // namespace snappy - -#endif // THIRD_PARTY_SNAPPY_SNAPPY_SINKSOURCE_H_ diff --git a/dist/darwin_amd64/include/snappy-stubs-public.h b/dist/darwin_amd64/include/snappy-stubs-public.h deleted file mode 100644 index a46c309..0000000 --- a/dist/darwin_amd64/include/snappy-stubs-public.h +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2011 Google Inc. All Rights Reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// Various type stubs for the open-source version of Snappy. -// -// This file cannot include config.h, as it is included from snappy.h, -// which is a public header. Instead, snappy-stubs-public.h is generated by -// from snappy-stubs-public.h.in at configure time. - -#ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_ -#define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_ - -#include -#include -#include - -#if 1 // HAVE_SYS_UIO_H -#include -#endif // HAVE_SYS_UIO_H - -#define SNAPPY_MAJOR 1 -#define SNAPPY_MINOR 1 -#define SNAPPY_PATCHLEVEL 8 -#define SNAPPY_VERSION \ - ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL) - -namespace snappy { - -using int8 = std::int8_t; -using uint8 = std::uint8_t; -using int16 = std::int16_t; -using uint16 = std::uint16_t; -using int32 = std::int32_t; -using uint32 = std::uint32_t; -using int64 = std::int64_t; -using uint64 = std::uint64_t; - -#if !1 // !HAVE_SYS_UIO_H -// Windows does not have an iovec type, yet the concept is universally useful. -// It is simple to define it ourselves, so we put it inside our own namespace. 
-struct iovec { - void* iov_base; - size_t iov_len; -}; -#endif // !HAVE_SYS_UIO_H - -} // namespace snappy - -#endif // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_ diff --git a/dist/darwin_amd64/include/snappy.h b/dist/darwin_amd64/include/snappy.h deleted file mode 100644 index e9805bf..0000000 --- a/dist/darwin_amd64/include/snappy.h +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2005 and onwards Google Inc. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// A light-weight compression algorithm. 
It is designed for speed of -// compression and decompression, rather than for the utmost in space -// savings. -// -// For getting better compression ratios when you are compressing data -// with long repeated sequences or compressing data that is similar to -// other data, while still compressing fast, you might look at first -// using BMDiff and then compressing the output of BMDiff with -// Snappy. - -#ifndef THIRD_PARTY_SNAPPY_SNAPPY_H__ -#define THIRD_PARTY_SNAPPY_SNAPPY_H__ - -#include -#include - -#include "snappy-stubs-public.h" - -namespace snappy { - class Source; - class Sink; - - // ------------------------------------------------------------------------ - // Generic compression/decompression routines. - // ------------------------------------------------------------------------ - - // Compress the bytes read from "*source" and append to "*sink". Return the - // number of bytes written. - size_t Compress(Source* source, Sink* sink); - - // Find the uncompressed length of the given stream, as given by the header. - // Note that the true length could deviate from this; the stream could e.g. - // be truncated. - // - // Also note that this leaves "*source" in a state that is unsuitable for - // further operations, such as RawUncompress(). You will need to rewind - // or recreate the source yourself before attempting any further calls. - bool GetUncompressedLength(Source* source, uint32* result); - - // ------------------------------------------------------------------------ - // Higher-level string based routines (should be sufficient for most users) - // ------------------------------------------------------------------------ - - // Sets "*compressed" to the compressed version of "input[0,input_length-1]". - // Original contents of *compressed are lost. - // - // REQUIRES: "input[]" is not an alias of "*compressed". 
- size_t Compress(const char* input, size_t input_length, - std::string* compressed); - - // Decompresses "compressed[0,compressed_length-1]" to "*uncompressed". - // Original contents of "*uncompressed" are lost. - // - // REQUIRES: "compressed[]" is not an alias of "*uncompressed". - // - // returns false if the message is corrupted and could not be decompressed - bool Uncompress(const char* compressed, size_t compressed_length, - std::string* uncompressed); - - // Decompresses "compressed" to "*uncompressed". - // - // returns false if the message is corrupted and could not be decompressed - bool Uncompress(Source* compressed, Sink* uncompressed); - - // This routine uncompresses as much of the "compressed" as possible - // into sink. It returns the number of valid bytes added to sink - // (extra invalid bytes may have been added due to errors; the caller - // should ignore those). The emitted data typically has length - // GetUncompressedLength(), but may be shorter if an error is - // encountered. - size_t UncompressAsMuchAsPossible(Source* compressed, Sink* uncompressed); - - // ------------------------------------------------------------------------ - // Lower-level character array based routines. May be useful for - // efficiency reasons in certain circumstances. - // ------------------------------------------------------------------------ - - // REQUIRES: "compressed" must point to an area of memory that is at - // least "MaxCompressedLength(input_length)" bytes in length. - // - // Takes the data stored in "input[0..input_length]" and stores - // it in the array pointed to by "compressed". - // - // "*compressed_length" is set to the length of the compressed output. - // - // Example: - // char* output = new char[snappy::MaxCompressedLength(input_length)]; - // size_t output_length; - // RawCompress(input, input_length, output, &output_length); - // ... Process(output, output_length) ... 
- // delete [] output; - void RawCompress(const char* input, - size_t input_length, - char* compressed, - size_t* compressed_length); - - // Given data in "compressed[0..compressed_length-1]" generated by - // calling the Snappy::Compress routine, this routine - // stores the uncompressed data to - // uncompressed[0..GetUncompressedLength(compressed)-1] - // returns false if the message is corrupted and could not be decrypted - bool RawUncompress(const char* compressed, size_t compressed_length, - char* uncompressed); - - // Given data from the byte source 'compressed' generated by calling - // the Snappy::Compress routine, this routine stores the uncompressed - // data to - // uncompressed[0..GetUncompressedLength(compressed,compressed_length)-1] - // returns false if the message is corrupted and could not be decrypted - bool RawUncompress(Source* compressed, char* uncompressed); - - // Given data in "compressed[0..compressed_length-1]" generated by - // calling the Snappy::Compress routine, this routine - // stores the uncompressed data to the iovec "iov". The number of physical - // buffers in "iov" is given by iov_cnt and their cumulative size - // must be at least GetUncompressedLength(compressed). The individual buffers - // in "iov" must not overlap with each other. - // - // returns false if the message is corrupted and could not be decrypted - bool RawUncompressToIOVec(const char* compressed, size_t compressed_length, - const struct iovec* iov, size_t iov_cnt); - - // Given data from the byte source 'compressed' generated by calling - // the Snappy::Compress routine, this routine stores the uncompressed - // data to the iovec "iov". The number of physical - // buffers in "iov" is given by iov_cnt and their cumulative size - // must be at least GetUncompressedLength(compressed). The individual buffers - // in "iov" must not overlap with each other. 
- // - // returns false if the message is corrupted and could not be decrypted - bool RawUncompressToIOVec(Source* compressed, const struct iovec* iov, - size_t iov_cnt); - - // Returns the maximal size of the compressed representation of - // input data that is "source_bytes" bytes in length; - size_t MaxCompressedLength(size_t source_bytes); - - // REQUIRES: "compressed[]" was produced by RawCompress() or Compress() - // Returns true and stores the length of the uncompressed data in - // *result normally. Returns false on parsing error. - // This operation takes O(1) time. - bool GetUncompressedLength(const char* compressed, size_t compressed_length, - size_t* result); - - // Returns true iff the contents of "compressed[]" can be uncompressed - // successfully. Does not return the uncompressed data. Takes - // time proportional to compressed_length, but is usually at least - // a factor of four faster than actual decompression. - bool IsValidCompressedBuffer(const char* compressed, - size_t compressed_length); - - // Returns true iff the contents of "compressed" can be uncompressed - // successfully. Does not return the uncompressed data. Takes - // time proportional to *compressed length, but is usually at least - // a factor of four faster than actual decompression. - // On success, consumes all of *compressed. On failure, consumes an - // unspecified prefix of *compressed. - bool IsValidCompressed(Source* compressed); - - // The size of a compression block. Note that many parts of the compression - // code assumes that kBlockSize <= 65536; in particular, the hash table - // can only store 16-bit offsets, and EmitCopy() also assumes the offset - // is 65535 bytes or less. Note also that if you change this, it will - // affect the framing format (see framing_format.txt). - // - // Note that there might be older data around that is compressed with larger - // block sizes, so the decompression code should not rely on the - // non-existence of long backreferences. 
- static constexpr int kBlockLog = 16; - static constexpr size_t kBlockSize = 1 << kBlockLog; - - static constexpr int kMinHashTableBits = 8; - static constexpr size_t kMinHashTableSize = 1 << kMinHashTableBits; - - static constexpr int kMaxHashTableBits = 14; - static constexpr size_t kMaxHashTableSize = 1 << kMaxHashTableBits; -} // end namespace snappy - -#endif // THIRD_PARTY_SNAPPY_SNAPPY_H__ diff --git a/dist/darwin_amd64/include/zbuff.h b/dist/darwin_amd64/include/zbuff.h deleted file mode 100644 index 03cb14a..0000000 --- a/dist/darwin_amd64/include/zbuff.h +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - -/* *************************************************************** -* NOTES/WARNINGS -******************************************************************/ -/* The streaming API defined here is deprecated. - * Consider migrating towards ZSTD_compressStream() API in `zstd.h` - * See 'lib/README.md'. 
- *****************************************************************/ - - -#if defined (__cplusplus) -extern "C" { -#endif - -#ifndef ZSTD_BUFFERED_H_23987 -#define ZSTD_BUFFERED_H_23987 - -/* ************************************* -* Dependencies -***************************************/ -#include /* size_t */ -#include "../zstd.h" /* ZSTD_CStream, ZSTD_DStream, ZSTDLIB_API */ - - -/* *************************************************************** -* Compiler specifics -*****************************************************************/ -/* Deprecation warnings */ -/* Should these warnings be a problem, - * it is generally possible to disable them, - * typically with -Wno-deprecated-declarations for gcc - * or _CRT_SECURE_NO_WARNINGS in Visual. - * Otherwise, it's also possible to define ZBUFF_DISABLE_DEPRECATE_WARNINGS - */ -#ifdef ZBUFF_DISABLE_DEPRECATE_WARNINGS -# define ZBUFF_DEPRECATED(message) ZSTDLIB_API /* disable deprecation warnings */ -#else -# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ -# define ZBUFF_DEPRECATED(message) [[deprecated(message)]] ZSTDLIB_API -# elif (defined(GNUC) && (GNUC > 4 || (GNUC == 4 && GNUC_MINOR >= 5))) || defined(__clang__) -# define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated(message))) -# elif defined(__GNUC__) && (__GNUC__ >= 3) -# define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated)) -# elif defined(_MSC_VER) -# define ZBUFF_DEPRECATED(message) ZSTDLIB_API __declspec(deprecated(message)) -# else -# pragma message("WARNING: You need to implement ZBUFF_DEPRECATED for this compiler") -# define ZBUFF_DEPRECATED(message) ZSTDLIB_API -# endif -#endif /* ZBUFF_DISABLE_DEPRECATE_WARNINGS */ - - -/* ************************************* -* Streaming functions -***************************************/ -/* This is the easier "buffered" streaming API, -* using an internal buffer to lift all restrictions on user-provided buffers -* which can be any size, any place, for 
both input and output. -* ZBUFF and ZSTD are 100% interoperable, -* frames created by one can be decoded by the other one */ - -typedef ZSTD_CStream ZBUFF_CCtx; -ZBUFF_DEPRECATED("use ZSTD_createCStream") ZBUFF_CCtx* ZBUFF_createCCtx(void); -ZBUFF_DEPRECATED("use ZSTD_freeCStream") size_t ZBUFF_freeCCtx(ZBUFF_CCtx* cctx); - -ZBUFF_DEPRECATED("use ZSTD_initCStream") size_t ZBUFF_compressInit(ZBUFF_CCtx* cctx, int compressionLevel); -ZBUFF_DEPRECATED("use ZSTD_initCStream_usingDict") size_t ZBUFF_compressInitDictionary(ZBUFF_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel); - -ZBUFF_DEPRECATED("use ZSTD_compressStream") size_t ZBUFF_compressContinue(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr, const void* src, size_t* srcSizePtr); -ZBUFF_DEPRECATED("use ZSTD_flushStream") size_t ZBUFF_compressFlush(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr); -ZBUFF_DEPRECATED("use ZSTD_endStream") size_t ZBUFF_compressEnd(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr); - -/*-************************************************* -* Streaming compression - howto -* -* A ZBUFF_CCtx object is required to track streaming operation. -* Use ZBUFF_createCCtx() and ZBUFF_freeCCtx() to create/release resources. -* ZBUFF_CCtx objects can be reused multiple times. -* -* Start by initializing ZBUF_CCtx. -* Use ZBUFF_compressInit() to start a new compression operation. -* Use ZBUFF_compressInitDictionary() for a compression which requires a dictionary. -* -* Use ZBUFF_compressContinue() repetitively to consume input stream. -* *srcSizePtr and *dstCapacityPtr can be any size. -* The function will report how many bytes were read or written within *srcSizePtr and *dstCapacityPtr. -* Note that it may not consume the entire input, in which case it's up to the caller to present again remaining data. -* The content of `dst` will be overwritten (up to *dstCapacityPtr) at each call, so save its content if it matters or change @dst . 
-* @return : a hint to preferred nb of bytes to use as input for next function call (it's just a hint, to improve latency) -* or an error code, which can be tested using ZBUFF_isError(). -* -* At any moment, it's possible to flush whatever data remains within buffer, using ZBUFF_compressFlush(). -* The nb of bytes written into `dst` will be reported into *dstCapacityPtr. -* Note that the function cannot output more than *dstCapacityPtr, -* therefore, some content might still be left into internal buffer if *dstCapacityPtr is too small. -* @return : nb of bytes still present into internal buffer (0 if it's empty) -* or an error code, which can be tested using ZBUFF_isError(). -* -* ZBUFF_compressEnd() instructs to finish a frame. -* It will perform a flush and write frame epilogue. -* The epilogue is required for decoders to consider a frame completed. -* Similar to ZBUFF_compressFlush(), it may not be able to output the entire internal buffer content if *dstCapacityPtr is too small. -* In which case, call again ZBUFF_compressFlush() to complete the flush. -* @return : nb of bytes still present into internal buffer (0 if it's empty) -* or an error code, which can be tested using ZBUFF_isError(). -* -* Hint : _recommended buffer_ sizes (not compulsory) : ZBUFF_recommendedCInSize() / ZBUFF_recommendedCOutSize() -* input : ZBUFF_recommendedCInSize==128 KB block size is the internal unit, use this value to reduce intermediate stages (better latency) -* output : ZBUFF_recommendedCOutSize==ZSTD_compressBound(128 KB) + 3 + 3 : ensures it's always possible to write/flush/end a full block. Skip some buffering. -* By using both, it ensures that input will be entirely consumed, and output will always contain the result, reducing intermediate buffering. 
-* **************************************************/ - - -typedef ZSTD_DStream ZBUFF_DCtx; -ZBUFF_DEPRECATED("use ZSTD_createDStream") ZBUFF_DCtx* ZBUFF_createDCtx(void); -ZBUFF_DEPRECATED("use ZSTD_freeDStream") size_t ZBUFF_freeDCtx(ZBUFF_DCtx* dctx); - -ZBUFF_DEPRECATED("use ZSTD_initDStream") size_t ZBUFF_decompressInit(ZBUFF_DCtx* dctx); -ZBUFF_DEPRECATED("use ZSTD_initDStream_usingDict") size_t ZBUFF_decompressInitDictionary(ZBUFF_DCtx* dctx, const void* dict, size_t dictSize); - -ZBUFF_DEPRECATED("use ZSTD_decompressStream") size_t ZBUFF_decompressContinue(ZBUFF_DCtx* dctx, - void* dst, size_t* dstCapacityPtr, - const void* src, size_t* srcSizePtr); - -/*-*************************************************************************** -* Streaming decompression howto -* -* A ZBUFF_DCtx object is required to track streaming operations. -* Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources. -* Use ZBUFF_decompressInit() to start a new decompression operation, -* or ZBUFF_decompressInitDictionary() if decompression requires a dictionary. -* Note that ZBUFF_DCtx objects can be re-init multiple times. -* -* Use ZBUFF_decompressContinue() repetitively to consume your input. -* *srcSizePtr and *dstCapacityPtr can be any size. -* The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr. -* Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again. -* The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`. -* @return : 0 when a frame is completely decoded and fully flushed, -* 1 when there is still some data left within internal buffer to flush, -* >1 when more data is expected, with value being a suggested next input size (it's just a hint, which helps latency), -* or an error code, which can be tested using ZBUFF_isError(). 
-* -* Hint : recommended buffer sizes (not compulsory) : ZBUFF_recommendedDInSize() and ZBUFF_recommendedDOutSize() -* output : ZBUFF_recommendedDOutSize== 128 KB block size is the internal unit, it ensures it's always possible to write a full block when decoded. -* input : ZBUFF_recommendedDInSize == 128KB + 3; -* just follow indications from ZBUFF_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 . -* *******************************************************************************/ - - -/* ************************************* -* Tool functions -***************************************/ -ZBUFF_DEPRECATED("use ZSTD_isError") unsigned ZBUFF_isError(size_t errorCode); -ZBUFF_DEPRECATED("use ZSTD_getErrorName") const char* ZBUFF_getErrorName(size_t errorCode); - -/** Functions below provide recommended buffer sizes for Compression or Decompression operations. -* These sizes are just hints, they tend to offer better latency */ -ZBUFF_DEPRECATED("use ZSTD_CStreamInSize") size_t ZBUFF_recommendedCInSize(void); -ZBUFF_DEPRECATED("use ZSTD_CStreamOutSize") size_t ZBUFF_recommendedCOutSize(void); -ZBUFF_DEPRECATED("use ZSTD_DStreamInSize") size_t ZBUFF_recommendedDInSize(void); -ZBUFF_DEPRECATED("use ZSTD_DStreamOutSize") size_t ZBUFF_recommendedDOutSize(void); - -#endif /* ZSTD_BUFFERED_H_23987 */ - - -#ifdef ZBUFF_STATIC_LINKING_ONLY -#ifndef ZBUFF_STATIC_H_30298098432 -#define ZBUFF_STATIC_H_30298098432 - -/* ==================================================================================== - * The definitions in this section are considered experimental. - * They should never be used in association with a dynamic library, as they may change in the future. - * They are provided for advanced usages. - * Use them only in association with static linking. 
- * ==================================================================================== */ - -/*--- Dependency ---*/ -#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters, ZSTD_customMem */ -#include "../zstd.h" - - -/*--- Custom memory allocator ---*/ -/*! ZBUFF_createCCtx_advanced() : - * Create a ZBUFF compression context using external alloc and free functions */ -ZBUFF_DEPRECATED("use ZSTD_createCStream_advanced") ZBUFF_CCtx* ZBUFF_createCCtx_advanced(ZSTD_customMem customMem); - -/*! ZBUFF_createDCtx_advanced() : - * Create a ZBUFF decompression context using external alloc and free functions */ -ZBUFF_DEPRECATED("use ZSTD_createDStream_advanced") ZBUFF_DCtx* ZBUFF_createDCtx_advanced(ZSTD_customMem customMem); - - -/*--- Advanced Streaming Initialization ---*/ -ZBUFF_DEPRECATED("use ZSTD_initDStream_usingDict") size_t ZBUFF_compressInit_advanced(ZBUFF_CCtx* zbc, - const void* dict, size_t dictSize, - ZSTD_parameters params, unsigned long long pledgedSrcSize); - - -#endif /* ZBUFF_STATIC_H_30298098432 */ -#endif /* ZBUFF_STATIC_LINKING_ONLY */ - - -#if defined (__cplusplus) -} -#endif diff --git a/dist/darwin_amd64/include/zconf.h b/dist/darwin_amd64/include/zconf.h deleted file mode 100644 index 77398c1..0000000 --- a/dist/darwin_amd64/include/zconf.h +++ /dev/null @@ -1,534 +0,0 @@ -/* zconf.h -- configuration of the zlib compression library - * Copyright (C) 1995-2016 Jean-loup Gailly, Mark Adler - * For conditions of distribution and use, see copyright notice in zlib.h - */ - -/* @(#) $Id$ */ - -#ifndef ZCONF_H -#define ZCONF_H - -/* - * If you *really* need a unique prefix for all types and library functions, - * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it. - * Even better than compiling with -DZ_PREFIX would be to use configure to set - * this permanently in zconf.h using "./configure --zprefix". 
- */ -#ifdef Z_PREFIX /* may be set to #if 1 by ./configure */ -# define Z_PREFIX_SET - -/* all linked symbols and init macros */ -# define _dist_code z__dist_code -# define _length_code z__length_code -# define _tr_align z__tr_align -# define _tr_flush_bits z__tr_flush_bits -# define _tr_flush_block z__tr_flush_block -# define _tr_init z__tr_init -# define _tr_stored_block z__tr_stored_block -# define _tr_tally z__tr_tally -# define adler32 z_adler32 -# define adler32_combine z_adler32_combine -# define adler32_combine64 z_adler32_combine64 -# define adler32_z z_adler32_z -# ifndef Z_SOLO -# define compress z_compress -# define compress2 z_compress2 -# define compressBound z_compressBound -# endif -# define crc32 z_crc32 -# define crc32_combine z_crc32_combine -# define crc32_combine64 z_crc32_combine64 -# define crc32_z z_crc32_z -# define deflate z_deflate -# define deflateBound z_deflateBound -# define deflateCopy z_deflateCopy -# define deflateEnd z_deflateEnd -# define deflateGetDictionary z_deflateGetDictionary -# define deflateInit z_deflateInit -# define deflateInit2 z_deflateInit2 -# define deflateInit2_ z_deflateInit2_ -# define deflateInit_ z_deflateInit_ -# define deflateParams z_deflateParams -# define deflatePending z_deflatePending -# define deflatePrime z_deflatePrime -# define deflateReset z_deflateReset -# define deflateResetKeep z_deflateResetKeep -# define deflateSetDictionary z_deflateSetDictionary -# define deflateSetHeader z_deflateSetHeader -# define deflateTune z_deflateTune -# define deflate_copyright z_deflate_copyright -# define get_crc_table z_get_crc_table -# ifndef Z_SOLO -# define gz_error z_gz_error -# define gz_intmax z_gz_intmax -# define gz_strwinerror z_gz_strwinerror -# define gzbuffer z_gzbuffer -# define gzclearerr z_gzclearerr -# define gzclose z_gzclose -# define gzclose_r z_gzclose_r -# define gzclose_w z_gzclose_w -# define gzdirect z_gzdirect -# define gzdopen z_gzdopen -# define gzeof z_gzeof -# define gzerror 
z_gzerror -# define gzflush z_gzflush -# define gzfread z_gzfread -# define gzfwrite z_gzfwrite -# define gzgetc z_gzgetc -# define gzgetc_ z_gzgetc_ -# define gzgets z_gzgets -# define gzoffset z_gzoffset -# define gzoffset64 z_gzoffset64 -# define gzopen z_gzopen -# define gzopen64 z_gzopen64 -# ifdef _WIN32 -# define gzopen_w z_gzopen_w -# endif -# define gzprintf z_gzprintf -# define gzputc z_gzputc -# define gzputs z_gzputs -# define gzread z_gzread -# define gzrewind z_gzrewind -# define gzseek z_gzseek -# define gzseek64 z_gzseek64 -# define gzsetparams z_gzsetparams -# define gztell z_gztell -# define gztell64 z_gztell64 -# define gzungetc z_gzungetc -# define gzvprintf z_gzvprintf -# define gzwrite z_gzwrite -# endif -# define inflate z_inflate -# define inflateBack z_inflateBack -# define inflateBackEnd z_inflateBackEnd -# define inflateBackInit z_inflateBackInit -# define inflateBackInit_ z_inflateBackInit_ -# define inflateCodesUsed z_inflateCodesUsed -# define inflateCopy z_inflateCopy -# define inflateEnd z_inflateEnd -# define inflateGetDictionary z_inflateGetDictionary -# define inflateGetHeader z_inflateGetHeader -# define inflateInit z_inflateInit -# define inflateInit2 z_inflateInit2 -# define inflateInit2_ z_inflateInit2_ -# define inflateInit_ z_inflateInit_ -# define inflateMark z_inflateMark -# define inflatePrime z_inflatePrime -# define inflateReset z_inflateReset -# define inflateReset2 z_inflateReset2 -# define inflateResetKeep z_inflateResetKeep -# define inflateSetDictionary z_inflateSetDictionary -# define inflateSync z_inflateSync -# define inflateSyncPoint z_inflateSyncPoint -# define inflateUndermine z_inflateUndermine -# define inflateValidate z_inflateValidate -# define inflate_copyright z_inflate_copyright -# define inflate_fast z_inflate_fast -# define inflate_table z_inflate_table -# ifndef Z_SOLO -# define uncompress z_uncompress -# define uncompress2 z_uncompress2 -# endif -# define zError z_zError -# ifndef Z_SOLO -# define 
zcalloc z_zcalloc -# define zcfree z_zcfree -# endif -# define zlibCompileFlags z_zlibCompileFlags -# define zlibVersion z_zlibVersion - -/* all zlib typedefs in zlib.h and zconf.h */ -# define Byte z_Byte -# define Bytef z_Bytef -# define alloc_func z_alloc_func -# define charf z_charf -# define free_func z_free_func -# ifndef Z_SOLO -# define gzFile z_gzFile -# endif -# define gz_header z_gz_header -# define gz_headerp z_gz_headerp -# define in_func z_in_func -# define intf z_intf -# define out_func z_out_func -# define uInt z_uInt -# define uIntf z_uIntf -# define uLong z_uLong -# define uLongf z_uLongf -# define voidp z_voidp -# define voidpc z_voidpc -# define voidpf z_voidpf - -/* all zlib structs in zlib.h and zconf.h */ -# define gz_header_s z_gz_header_s -# define internal_state z_internal_state - -#endif - -#if defined(__MSDOS__) && !defined(MSDOS) -# define MSDOS -#endif -#if (defined(OS_2) || defined(__OS2__)) && !defined(OS2) -# define OS2 -#endif -#if defined(_WINDOWS) && !defined(WINDOWS) -# define WINDOWS -#endif -#if defined(_WIN32) || defined(_WIN32_WCE) || defined(__WIN32__) -# ifndef WIN32 -# define WIN32 -# endif -#endif -#if (defined(MSDOS) || defined(OS2) || defined(WINDOWS)) && !defined(WIN32) -# if !defined(__GNUC__) && !defined(__FLAT__) && !defined(__386__) -# ifndef SYS16BIT -# define SYS16BIT -# endif -# endif -#endif - -/* - * Compile with -DMAXSEG_64K if the alloc function cannot allocate more - * than 64k bytes at a time (needed on systems with 16-bit int). 
- */ -#ifdef SYS16BIT -# define MAXSEG_64K -#endif -#ifdef MSDOS -# define UNALIGNED_OK -#endif - -#ifdef __STDC_VERSION__ -# ifndef STDC -# define STDC -# endif -# if __STDC_VERSION__ >= 199901L -# ifndef STDC99 -# define STDC99 -# endif -# endif -#endif -#if !defined(STDC) && (defined(__STDC__) || defined(__cplusplus)) -# define STDC -#endif -#if !defined(STDC) && (defined(__GNUC__) || defined(__BORLANDC__)) -# define STDC -#endif -#if !defined(STDC) && (defined(MSDOS) || defined(WINDOWS) || defined(WIN32)) -# define STDC -#endif -#if !defined(STDC) && (defined(OS2) || defined(__HOS_AIX__)) -# define STDC -#endif - -#if defined(__OS400__) && !defined(STDC) /* iSeries (formerly AS/400). */ -# define STDC -#endif - -#ifndef STDC -# ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */ -# define const /* note: need a more gentle solution here */ -# endif -#endif - -#if defined(ZLIB_CONST) && !defined(z_const) -# define z_const const -#else -# define z_const -#endif - -#ifdef Z_SOLO - typedef unsigned long z_size_t; -#else -# define z_longlong long long -# if defined(NO_SIZE_T) - typedef unsigned NO_SIZE_T z_size_t; -# elif defined(STDC) -# include - typedef size_t z_size_t; -# else - typedef unsigned long z_size_t; -# endif -# undef z_longlong -#endif - -/* Maximum value for memLevel in deflateInit2 */ -#ifndef MAX_MEM_LEVEL -# ifdef MAXSEG_64K -# define MAX_MEM_LEVEL 8 -# else -# define MAX_MEM_LEVEL 9 -# endif -#endif - -/* Maximum value for windowBits in deflateInit2 and inflateInit2. - * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files - * created by gzip. (Files created by minigzip can still be extracted by - * gzip.) - */ -#ifndef MAX_WBITS -# define MAX_WBITS 15 /* 32K LZ77 window */ -#endif - -/* The memory requirements for deflate are (in bytes): - (1 << (windowBits+2)) + (1 << (memLevel+9)) - that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values) - plus a few kilobytes for small objects. 
For example, if you want to reduce - the default memory requirements from 256K to 128K, compile with - make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7" - Of course this will generally degrade compression (there's no free lunch). - - The memory requirements for inflate are (in bytes) 1 << windowBits - that is, 32K for windowBits=15 (default value) plus about 7 kilobytes - for small objects. -*/ - - /* Type declarations */ - -#ifndef OF /* function prototypes */ -# ifdef STDC -# define OF(args) args -# else -# define OF(args) () -# endif -#endif - -#ifndef Z_ARG /* function prototypes for stdarg */ -# if defined(STDC) || defined(Z_HAVE_STDARG_H) -# define Z_ARG(args) args -# else -# define Z_ARG(args) () -# endif -#endif - -/* The following definitions for FAR are needed only for MSDOS mixed - * model programming (small or medium model with some far allocations). - * This was tested only with MSC; for other MSDOS compilers you may have - * to define NO_MEMCPY in zutil.h. If you don't need the mixed model, - * just define FAR to be empty. - */ -#ifdef SYS16BIT -# if defined(M_I86SM) || defined(M_I86MM) - /* MSC small or medium model */ -# define SMALL_MEDIUM -# ifdef _MSC_VER -# define FAR _far -# else -# define FAR far -# endif -# endif -# if (defined(__SMALL__) || defined(__MEDIUM__)) - /* Turbo C small or medium model */ -# define SMALL_MEDIUM -# ifdef __BORLANDC__ -# define FAR _far -# else -# define FAR far -# endif -# endif -#endif - -#if defined(WINDOWS) || defined(WIN32) - /* If building or using zlib as a DLL, define ZLIB_DLL. - * This is not mandatory, but it offers a little performance increase. - */ -# ifdef ZLIB_DLL -# if defined(WIN32) && (!defined(__BORLANDC__) || (__BORLANDC__ >= 0x500)) -# ifdef ZLIB_INTERNAL -# define ZEXTERN extern __declspec(dllexport) -# else -# define ZEXTERN extern __declspec(dllimport) -# endif -# endif -# endif /* ZLIB_DLL */ - /* If building or using zlib with the WINAPI/WINAPIV calling convention, - * define ZLIB_WINAPI. 
- * Caution: the standard ZLIB1.DLL is NOT compiled using ZLIB_WINAPI. - */ -# ifdef ZLIB_WINAPI -# ifdef FAR -# undef FAR -# endif -# include - /* No need for _export, use ZLIB.DEF instead. */ - /* For complete Windows compatibility, use WINAPI, not __stdcall. */ -# define ZEXPORT WINAPI -# ifdef WIN32 -# define ZEXPORTVA WINAPIV -# else -# define ZEXPORTVA FAR CDECL -# endif -# endif -#endif - -#if defined (__BEOS__) -# ifdef ZLIB_DLL -# ifdef ZLIB_INTERNAL -# define ZEXPORT __declspec(dllexport) -# define ZEXPORTVA __declspec(dllexport) -# else -# define ZEXPORT __declspec(dllimport) -# define ZEXPORTVA __declspec(dllimport) -# endif -# endif -#endif - -#ifndef ZEXTERN -# define ZEXTERN extern -#endif -#ifndef ZEXPORT -# define ZEXPORT -#endif -#ifndef ZEXPORTVA -# define ZEXPORTVA -#endif - -#ifndef FAR -# define FAR -#endif - -#if !defined(__MACTYPES__) -typedef unsigned char Byte; /* 8 bits */ -#endif -typedef unsigned int uInt; /* 16 bits or more */ -typedef unsigned long uLong; /* 32 bits or more */ - -#ifdef SMALL_MEDIUM - /* Borland C/C++ and some old MSC versions ignore FAR inside typedef */ -# define Bytef Byte FAR -#else - typedef Byte FAR Bytef; -#endif -typedef char FAR charf; -typedef int FAR intf; -typedef uInt FAR uIntf; -typedef uLong FAR uLongf; - -#ifdef STDC - typedef void const *voidpc; - typedef void FAR *voidpf; - typedef void *voidp; -#else - typedef Byte const *voidpc; - typedef Byte FAR *voidpf; - typedef Byte *voidp; -#endif - -#if !defined(Z_U4) && !defined(Z_SOLO) && defined(STDC) -# include -# if (UINT_MAX == 0xffffffffUL) -# define Z_U4 unsigned -# elif (ULONG_MAX == 0xffffffffUL) -# define Z_U4 unsigned long -# elif (USHRT_MAX == 0xffffffffUL) -# define Z_U4 unsigned short -# endif -#endif - -#ifdef Z_U4 - typedef Z_U4 z_crc_t; -#else - typedef unsigned long z_crc_t; -#endif - -#if 1 /* was set to #if 1 by ./configure */ -# define Z_HAVE_UNISTD_H -#endif - -#if 1 /* was set to #if 1 by ./configure */ -# define Z_HAVE_STDARG_H 
-#endif - -#ifdef STDC -# ifndef Z_SOLO -# include /* for off_t */ -# endif -#endif - -#if defined(STDC) || defined(Z_HAVE_STDARG_H) -# ifndef Z_SOLO -# include /* for va_list */ -# endif -#endif - -#ifdef _WIN32 -# ifndef Z_SOLO -# include /* for wchar_t */ -# endif -#endif - -/* a little trick to accommodate both "#define _LARGEFILE64_SOURCE" and - * "#define _LARGEFILE64_SOURCE 1" as requesting 64-bit operations, (even - * though the former does not conform to the LFS document), but considering - * both "#undef _LARGEFILE64_SOURCE" and "#define _LARGEFILE64_SOURCE 0" as - * equivalently requesting no 64-bit operations - */ -#if defined(_LARGEFILE64_SOURCE) && -_LARGEFILE64_SOURCE - -1 == 1 -# undef _LARGEFILE64_SOURCE -#endif - -#if defined(__WATCOMC__) && !defined(Z_HAVE_UNISTD_H) -# define Z_HAVE_UNISTD_H -#endif -#ifndef Z_SOLO -# if defined(Z_HAVE_UNISTD_H) || defined(_LARGEFILE64_SOURCE) -# include /* for SEEK_*, off_t, and _LFS64_LARGEFILE */ -# ifdef VMS -# include /* for off_t */ -# endif -# ifndef z_off_t -# define z_off_t off_t -# endif -# endif -#endif - -#if defined(_LFS64_LARGEFILE) && _LFS64_LARGEFILE-0 -# define Z_LFS64 -#endif - -#if defined(_LARGEFILE64_SOURCE) && defined(Z_LFS64) -# define Z_LARGE64 -#endif - -#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS-0 == 64 && defined(Z_LFS64) -# define Z_WANT64 -#endif - -#if !defined(SEEK_SET) && !defined(Z_SOLO) -# define SEEK_SET 0 /* Seek from beginning of file. */ -# define SEEK_CUR 1 /* Seek from current position. 
*/ -# define SEEK_END 2 /* Set file pointer to EOF plus "offset" */ -#endif - -#ifndef z_off_t -# define z_off_t long -#endif - -#if !defined(_WIN32) && defined(Z_LARGE64) -# define z_off64_t off64_t -#else -# if defined(_WIN32) && !defined(__GNUC__) && !defined(Z_SOLO) -# define z_off64_t __int64 -# else -# define z_off64_t z_off_t -# endif -#endif - -/* MVS linker does not support external names larger than 8 bytes */ -#if defined(__MVS__) - #pragma map(deflateInit_,"DEIN") - #pragma map(deflateInit2_,"DEIN2") - #pragma map(deflateEnd,"DEEND") - #pragma map(deflateBound,"DEBND") - #pragma map(inflateInit_,"ININ") - #pragma map(inflateInit2_,"ININ2") - #pragma map(inflateEnd,"INEND") - #pragma map(inflateSync,"INSY") - #pragma map(inflateSetDictionary,"INSEDI") - #pragma map(compressBound,"CMBND") - #pragma map(inflate_table,"INTABL") - #pragma map(inflate_fast,"INFA") - #pragma map(inflate_copyright,"INCOPY") -#endif - -#endif /* ZCONF_H */ diff --git a/dist/darwin_amd64/include/zdict.h b/dist/darwin_amd64/include/zdict.h deleted file mode 100644 index ff2e77f..0000000 --- a/dist/darwin_amd64/include/zdict.h +++ /dev/null @@ -1,305 +0,0 @@ -/* - * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. 
- */ - -#ifndef DICTBUILDER_H_001 -#define DICTBUILDER_H_001 - -#if defined (__cplusplus) -extern "C" { -#endif - - -/*====== Dependencies ======*/ -#include /* size_t */ - - -/* ===== ZDICTLIB_API : control library symbols visibility ===== */ -#ifndef ZDICTLIB_VISIBILITY -# if defined(__GNUC__) && (__GNUC__ >= 4) -# define ZDICTLIB_VISIBILITY __attribute__ ((visibility ("default"))) -# else -# define ZDICTLIB_VISIBILITY -# endif -#endif -#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) -# define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBILITY -#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) -# define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ -#else -# define ZDICTLIB_API ZDICTLIB_VISIBILITY -#endif - - -/*! ZDICT_trainFromBuffer(): - * Train a dictionary from an array of samples. - * Redirect towards ZDICT_optimizeTrainFromBuffer_fastCover() single-threaded, with d=8, steps=4, - * f=20, and accel=1. - * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, - * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. - * The resulting dictionary will be saved into `dictBuffer`. - * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) - * or an error code, which can be tested with ZDICT_isError(). - * Note: Dictionary training will fail if there are not enough samples to construct a - * dictionary, or if most of the samples are too small (< 8 bytes being the lower limit). - * If dictionary training fails, you should use zstd without a dictionary, as the dictionary - * would've been ineffective anyways. If you believe your samples would benefit from a dictionary - * please open an issue with details, and we can look into it. - * Note: ZDICT_trainFromBuffer()'s memory usage is about 6 MB. 
- * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. - * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. - * In general, it's recommended to provide a few thousands samples, though this can vary a lot. - * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. - */ -ZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity, - const void* samplesBuffer, - const size_t* samplesSizes, unsigned nbSamples); - -typedef struct { - int compressionLevel; /*< optimize for a specific zstd compression level; 0 means default */ - unsigned notificationLevel; /*< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */ - unsigned dictID; /*< force dictID value; 0 means auto mode (32-bits random value) */ -} ZDICT_params_t; - -/*! ZDICT_finalizeDictionary(): - * Given a custom content as a basis for dictionary, and a set of samples, - * finalize dictionary by adding headers and statistics according to the zstd - * dictionary format. - * - * Samples must be stored concatenated in a flat buffer `samplesBuffer`, - * supplied with an array of sizes `samplesSizes`, providing the size of each - * sample in order. The samples are used to construct the statistics, so they - * should be representative of what you will compress with this dictionary. - * - * The compression level can be set in `parameters`. You should pass the - * compression level you expect to use in production. The statistics for each - * compression level differ, so tuning the dictionary for the compression level - * can help quite a bit. - * - * You can set an explicit dictionary ID in `parameters`, or allow us to pick - * a random dictionary ID for you, but we can't guarantee no collisions. - * - * The dstDictBuffer and the dictContent may overlap, and the content will be - * appended to the end of the header. 
If the header + the content doesn't fit in - * maxDictSize the beginning of the content is truncated to make room, since it - * is presumed that the most profitable content is at the end of the dictionary, - * since that is the cheapest to reference. - * - * `dictContentSize` must be >= ZDICT_CONTENTSIZE_MIN bytes. - * `maxDictSize` must be >= max(dictContentSize, ZSTD_DICTSIZE_MIN). - * - * @return: size of dictionary stored into `dstDictBuffer` (<= `maxDictSize`), - * or an error code, which can be tested by ZDICT_isError(). - * Note: ZDICT_finalizeDictionary() will push notifications into stderr if - * instructed to, using notificationLevel>0. - * NOTE: This function currently may fail in several edge cases including: - * * Not enough samples - * * Samples are uncompressible - * * Samples are all exactly the same - */ -ZDICTLIB_API size_t ZDICT_finalizeDictionary(void* dstDictBuffer, size_t maxDictSize, - const void* dictContent, size_t dictContentSize, - const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, - ZDICT_params_t parameters); - - -/*====== Helper functions ======*/ -ZDICTLIB_API unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize); /**< extracts dictID; @return zero if error (not a valid dictionary) */ -ZDICTLIB_API size_t ZDICT_getDictHeaderSize(const void* dictBuffer, size_t dictSize); /* returns dict header size; returns a ZSTD error code on failure */ -ZDICTLIB_API unsigned ZDICT_isError(size_t errorCode); -ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode); - - - -#ifdef ZDICT_STATIC_LINKING_ONLY - -/* ==================================================================================== - * The definitions in this section are considered experimental. - * They should never be used with a dynamic library, as they may change in the future. - * They are provided for advanced usages. - * Use them only in association with static linking. 
- * ==================================================================================== */ - -#define ZDICT_CONTENTSIZE_MIN 128 -#define ZDICT_DICTSIZE_MIN 256 - -/*! ZDICT_cover_params_t: - * k and d are the only required parameters. - * For others, value 0 means default. - */ -typedef struct { - unsigned k; /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */ - unsigned d; /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */ - unsigned steps; /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */ - unsigned nbThreads; /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */ - double splitPoint; /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (1.0), 1.0 when all samples are used for both training and testing */ - unsigned shrinkDict; /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */ - unsigned shrinkDictMaxRegression; /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. 
*/ - ZDICT_params_t zParams; -} ZDICT_cover_params_t; - -typedef struct { - unsigned k; /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */ - unsigned d; /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */ - unsigned f; /* log of size of frequency array : constraint: 0 < f <= 31 : 1 means default(20)*/ - unsigned steps; /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */ - unsigned nbThreads; /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */ - double splitPoint; /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (0.75), 1.0 when all samples are used for both training and testing */ - unsigned accel; /* Acceleration level: constraint: 0 < accel <= 10, higher means faster and less accurate, 0 means default(1) */ - unsigned shrinkDict; /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */ - unsigned shrinkDictMaxRegression; /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. */ - - ZDICT_params_t zParams; -} ZDICT_fastCover_params_t; - -/*! ZDICT_trainFromBuffer_cover(): - * Train a dictionary from an array of samples using the COVER algorithm. - * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, - * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. - * The resulting dictionary will be saved into `dictBuffer`. 
- * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) - * or an error code, which can be tested with ZDICT_isError(). - * See ZDICT_trainFromBuffer() for details on failure modes. - * Note: ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte. - * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. - * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. - * In general, it's recommended to provide a few thousands samples, though this can vary a lot. - * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. - */ -ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( - void *dictBuffer, size_t dictBufferCapacity, - const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, - ZDICT_cover_params_t parameters); - -/*! ZDICT_optimizeTrainFromBuffer_cover(): - * The same requirements as above hold for all the parameters except `parameters`. - * This function tries many parameter combinations and picks the best parameters. - * `*parameters` is filled with the best parameters found, - * dictionary constructed with those parameters is stored in `dictBuffer`. - * - * All of the parameters d, k, steps are optional. - * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}. - * if steps is zero it defaults to its default value. - * If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000]. - * - * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) - * or an error code, which can be tested with ZDICT_isError(). - * On success `*parameters` contains the parameters selected. - * See ZDICT_trainFromBuffer() for details on failure modes. 
- * Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte and additionally another 5 bytes of memory for each byte of memory for each thread. - */ -ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( - void* dictBuffer, size_t dictBufferCapacity, - const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, - ZDICT_cover_params_t* parameters); - -/*! ZDICT_trainFromBuffer_fastCover(): - * Train a dictionary from an array of samples using a modified version of COVER algorithm. - * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, - * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. - * d and k are required. - * All other parameters are optional, will use default values if not provided - * The resulting dictionary will be saved into `dictBuffer`. - * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) - * or an error code, which can be tested with ZDICT_isError(). - * See ZDICT_trainFromBuffer() for details on failure modes. - * Note: ZDICT_trainFromBuffer_fastCover() requires 6 * 2^f bytes of memory. - * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. - * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. - * In general, it's recommended to provide a few thousands samples, though this can vary a lot. - * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. - */ -ZDICTLIB_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer, - size_t dictBufferCapacity, const void *samplesBuffer, - const size_t *samplesSizes, unsigned nbSamples, - ZDICT_fastCover_params_t parameters); - -/*! ZDICT_optimizeTrainFromBuffer_fastCover(): - * The same requirements as above hold for all the parameters except `parameters`. 
- * This function tries many parameter combinations (specifically, k and d combinations) - * and picks the best parameters. `*parameters` is filled with the best parameters found, - * dictionary constructed with those parameters is stored in `dictBuffer`. - * All of the parameters d, k, steps, f, and accel are optional. - * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}. - * if steps is zero it defaults to its default value. - * If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000]. - * If f is zero, default value of 20 is used. - * If accel is zero, default value of 1 is used. - * - * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) - * or an error code, which can be tested with ZDICT_isError(). - * On success `*parameters` contains the parameters selected. - * See ZDICT_trainFromBuffer() for details on failure modes. - * Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread. - */ -ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer, - size_t dictBufferCapacity, const void* samplesBuffer, - const size_t* samplesSizes, unsigned nbSamples, - ZDICT_fastCover_params_t* parameters); - -typedef struct { - unsigned selectivityLevel; /* 0 means default; larger => select more => larger dictionary */ - ZDICT_params_t zParams; -} ZDICT_legacy_params_t; - -/*! ZDICT_trainFromBuffer_legacy(): - * Train a dictionary from an array of samples. - * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, - * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. - * The resulting dictionary will be saved into `dictBuffer`. - * `parameters` is optional and can be provided with values set to 0 to mean "default". 
- * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) - * or an error code, which can be tested with ZDICT_isError(). - * See ZDICT_trainFromBuffer() for details on failure modes. - * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. - * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. - * In general, it's recommended to provide a few thousands samples, though this can vary a lot. - * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. - * Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0. - */ -ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy( - void *dictBuffer, size_t dictBufferCapacity, - const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, - ZDICT_legacy_params_t parameters); - -/* Deprecation warnings */ -/* It is generally possible to disable deprecation warnings from compiler, - for example with -Wno-deprecated-declarations for gcc - or _CRT_SECURE_NO_WARNINGS in Visual. 
- Otherwise, it's also possible to manually define ZDICT_DISABLE_DEPRECATE_WARNINGS */ -#ifdef ZDICT_DISABLE_DEPRECATE_WARNINGS -# define ZDICT_DEPRECATED(message) ZDICTLIB_API /* disable deprecation warnings */ -#else -# define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) -# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ -# define ZDICT_DEPRECATED(message) [[deprecated(message)]] ZDICTLIB_API -# elif (ZDICT_GCC_VERSION >= 405) || defined(__clang__) -# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated(message))) -# elif (ZDICT_GCC_VERSION >= 301) -# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated)) -# elif defined(_MSC_VER) -# define ZDICT_DEPRECATED(message) ZDICTLIB_API __declspec(deprecated(message)) -# else -# pragma message("WARNING: You need to implement ZDICT_DEPRECATED for this compiler") -# define ZDICT_DEPRECATED(message) ZDICTLIB_API -# endif -#endif /* ZDICT_DISABLE_DEPRECATE_WARNINGS */ - -ZDICT_DEPRECATED("use ZDICT_finalizeDictionary() instead") -size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, - const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples); - - -#endif /* ZDICT_STATIC_LINKING_ONLY */ - -#if defined (__cplusplus) -} -#endif - -#endif /* DICTBUILDER_H_001 */ diff --git a/dist/darwin_amd64/include/zlib.h b/dist/darwin_amd64/include/zlib.h deleted file mode 100644 index f09cdaf..0000000 --- a/dist/darwin_amd64/include/zlib.h +++ /dev/null @@ -1,1912 +0,0 @@ -/* zlib.h -- interface of the 'zlib' general purpose compression library - version 1.2.11, January 15th, 2017 - - Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. 
- - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - - Jean-loup Gailly Mark Adler - jloup@gzip.org madler@alumni.caltech.edu - - - The data format used by the zlib library is described by RFCs (Request for - Comments) 1950 to 1952 in the files http://tools.ietf.org/html/rfc1950 - (zlib format), rfc1951 (deflate format) and rfc1952 (gzip format). -*/ - -#ifndef ZLIB_H -#define ZLIB_H - -#include "zconf.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#define ZLIB_VERSION "1.2.11" -#define ZLIB_VERNUM 0x12b0 -#define ZLIB_VER_MAJOR 1 -#define ZLIB_VER_MINOR 2 -#define ZLIB_VER_REVISION 11 -#define ZLIB_VER_SUBREVISION 0 - -/* - The 'zlib' compression library provides in-memory compression and - decompression functions, including integrity checks of the uncompressed data. - This version of the library supports only one compression method (deflation) - but other algorithms will be added later and will have the same stream - interface. - - Compression can be done in a single step if the buffers are large enough, - or can be done by repeated calls of the compression function. In the latter - case, the application must provide more input and/or consume the output - (providing more output space) before each call. 
- - The compressed data format used by default by the in-memory functions is - the zlib format, which is a zlib wrapper documented in RFC 1950, wrapped - around a deflate stream, which is itself documented in RFC 1951. - - The library also supports reading and writing files in gzip (.gz) format - with an interface similar to that of stdio using the functions that start - with "gz". The gzip format is different from the zlib format. gzip is a - gzip wrapper, documented in RFC 1952, wrapped around a deflate stream. - - This library can optionally read and write gzip and raw deflate streams in - memory as well. - - The zlib format was designed to be compact and fast for use in memory - and on communications channels. The gzip format was designed for single- - file compression on file systems, has a larger header than zlib to maintain - directory information, and uses a different, slower check method than zlib. - - The library does not install any signal handler. The decoder checks - the consistency of the compressed data, so the library should never crash - even in the case of corrupted input. 
-*/ - -typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size)); -typedef void (*free_func) OF((voidpf opaque, voidpf address)); - -struct internal_state; - -typedef struct z_stream_s { - z_const Bytef *next_in; /* next input byte */ - uInt avail_in; /* number of bytes available at next_in */ - uLong total_in; /* total number of input bytes read so far */ - - Bytef *next_out; /* next output byte will go here */ - uInt avail_out; /* remaining free space at next_out */ - uLong total_out; /* total number of bytes output so far */ - - z_const char *msg; /* last error message, NULL if no error */ - struct internal_state FAR *state; /* not visible by applications */ - - alloc_func zalloc; /* used to allocate the internal state */ - free_func zfree; /* used to free the internal state */ - voidpf opaque; /* private data object passed to zalloc and zfree */ - - int data_type; /* best guess about the data type: binary or text - for deflate, or the decoding state for inflate */ - uLong adler; /* Adler-32 or CRC-32 value of the uncompressed data */ - uLong reserved; /* reserved for future use */ -} z_stream; - -typedef z_stream FAR *z_streamp; - -/* - gzip header information passed to and from zlib routines. See RFC 1952 - for more details on the meanings of these fields. 
-*/ -typedef struct gz_header_s { - int text; /* true if compressed data believed to be text */ - uLong time; /* modification time */ - int xflags; /* extra flags (not used when writing a gzip file) */ - int os; /* operating system */ - Bytef *extra; /* pointer to extra field or Z_NULL if none */ - uInt extra_len; /* extra field length (valid if extra != Z_NULL) */ - uInt extra_max; /* space at extra (only when reading header) */ - Bytef *name; /* pointer to zero-terminated file name or Z_NULL */ - uInt name_max; /* space at name (only when reading header) */ - Bytef *comment; /* pointer to zero-terminated comment or Z_NULL */ - uInt comm_max; /* space at comment (only when reading header) */ - int hcrc; /* true if there was or will be a header crc */ - int done; /* true when done reading gzip header (not used - when writing a gzip file) */ -} gz_header; - -typedef gz_header FAR *gz_headerp; - -/* - The application must update next_in and avail_in when avail_in has dropped - to zero. It must update next_out and avail_out when avail_out has dropped - to zero. The application must initialize zalloc, zfree and opaque before - calling the init function. All other fields are set by the compression - library and must not be updated by the application. - - The opaque value provided by the application will be passed as the first - parameter for calls of zalloc and zfree. This can be useful for custom - memory management. The compression library attaches no meaning to the - opaque value. - - zalloc must return Z_NULL if there is not enough memory for the object. - If zlib is used in a multi-threaded application, zalloc and zfree must be - thread safe. In that case, zlib is thread-safe. When zalloc and zfree are - Z_NULL on entry to the initialization function, they are set to internal - routines that use the standard library functions malloc() and free(). 
- - On 16-bit systems, the functions zalloc and zfree must be able to allocate - exactly 65536 bytes, but will not be required to allocate more than this if - the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS, pointers - returned by zalloc for objects of exactly 65536 bytes *must* have their - offset normalized to zero. The default allocation function provided by this - library ensures this (see zutil.c). To reduce memory requirements and avoid - any allocation of 64K objects, at the expense of compression ratio, compile - the library with -DMAX_WBITS=14 (see zconf.h). - - The fields total_in and total_out can be used for statistics or progress - reports. After compression, total_in holds the total size of the - uncompressed data and may be saved for use by the decompressor (particularly - if the decompressor wants to decompress everything in a single step). -*/ - - /* constants */ - -#define Z_NO_FLUSH 0 -#define Z_PARTIAL_FLUSH 1 -#define Z_SYNC_FLUSH 2 -#define Z_FULL_FLUSH 3 -#define Z_FINISH 4 -#define Z_BLOCK 5 -#define Z_TREES 6 -/* Allowed flush values; see deflate() and inflate() below for details */ - -#define Z_OK 0 -#define Z_STREAM_END 1 -#define Z_NEED_DICT 2 -#define Z_ERRNO (-1) -#define Z_STREAM_ERROR (-2) -#define Z_DATA_ERROR (-3) -#define Z_MEM_ERROR (-4) -#define Z_BUF_ERROR (-5) -#define Z_VERSION_ERROR (-6) -/* Return codes for the compression/decompression functions. Negative values - * are errors, positive values are used for special but normal events. 
- */ - -#define Z_NO_COMPRESSION 0 -#define Z_BEST_SPEED 1 -#define Z_BEST_COMPRESSION 9 -#define Z_DEFAULT_COMPRESSION (-1) -/* compression levels */ - -#define Z_FILTERED 1 -#define Z_HUFFMAN_ONLY 2 -#define Z_RLE 3 -#define Z_FIXED 4 -#define Z_DEFAULT_STRATEGY 0 -/* compression strategy; see deflateInit2() below for details */ - -#define Z_BINARY 0 -#define Z_TEXT 1 -#define Z_ASCII Z_TEXT /* for compatibility with 1.2.2 and earlier */ -#define Z_UNKNOWN 2 -/* Possible values of the data_type field for deflate() */ - -#define Z_DEFLATED 8 -/* The deflate compression method (the only one supported in this version) */ - -#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */ - -#define zlib_version zlibVersion() -/* for compatibility with versions < 1.0.2 */ - - - /* basic functions */ - -ZEXTERN const char * ZEXPORT zlibVersion OF((void)); -/* The application can compare zlibVersion and ZLIB_VERSION for consistency. - If the first character differs, the library code actually used is not - compatible with the zlib.h header file used by the application. This check - is automatically made by deflateInit and inflateInit. - */ - -/* -ZEXTERN int ZEXPORT deflateInit OF((z_streamp strm, int level)); - - Initializes the internal stream state for compression. The fields - zalloc, zfree and opaque must be initialized before by the caller. If - zalloc and zfree are set to Z_NULL, deflateInit updates them to use default - allocation functions. - - The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9: - 1 gives best speed, 9 gives best compression, 0 gives no compression at all - (the input data is simply copied a block at a time). Z_DEFAULT_COMPRESSION - requests a default compromise between speed and compression (currently - equivalent to level 6). 
- - deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough - memory, Z_STREAM_ERROR if level is not a valid compression level, or - Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible - with the version assumed by the caller (ZLIB_VERSION). msg is set to null - if there is no error message. deflateInit does not perform any compression: - this will be done by deflate(). -*/ - - -ZEXTERN int ZEXPORT deflate OF((z_streamp strm, int flush)); -/* - deflate compresses as much data as possible, and stops when the input - buffer becomes empty or the output buffer becomes full. It may introduce - some output latency (reading input without producing any output) except when - forced to flush. - - The detailed semantics are as follows. deflate performs one or both of the - following actions: - - - Compress more input starting at next_in and update next_in and avail_in - accordingly. If not all input can be processed (because there is not - enough room in the output buffer), next_in and avail_in are updated and - processing will resume at this point for the next call of deflate(). - - - Generate more output starting at next_out and update next_out and avail_out - accordingly. This action is forced if the parameter flush is non zero. - Forcing flush frequently degrades the compression ratio, so this parameter - should be set only when necessary. Some output may be provided even if - flush is zero. - - Before the call of deflate(), the application should ensure that at least - one of the actions is possible, by providing more input and/or consuming more - output, and updating avail_in or avail_out accordingly; avail_out should - never be zero before the call. The application can consume the compressed - output when it wants, for example when the output buffer is full (avail_out - == 0), or after each call of deflate(). 
If deflate returns Z_OK and with - zero avail_out, it must be called again after making room in the output - buffer because there might be more output pending. See deflatePending(), - which can be used if desired to determine whether or not there is more ouput - in that case. - - Normally the parameter flush is set to Z_NO_FLUSH, which allows deflate to - decide how much data to accumulate before producing output, in order to - maximize compression. - - If the parameter flush is set to Z_SYNC_FLUSH, all pending output is - flushed to the output buffer and the output is aligned on a byte boundary, so - that the decompressor can get all input data available so far. (In - particular avail_in is zero after the call if enough output space has been - provided before the call.) Flushing may degrade compression for some - compression algorithms and so it should be used only when necessary. This - completes the current deflate block and follows it with an empty stored block - that is three bits plus filler bits to the next byte, followed by four bytes - (00 00 ff ff). - - If flush is set to Z_PARTIAL_FLUSH, all pending output is flushed to the - output buffer, but the output is not aligned to a byte boundary. All of the - input data so far will be available to the decompressor, as for Z_SYNC_FLUSH. - This completes the current deflate block and follows it with an empty fixed - codes block that is 10 bits long. This assures that enough bytes are output - in order for the decompressor to finish the block before the empty fixed - codes block. - - If flush is set to Z_BLOCK, a deflate block is completed and emitted, as - for Z_SYNC_FLUSH, but the output is not aligned on a byte boundary, and up to - seven bits of the current block are held to be written as the next byte after - the next deflate block is completed. In this case, the decompressor may not - be provided enough bits at this point in order to complete decompression of - the data provided so far to the compressor. 
It may need to wait for the next - block to be emitted. This is for advanced applications that need to control - the emission of deflate blocks. - - If flush is set to Z_FULL_FLUSH, all output is flushed as with - Z_SYNC_FLUSH, and the compression state is reset so that decompression can - restart from this point if previous compressed data has been damaged or if - random access is desired. Using Z_FULL_FLUSH too often can seriously degrade - compression. - - If deflate returns with avail_out == 0, this function must be called again - with the same value of the flush parameter and more output space (updated - avail_out), until the flush is complete (deflate returns with non-zero - avail_out). In the case of a Z_FULL_FLUSH or Z_SYNC_FLUSH, make sure that - avail_out is greater than six to avoid repeated flush markers due to - avail_out == 0 on return. - - If the parameter flush is set to Z_FINISH, pending input is processed, - pending output is flushed and deflate returns with Z_STREAM_END if there was - enough output space. If deflate returns with Z_OK or Z_BUF_ERROR, this - function must be called again with Z_FINISH and more output space (updated - avail_out) but no more input data, until it returns with Z_STREAM_END or an - error. After deflate has returned Z_STREAM_END, the only possible operations - on the stream are deflateReset or deflateEnd. - - Z_FINISH can be used in the first deflate call after deflateInit if all the - compression is to be done in a single step. In order to complete in one - call, avail_out must be at least the value returned by deflateBound (see - below). Then deflate is guaranteed to return Z_STREAM_END. If not enough - output space is provided, deflate will not return Z_STREAM_END, and it must - be called again as described above. - - deflate() sets strm->adler to the Adler-32 checksum of all input read - so far (that is, total_in bytes). 
If a gzip stream is being generated, then - strm->adler will be the CRC-32 checksum of the input read so far. (See - deflateInit2 below.) - - deflate() may update strm->data_type if it can make a good guess about - the input data type (Z_BINARY or Z_TEXT). If in doubt, the data is - considered binary. This field is only for information purposes and does not - affect the compression algorithm in any manner. - - deflate() returns Z_OK if some progress has been made (more input - processed or more output produced), Z_STREAM_END if all input has been - consumed and all output has been produced (only when flush is set to - Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example - if next_in or next_out was Z_NULL or the state was inadvertently written over - by the application), or Z_BUF_ERROR if no progress is possible (for example - avail_in or avail_out was zero). Note that Z_BUF_ERROR is not fatal, and - deflate() can be called again with more input and more output space to - continue compressing. -*/ - - -ZEXTERN int ZEXPORT deflateEnd OF((z_streamp strm)); -/* - All dynamically allocated data structures for this stream are freed. - This function discards any unprocessed input and does not flush any pending - output. - - deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the - stream state was inconsistent, Z_DATA_ERROR if the stream was freed - prematurely (some input or output was discarded). In the error case, msg - may be set but then points to a static string (which must not be - deallocated). -*/ - - -/* -ZEXTERN int ZEXPORT inflateInit OF((z_streamp strm)); - - Initializes the internal stream state for decompression. The fields - next_in, avail_in, zalloc, zfree and opaque must be initialized before by - the caller. In the current version of inflate, the provided input is not - read or consumed. 
The allocation of a sliding window will be deferred to - the first call of inflate (if the decompression does not complete on the - first call). If zalloc and zfree are set to Z_NULL, inflateInit updates - them to use default allocation functions. - - inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough - memory, Z_VERSION_ERROR if the zlib library version is incompatible with the - version assumed by the caller, or Z_STREAM_ERROR if the parameters are - invalid, such as a null pointer to the structure. msg is set to null if - there is no error message. inflateInit does not perform any decompression. - Actual decompression will be done by inflate(). So next_in, and avail_in, - next_out, and avail_out are unused and unchanged. The current - implementation of inflateInit() does not process any header information -- - that is deferred until inflate() is called. -*/ - - -ZEXTERN int ZEXPORT inflate OF((z_streamp strm, int flush)); -/* - inflate decompresses as much data as possible, and stops when the input - buffer becomes empty or the output buffer becomes full. It may introduce - some output latency (reading input without producing any output) except when - forced to flush. - - The detailed semantics are as follows. inflate performs one or both of the - following actions: - - - Decompress more input starting at next_in and update next_in and avail_in - accordingly. If not all input can be processed (because there is not - enough room in the output buffer), then next_in and avail_in are updated - accordingly, and processing will resume at this point for the next call of - inflate(). - - - Generate more output starting at next_out and update next_out and avail_out - accordingly. inflate() provides as much output as possible, until there is - no more input data or no more space in the output buffer (see below about - the flush parameter). 
- - Before the call of inflate(), the application should ensure that at least - one of the actions is possible, by providing more input and/or consuming more - output, and updating the next_* and avail_* values accordingly. If the - caller of inflate() does not provide both available input and available - output space, it is possible that there will be no progress made. The - application can consume the uncompressed output when it wants, for example - when the output buffer is full (avail_out == 0), or after each call of - inflate(). If inflate returns Z_OK and with zero avail_out, it must be - called again after making room in the output buffer because there might be - more output pending. - - The flush parameter of inflate() can be Z_NO_FLUSH, Z_SYNC_FLUSH, Z_FINISH, - Z_BLOCK, or Z_TREES. Z_SYNC_FLUSH requests that inflate() flush as much - output as possible to the output buffer. Z_BLOCK requests that inflate() - stop if and when it gets to the next deflate block boundary. When decoding - the zlib or gzip format, this will cause inflate() to return immediately - after the header and before the first block. When doing a raw inflate, - inflate() will go ahead and process the first block, and will return when it - gets to the end of that block, or when it runs out of data. - - The Z_BLOCK option assists in appending to or combining deflate streams. - To assist in this, on return inflate() always sets strm->data_type to the - number of unused bits in the last byte taken from strm->next_in, plus 64 if - inflate() is currently decoding the last block in the deflate stream, plus - 128 if inflate() returned immediately after decoding an end-of-block code or - decoding the complete header up to just before the first byte of the deflate - stream. The end-of-block will not be indicated until all of the uncompressed - data from that block has been written to strm->next_out. 
The number of - unused bits may in general be greater than seven, except when bit 7 of - data_type is set, in which case the number of unused bits will be less than - eight. data_type is set as noted here every time inflate() returns for all - flush options, and so can be used to determine the amount of currently - consumed input in bits. - - The Z_TREES option behaves as Z_BLOCK does, but it also returns when the - end of each deflate block header is reached, before any actual data in that - block is decoded. This allows the caller to determine the length of the - deflate block header for later use in random access within a deflate block. - 256 is added to the value of strm->data_type when inflate() returns - immediately after reaching the end of the deflate block header. - - inflate() should normally be called until it returns Z_STREAM_END or an - error. However if all decompression is to be performed in a single step (a - single call of inflate), the parameter flush should be set to Z_FINISH. In - this case all pending input is processed and all pending output is flushed; - avail_out must be large enough to hold all of the uncompressed data for the - operation to complete. (The size of the uncompressed data may have been - saved by the compressor for this purpose.) The use of Z_FINISH is not - required to perform an inflation in one step. However it may be used to - inform inflate that a faster approach can be used for the single inflate() - call. Z_FINISH also informs inflate to not maintain a sliding window if the - stream completes, which reduces inflate's memory footprint. If the stream - does not complete, either because not all of the stream is provided or not - enough output space is provided, then a sliding window will be allocated and - inflate() can be called again to continue the operation as if Z_NO_FLUSH had - been used. 
- - In this implementation, inflate() always flushes as much output as - possible to the output buffer, and always uses the faster approach on the - first call. So the effects of the flush parameter in this implementation are - on the return value of inflate() as noted below, when inflate() returns early - when Z_BLOCK or Z_TREES is used, and when inflate() avoids the allocation of - memory for a sliding window when Z_FINISH is used. - - If a preset dictionary is needed after this call (see inflateSetDictionary - below), inflate sets strm->adler to the Adler-32 checksum of the dictionary - chosen by the compressor and returns Z_NEED_DICT; otherwise it sets - strm->adler to the Adler-32 checksum of all output produced so far (that is, - total_out bytes) and returns Z_OK, Z_STREAM_END or an error code as described - below. At the end of the stream, inflate() checks that its computed Adler-32 - checksum is equal to that saved by the compressor and returns Z_STREAM_END - only if the checksum is correct. - - inflate() can decompress and check either zlib-wrapped or gzip-wrapped - deflate data. The header type is detected automatically, if requested when - initializing with inflateInit2(). Any information contained in the gzip - header is not retained unless inflateGetHeader() is used. When processing - gzip-wrapped deflate data, strm->adler32 is set to the CRC-32 of the output - produced so far. The CRC-32 is checked against the gzip trailer, as is the - uncompressed length, modulo 2^32. 
- - inflate() returns Z_OK if some progress has been made (more input processed - or more output produced), Z_STREAM_END if the end of the compressed data has - been reached and all uncompressed output has been produced, Z_NEED_DICT if a - preset dictionary is needed at this point, Z_DATA_ERROR if the input data was - corrupted (input stream not conforming to the zlib format or incorrect check - value, in which case strm->msg points to a string with a more specific - error), Z_STREAM_ERROR if the stream structure was inconsistent (for example - next_in or next_out was Z_NULL, or the state was inadvertently written over - by the application), Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR - if no progress was possible or if there was not enough room in the output - buffer when Z_FINISH is used. Note that Z_BUF_ERROR is not fatal, and - inflate() can be called again with more input and more output space to - continue decompressing. If Z_DATA_ERROR is returned, the application may - then call inflateSync() to look for a good compression block if a partial - recovery of the data is to be attempted. -*/ - - -ZEXTERN int ZEXPORT inflateEnd OF((z_streamp strm)); -/* - All dynamically allocated data structures for this stream are freed. - This function discards any unprocessed input and does not flush any pending - output. - - inflateEnd returns Z_OK if success, or Z_STREAM_ERROR if the stream state - was inconsistent. -*/ - - - /* Advanced functions */ - -/* - The following functions are needed only in some special applications. -*/ - -/* -ZEXTERN int ZEXPORT deflateInit2 OF((z_streamp strm, - int level, - int method, - int windowBits, - int memLevel, - int strategy)); - - This is another version of deflateInit with more compression options. The - fields next_in, zalloc, zfree and opaque must be initialized before by the - caller. - - The method parameter is the compression method. It must be Z_DEFLATED in - this version of the library. 
- - The windowBits parameter is the base two logarithm of the window size - (the size of the history buffer). It should be in the range 8..15 for this - version of the library. Larger values of this parameter result in better - compression at the expense of memory usage. The default value is 15 if - deflateInit is used instead. - - For the current implementation of deflate(), a windowBits value of 8 (a - window size of 256 bytes) is not supported. As a result, a request for 8 - will result in 9 (a 512-byte window). In that case, providing 8 to - inflateInit2() will result in an error when the zlib header with 9 is - checked against the initialization of inflate(). The remedy is to not use 8 - with deflateInit2() with this initialization, or at least in that case use 9 - with inflateInit2(). - - windowBits can also be -8..-15 for raw deflate. In this case, -windowBits - determines the window size. deflate() will then generate raw deflate data - with no zlib header or trailer, and will not compute a check value. - - windowBits can also be greater than 15 for optional gzip encoding. Add - 16 to windowBits to write a simple gzip header and trailer around the - compressed data instead of a zlib wrapper. The gzip header will have no - file name, no extra data, no comment, no modification time (set to zero), no - header crc, and the operating system will be set to the appropriate value, - if the operating system was determined at compile time. If a gzip stream is - being written, strm->adler is a CRC-32 instead of an Adler-32. - - For raw deflate or gzip encoding, a request for a 256-byte window is - rejected as invalid, since only the zlib header provides a means of - transmitting the window size to the decompressor. - - The memLevel parameter specifies how much memory should be allocated - for the internal compression state. memLevel=1 uses minimum memory but is - slow and reduces compression ratio; memLevel=9 uses maximum memory for - optimal speed. 
The default value is 8. See zconf.h for total memory usage - as a function of windowBits and memLevel. - - The strategy parameter is used to tune the compression algorithm. Use the - value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a - filter (or predictor), Z_HUFFMAN_ONLY to force Huffman encoding only (no - string match), or Z_RLE to limit match distances to one (run-length - encoding). Filtered data consists mostly of small values with a somewhat - random distribution. In this case, the compression algorithm is tuned to - compress them better. The effect of Z_FILTERED is to force more Huffman - coding and less string matching; it is somewhat intermediate between - Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. Z_RLE is designed to be almost as - fast as Z_HUFFMAN_ONLY, but give better compression for PNG image data. The - strategy parameter only affects the compression ratio but not the - correctness of the compressed output even if it is not set appropriately. - Z_FIXED prevents the use of dynamic Huffman codes, allowing for a simpler - decoder for special applications. - - deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough - memory, Z_STREAM_ERROR if any parameter is invalid (such as an invalid - method), or Z_VERSION_ERROR if the zlib library version (zlib_version) is - incompatible with the version assumed by the caller (ZLIB_VERSION). msg is - set to null if there is no error message. deflateInit2 does not perform any - compression: this will be done by deflate(). -*/ - -ZEXTERN int ZEXPORT deflateSetDictionary OF((z_streamp strm, - const Bytef *dictionary, - uInt dictLength)); -/* - Initializes the compression dictionary from the given byte sequence - without producing any compressed output. When using the zlib format, this - function must be called immediately after deflateInit, deflateInit2 or - deflateReset, and before any call of deflate. 
When doing raw deflate, this - function must be called either before any call of deflate, or immediately - after the completion of a deflate block, i.e. after all input has been - consumed and all output has been delivered when using any of the flush - options Z_BLOCK, Z_PARTIAL_FLUSH, Z_SYNC_FLUSH, or Z_FULL_FLUSH. The - compressor and decompressor must use exactly the same dictionary (see - inflateSetDictionary). - - The dictionary should consist of strings (byte sequences) that are likely - to be encountered later in the data to be compressed, with the most commonly - used strings preferably put towards the end of the dictionary. Using a - dictionary is most useful when the data to be compressed is short and can be - predicted with good accuracy; the data can then be compressed better than - with the default empty dictionary. - - Depending on the size of the compression data structures selected by - deflateInit or deflateInit2, a part of the dictionary may in effect be - discarded, for example if the dictionary is larger than the window size - provided in deflateInit or deflateInit2. Thus the strings most likely to be - useful should be put at the end of the dictionary, not at the front. In - addition, the current implementation of deflate will use at most the window - size minus 262 bytes of the provided dictionary. - - Upon return of this function, strm->adler is set to the Adler-32 value - of the dictionary; the decompressor may later use this value to determine - which dictionary has been used by the compressor. (The Adler-32 value - applies to the whole dictionary even if only a subset of the dictionary is - actually used by the compressor.) If a raw deflate was requested, then the - Adler-32 value is not computed and strm->adler is not set. - - deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a - parameter is invalid (e.g. 
dictionary being Z_NULL) or the stream state is - inconsistent (for example if deflate has already been called for this stream - or if not at a block boundary for raw deflate). deflateSetDictionary does - not perform any compression: this will be done by deflate(). -*/ - -ZEXTERN int ZEXPORT deflateGetDictionary OF((z_streamp strm, - Bytef *dictionary, - uInt *dictLength)); -/* - Returns the sliding dictionary being maintained by deflate. dictLength is - set to the number of bytes in the dictionary, and that many bytes are copied - to dictionary. dictionary must have enough space, where 32768 bytes is - always enough. If deflateGetDictionary() is called with dictionary equal to - Z_NULL, then only the dictionary length is returned, and nothing is copied. - Similary, if dictLength is Z_NULL, then it is not set. - - deflateGetDictionary() may return a length less than the window size, even - when more than the window size in input has been provided. It may return up - to 258 bytes less in that case, due to how zlib's implementation of deflate - manages the sliding window and lookahead for matches, where matches can be - up to 258 bytes long. If the application needs the last window-size bytes of - input, then that would need to be saved by the application outside of zlib. - - deflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the - stream state is inconsistent. -*/ - -ZEXTERN int ZEXPORT deflateCopy OF((z_streamp dest, - z_streamp source)); -/* - Sets the destination stream as a complete copy of the source stream. - - This function can be useful when several compression strategies will be - tried, for example when there are several ways of pre-processing the input - data with a filter. The streams that will be discarded should then be freed - by calling deflateEnd. Note that deflateCopy duplicates the internal - compression state which can be quite large, so this strategy is slow and can - consume lots of memory. 
- - deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not - enough memory, Z_STREAM_ERROR if the source stream state was inconsistent - (such as zalloc being Z_NULL). msg is left unchanged in both source and - destination. -*/ - -ZEXTERN int ZEXPORT deflateReset OF((z_streamp strm)); -/* - This function is equivalent to deflateEnd followed by deflateInit, but - does not free and reallocate the internal compression state. The stream - will leave the compression level and any other attributes that may have been - set unchanged. - - deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent (such as zalloc or state being Z_NULL). -*/ - -ZEXTERN int ZEXPORT deflateParams OF((z_streamp strm, - int level, - int strategy)); -/* - Dynamically update the compression level and compression strategy. The - interpretation of level and strategy is as in deflateInit2(). This can be - used to switch between compression and straight copy of the input data, or - to switch to a different kind of input data requiring a different strategy. - If the compression approach (which is a function of the level) or the - strategy is changed, and if any input has been consumed in a previous - deflate() call, then the input available so far is compressed with the old - level and strategy using deflate(strm, Z_BLOCK). There are three approaches - for the compression levels 0, 1..3, and 4..9 respectively. The new level - and strategy will take effect at the next call of deflate(). - - If a deflate(strm, Z_BLOCK) is performed by deflateParams(), and it does - not have enough output space to complete, then the parameter change will not - take effect. In this case, deflateParams() can be called again with the - same parameters and more output space to try again. 
- - In order to assure a change in the parameters on the first try, the - deflate stream should be flushed using deflate() with Z_BLOCK or other flush - request until strm.avail_out is not zero, before calling deflateParams(). - Then no more input data should be provided before the deflateParams() call. - If this is done, the old level and strategy will be applied to the data - compressed before deflateParams(), and the new level and strategy will be - applied to the the data compressed after deflateParams(). - - deflateParams returns Z_OK on success, Z_STREAM_ERROR if the source stream - state was inconsistent or if a parameter was invalid, or Z_BUF_ERROR if - there was not enough output space to complete the compression of the - available input data before a change in the strategy or approach. Note that - in the case of a Z_BUF_ERROR, the parameters are not changed. A return - value of Z_BUF_ERROR is not fatal, in which case deflateParams() can be - retried with more output space. -*/ - -ZEXTERN int ZEXPORT deflateTune OF((z_streamp strm, - int good_length, - int max_lazy, - int nice_length, - int max_chain)); -/* - Fine tune deflate's internal compression parameters. This should only be - used by someone who understands the algorithm used by zlib's deflate for - searching for the best matching string, and even then only by the most - fanatic optimizer trying to squeeze out the last compressed bit for their - specific input data. Read the deflate.c source code for the meaning of the - max_lazy, good_length, nice_length, and max_chain parameters. - - deflateTune() can be called after deflateInit() or deflateInit2(), and - returns Z_OK on success, or Z_STREAM_ERROR for an invalid deflate stream. - */ - -ZEXTERN uLong ZEXPORT deflateBound OF((z_streamp strm, - uLong sourceLen)); -/* - deflateBound() returns an upper bound on the compressed size after - deflation of sourceLen bytes. 
It must be called after deflateInit() or - deflateInit2(), and after deflateSetHeader(), if used. This would be used - to allocate an output buffer for deflation in a single pass, and so would be - called before deflate(). If that first deflate() call is provided the - sourceLen input bytes, an output buffer allocated to the size returned by - deflateBound(), and the flush value Z_FINISH, then deflate() is guaranteed - to return Z_STREAM_END. Note that it is possible for the compressed size to - be larger than the value returned by deflateBound() if flush options other - than Z_FINISH or Z_NO_FLUSH are used. -*/ - -ZEXTERN int ZEXPORT deflatePending OF((z_streamp strm, - unsigned *pending, - int *bits)); -/* - deflatePending() returns the number of bytes and bits of output that have - been generated, but not yet provided in the available output. The bytes not - provided would be due to the available output space having being consumed. - The number of bits of output not provided are between 0 and 7, where they - await more bits to join them in order to fill out a full byte. If pending - or bits are Z_NULL, then those values are not set. - - deflatePending returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent. - */ - -ZEXTERN int ZEXPORT deflatePrime OF((z_streamp strm, - int bits, - int value)); -/* - deflatePrime() inserts bits in the deflate output stream. The intent - is that this function is used to start off the deflate output with the bits - leftover from a previous deflate stream when appending to it. As such, this - function can only be used for raw deflate, and must be used before the first - deflate() call after a deflateInit2() or deflateReset(). bits must be less - than or equal to 16, and that many of the least significant bits of value - will be inserted in the output. 
- - deflatePrime returns Z_OK if success, Z_BUF_ERROR if there was not enough - room in the internal buffer to insert the bits, or Z_STREAM_ERROR if the - source stream state was inconsistent. -*/ - -ZEXTERN int ZEXPORT deflateSetHeader OF((z_streamp strm, - gz_headerp head)); -/* - deflateSetHeader() provides gzip header information for when a gzip - stream is requested by deflateInit2(). deflateSetHeader() may be called - after deflateInit2() or deflateReset() and before the first call of - deflate(). The text, time, os, extra field, name, and comment information - in the provided gz_header structure are written to the gzip header (xflag is - ignored -- the extra flags are set according to the compression level). The - caller must assure that, if not Z_NULL, name and comment are terminated with - a zero byte, and that if extra is not Z_NULL, that extra_len bytes are - available there. If hcrc is true, a gzip header crc is included. Note that - the current versions of the command-line version of gzip (up through version - 1.3.x) do not support header crc's, and will report that it is a "multi-part - gzip file" and give up. - - If deflateSetHeader is not used, the default gzip header has text false, - the time set to zero, and os set to 255, with no extra, name, or comment - fields. The gzip header is returned to the default state by deflateReset(). - - deflateSetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent. -*/ - -/* -ZEXTERN int ZEXPORT inflateInit2 OF((z_streamp strm, - int windowBits)); - - This is another version of inflateInit with an extra parameter. The - fields next_in, avail_in, zalloc, zfree and opaque must be initialized - before by the caller. - - The windowBits parameter is the base two logarithm of the maximum window - size (the size of the history buffer). It should be in the range 8..15 for - this version of the library. The default value is 15 if inflateInit is used - instead. 
windowBits must be greater than or equal to the windowBits value - provided to deflateInit2() while compressing, or it must be equal to 15 if - deflateInit2() was not used. If a compressed stream with a larger window - size is given as input, inflate() will return with the error code - Z_DATA_ERROR instead of trying to allocate a larger window. - - windowBits can also be zero to request that inflate use the window size in - the zlib header of the compressed stream. - - windowBits can also be -8..-15 for raw inflate. In this case, -windowBits - determines the window size. inflate() will then process raw deflate data, - not looking for a zlib or gzip header, not generating a check value, and not - looking for any check values for comparison at the end of the stream. This - is for use with other formats that use the deflate compressed data format - such as zip. Those formats provide their own check values. If a custom - format is developed using the raw deflate format for compressed data, it is - recommended that a check value such as an Adler-32 or a CRC-32 be applied to - the uncompressed data as is done in the zlib, gzip, and zip formats. For - most applications, the zlib format should be used as is. Note that comments - above on the use in deflateInit2() applies to the magnitude of windowBits. - - windowBits can also be greater than 15 for optional gzip decoding. Add - 32 to windowBits to enable zlib and gzip decoding with automatic header - detection, or add 16 to decode only the gzip format (the zlib format will - return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is a - CRC-32 instead of an Adler-32. Unlike the gunzip utility and gzread() (see - below), inflate() will not automatically decode concatenated gzip streams. - inflate() will return Z_STREAM_END at the end of the gzip stream. The state - would need to be reset to continue decoding a subsequent gzip stream. 
- - inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough - memory, Z_VERSION_ERROR if the zlib library version is incompatible with the - version assumed by the caller, or Z_STREAM_ERROR if the parameters are - invalid, such as a null pointer to the structure. msg is set to null if - there is no error message. inflateInit2 does not perform any decompression - apart from possibly reading the zlib header if present: actual decompression - will be done by inflate(). (So next_in and avail_in may be modified, but - next_out and avail_out are unused and unchanged.) The current implementation - of inflateInit2() does not process any header information -- that is - deferred until inflate() is called. -*/ - -ZEXTERN int ZEXPORT inflateSetDictionary OF((z_streamp strm, - const Bytef *dictionary, - uInt dictLength)); -/* - Initializes the decompression dictionary from the given uncompressed byte - sequence. This function must be called immediately after a call of inflate, - if that call returned Z_NEED_DICT. The dictionary chosen by the compressor - can be determined from the Adler-32 value returned by that call of inflate. - The compressor and decompressor must use exactly the same dictionary (see - deflateSetDictionary). For raw inflate, this function can be called at any - time to set the dictionary. If the provided dictionary is smaller than the - window and there is already data in the window, then the provided dictionary - will amend what's there. The application must insure that the dictionary - that was used for compression is provided. - - inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a - parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is - inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the - expected one (incorrect Adler-32 value). inflateSetDictionary does not - perform any decompression: this will be done by subsequent calls of - inflate(). 
-*/ - -ZEXTERN int ZEXPORT inflateGetDictionary OF((z_streamp strm, - Bytef *dictionary, - uInt *dictLength)); -/* - Returns the sliding dictionary being maintained by inflate. dictLength is - set to the number of bytes in the dictionary, and that many bytes are copied - to dictionary. dictionary must have enough space, where 32768 bytes is - always enough. If inflateGetDictionary() is called with dictionary equal to - Z_NULL, then only the dictionary length is returned, and nothing is copied. - Similary, if dictLength is Z_NULL, then it is not set. - - inflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the - stream state is inconsistent. -*/ - -ZEXTERN int ZEXPORT inflateSync OF((z_streamp strm)); -/* - Skips invalid compressed data until a possible full flush point (see above - for the description of deflate with Z_FULL_FLUSH) can be found, or until all - available input is skipped. No output is provided. - - inflateSync searches for a 00 00 FF FF pattern in the compressed data. - All full flush points have this pattern, but not all occurrences of this - pattern are full flush points. - - inflateSync returns Z_OK if a possible full flush point has been found, - Z_BUF_ERROR if no more input was provided, Z_DATA_ERROR if no flush point - has been found, or Z_STREAM_ERROR if the stream structure was inconsistent. - In the success case, the application may save the current current value of - total_in which indicates where valid compressed data was found. In the - error case, the application may repeatedly call inflateSync, providing more - input each time, until success or end of the input data. -*/ - -ZEXTERN int ZEXPORT inflateCopy OF((z_streamp dest, - z_streamp source)); -/* - Sets the destination stream as a complete copy of the source stream. - - This function can be useful when randomly accessing a large stream. 
The - first pass through the stream can periodically record the inflate state, - allowing restarting inflate at those points when randomly accessing the - stream. - - inflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not - enough memory, Z_STREAM_ERROR if the source stream state was inconsistent - (such as zalloc being Z_NULL). msg is left unchanged in both source and - destination. -*/ - -ZEXTERN int ZEXPORT inflateReset OF((z_streamp strm)); -/* - This function is equivalent to inflateEnd followed by inflateInit, - but does not free and reallocate the internal decompression state. The - stream will keep attributes that may have been set by inflateInit2. - - inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent (such as zalloc or state being Z_NULL). -*/ - -ZEXTERN int ZEXPORT inflateReset2 OF((z_streamp strm, - int windowBits)); -/* - This function is the same as inflateReset, but it also permits changing - the wrap and window size requests. The windowBits parameter is interpreted - the same as it is for inflateInit2. If the window size is changed, then the - memory allocated for the window is freed, and the window will be reallocated - by inflate() if needed. - - inflateReset2 returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent (such as zalloc or state being Z_NULL), or if - the windowBits parameter is invalid. -*/ - -ZEXTERN int ZEXPORT inflatePrime OF((z_streamp strm, - int bits, - int value)); -/* - This function inserts bits in the inflate input stream. The intent is - that this function is used to start inflating at a bit position in the - middle of a byte. The provided bits will be used before any bytes are used - from next_in. This function should only be used with raw inflate, and - should be used before the first inflate() call after inflateInit2() or - inflateReset(). 
bits must be less than or equal to 16, and that many of the - least significant bits of value will be inserted in the input. - - If bits is negative, then the input stream bit buffer is emptied. Then - inflatePrime() can be called again to put bits in the buffer. This is used - to clear out bits leftover after feeding inflate a block description prior - to feeding inflate codes. - - inflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent. -*/ - -ZEXTERN long ZEXPORT inflateMark OF((z_streamp strm)); -/* - This function returns two values, one in the lower 16 bits of the return - value, and the other in the remaining upper bits, obtained by shifting the - return value down 16 bits. If the upper value is -1 and the lower value is - zero, then inflate() is currently decoding information outside of a block. - If the upper value is -1 and the lower value is non-zero, then inflate is in - the middle of a stored block, with the lower value equaling the number of - bytes from the input remaining to copy. If the upper value is not -1, then - it is the number of bits back from the current bit position in the input of - the code (literal or length/distance pair) currently being processed. In - that case the lower value is the number of bytes already emitted for that - code. - - A code is being processed if inflate is waiting for more input to complete - decoding of the code, or if it has completed decoding but is waiting for - more output space to write the literal or match data. - - inflateMark() is used to mark locations in the input data for random - access, which may be at bit positions, and to note those cases where the - output of a code may span boundaries of random access blocks. The current - location in the input stream can be determined from avail_in and data_type - as noted in the description for the Z_BLOCK flush parameter for inflate. 
- - inflateMark returns the value noted above, or -65536 if the provided - source stream state was inconsistent. -*/ - -ZEXTERN int ZEXPORT inflateGetHeader OF((z_streamp strm, - gz_headerp head)); -/* - inflateGetHeader() requests that gzip header information be stored in the - provided gz_header structure. inflateGetHeader() may be called after - inflateInit2() or inflateReset(), and before the first call of inflate(). - As inflate() processes the gzip stream, head->done is zero until the header - is completed, at which time head->done is set to one. If a zlib stream is - being decoded, then head->done is set to -1 to indicate that there will be - no gzip header information forthcoming. Note that Z_BLOCK or Z_TREES can be - used to force inflate() to return immediately after header processing is - complete and before any actual data is decompressed. - - The text, time, xflags, and os fields are filled in with the gzip header - contents. hcrc is set to true if there is a header CRC. (The header CRC - was valid if done is set to one.) If extra is not Z_NULL, then extra_max - contains the maximum number of bytes to write to extra. Once done is true, - extra_len contains the actual extra field length, and extra contains the - extra field, or that field truncated if extra_max is less than extra_len. - If name is not Z_NULL, then up to name_max characters are written there, - terminated with a zero unless the length is greater than name_max. If - comment is not Z_NULL, then up to comm_max characters are written there, - terminated with a zero unless the length is greater than comm_max. When any - of extra, name, or comment are not Z_NULL and the respective field is not - present in the header, then that field is set to Z_NULL to signal its - absence. This allows the use of deflateSetHeader() with the returned - structure to duplicate the header. 
However if those fields are set to - allocated memory, then the application will need to save those pointers - elsewhere so that they can be eventually freed. - - If inflateGetHeader is not used, then the header information is simply - discarded. The header is always checked for validity, including the header - CRC if present. inflateReset() will reset the process to discard the header - information. The application would need to call inflateGetHeader() again to - retrieve the header from the next gzip stream. - - inflateGetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent. -*/ - -/* -ZEXTERN int ZEXPORT inflateBackInit OF((z_streamp strm, int windowBits, - unsigned char FAR *window)); - - Initialize the internal stream state for decompression using inflateBack() - calls. The fields zalloc, zfree and opaque in strm must be initialized - before the call. If zalloc and zfree are Z_NULL, then the default library- - derived memory allocation routines are used. windowBits is the base two - logarithm of the window size, in the range 8..15. window is a caller - supplied buffer of that size. Except for special applications where it is - assured that deflate was used with small window sizes, windowBits must be 15 - and a 32K byte window must be supplied to be able to decompress general - deflate streams. - - See inflateBack() for the usage of these routines. - - inflateBackInit will return Z_OK on success, Z_STREAM_ERROR if any of - the parameters are invalid, Z_MEM_ERROR if the internal state could not be - allocated, or Z_VERSION_ERROR if the version of the library does not match - the version of the header file. 
-*/ - -typedef unsigned (*in_func) OF((void FAR *, - z_const unsigned char FAR * FAR *)); -typedef int (*out_func) OF((void FAR *, unsigned char FAR *, unsigned)); - -ZEXTERN int ZEXPORT inflateBack OF((z_streamp strm, - in_func in, void FAR *in_desc, - out_func out, void FAR *out_desc)); -/* - inflateBack() does a raw inflate with a single call using a call-back - interface for input and output. This is potentially more efficient than - inflate() for file i/o applications, in that it avoids copying between the - output and the sliding window by simply making the window itself the output - buffer. inflate() can be faster on modern CPUs when used with large - buffers. inflateBack() trusts the application to not change the output - buffer passed by the output function, at least until inflateBack() returns. - - inflateBackInit() must be called first to allocate the internal state - and to initialize the state with the user-provided window buffer. - inflateBack() may then be used multiple times to inflate a complete, raw - deflate stream with each call. inflateBackEnd() is then called to free the - allocated state. - - A raw deflate stream is one with no zlib or gzip header or trailer. - This routine would normally be used in a utility that reads zip or gzip - files and writes out uncompressed files. The utility would decode the - header and process the trailer on its own, hence this routine expects only - the raw deflate stream to decompress. This is different from the default - behavior of inflate(), which expects a zlib header and trailer around the - deflate stream. - - inflateBack() uses two subroutines supplied by the caller that are then - called by inflateBack() for input and output. inflateBack() calls those - routines until it reads a complete deflate stream and writes out all of the - uncompressed data, or until it encounters an error. The function's - parameters and return types are defined above in the in_func and out_func - typedefs. 
inflateBack() will call in(in_desc, &buf) which should return the - number of bytes of provided input, and a pointer to that input in buf. If - there is no input available, in() must return zero -- buf is ignored in that - case -- and inflateBack() will return a buffer error. inflateBack() will - call out(out_desc, buf, len) to write the uncompressed data buf[0..len-1]. - out() should return zero on success, or non-zero on failure. If out() - returns non-zero, inflateBack() will return with an error. Neither in() nor - out() are permitted to change the contents of the window provided to - inflateBackInit(), which is also the buffer that out() uses to write from. - The length written by out() will be at most the window size. Any non-zero - amount of input may be provided by in(). - - For convenience, inflateBack() can be provided input on the first call by - setting strm->next_in and strm->avail_in. If that input is exhausted, then - in() will be called. Therefore strm->next_in must be initialized before - calling inflateBack(). If strm->next_in is Z_NULL, then in() will be called - immediately for input. If strm->next_in is not Z_NULL, then strm->avail_in - must also be initialized, and then if strm->avail_in is not zero, input will - initially be taken from strm->next_in[0 .. strm->avail_in - 1]. - - The in_desc and out_desc parameters of inflateBack() is passed as the - first parameter of in() and out() respectively when they are called. These - descriptors can be optionally used to pass any information that the caller- - supplied in() and out() functions need to do their job. - - On return, inflateBack() will set strm->next_in and strm->avail_in to - pass back any unused input that was provided by the last in() call. 
The - return values of inflateBack() can be Z_STREAM_END on success, Z_BUF_ERROR - if in() or out() returned an error, Z_DATA_ERROR if there was a format error - in the deflate stream (in which case strm->msg is set to indicate the nature - of the error), or Z_STREAM_ERROR if the stream was not properly initialized. - In the case of Z_BUF_ERROR, an input or output error can be distinguished - using strm->next_in which will be Z_NULL only if in() returned an error. If - strm->next_in is not Z_NULL, then the Z_BUF_ERROR was due to out() returning - non-zero. (in() will always be called before out(), so strm->next_in is - assured to be defined if out() returns non-zero.) Note that inflateBack() - cannot return Z_OK. -*/ - -ZEXTERN int ZEXPORT inflateBackEnd OF((z_streamp strm)); -/* - All memory allocated by inflateBackInit() is freed. - - inflateBackEnd() returns Z_OK on success, or Z_STREAM_ERROR if the stream - state was inconsistent. -*/ - -ZEXTERN uLong ZEXPORT zlibCompileFlags OF((void)); -/* Return flags indicating compile-time options. 
- - Type sizes, two bits each, 00 = 16 bits, 01 = 32, 10 = 64, 11 = other: - 1.0: size of uInt - 3.2: size of uLong - 5.4: size of voidpf (pointer) - 7.6: size of z_off_t - - Compiler, assembler, and debug options: - 8: ZLIB_DEBUG - 9: ASMV or ASMINF -- use ASM code - 10: ZLIB_WINAPI -- exported functions use the WINAPI calling convention - 11: 0 (reserved) - - One-time table building (smaller code, but not thread-safe if true): - 12: BUILDFIXED -- build static block decoding tables when needed - 13: DYNAMIC_CRC_TABLE -- build CRC calculation tables when needed - 14,15: 0 (reserved) - - Library content (indicates missing functionality): - 16: NO_GZCOMPRESS -- gz* functions cannot compress (to avoid linking - deflate code when not needed) - 17: NO_GZIP -- deflate can't write gzip streams, and inflate can't detect - and decode gzip streams (to avoid linking crc code) - 18-19: 0 (reserved) - - Operation variations (changes in library functionality): - 20: PKZIP_BUG_WORKAROUND -- slightly more permissive inflate - 21: FASTEST -- deflate algorithm with only one, lowest compression level - 22,23: 0 (reserved) - - The sprintf variant used by gzprintf (zero is best): - 24: 0 = vs*, 1 = s* -- 1 means limited to 20 arguments after the format - 25: 0 = *nprintf, 1 = *printf -- 1 means gzprintf() not secure! - 26: 0 = returns value, 1 = void -- 1 means inferred string length returned - - Remainder: - 27-31: 0 (reserved) - */ - -#ifndef Z_SOLO - - /* utility functions */ - -/* - The following utility functions are implemented on top of the basic - stream-oriented functions. To simplify the interface, some default options - are assumed (compression level and memory usage, standard memory allocation - functions). The source code of these utility functions can be modified if - you need special options. -*/ - -ZEXTERN int ZEXPORT compress OF((Bytef *dest, uLongf *destLen, - const Bytef *source, uLong sourceLen)); -/* - Compresses the source buffer into the destination buffer. 
sourceLen is - the byte length of the source buffer. Upon entry, destLen is the total size - of the destination buffer, which must be at least the value returned by - compressBound(sourceLen). Upon exit, destLen is the actual size of the - compressed data. compress() is equivalent to compress2() with a level - parameter of Z_DEFAULT_COMPRESSION. - - compress returns Z_OK if success, Z_MEM_ERROR if there was not - enough memory, Z_BUF_ERROR if there was not enough room in the output - buffer. -*/ - -ZEXTERN int ZEXPORT compress2 OF((Bytef *dest, uLongf *destLen, - const Bytef *source, uLong sourceLen, - int level)); -/* - Compresses the source buffer into the destination buffer. The level - parameter has the same meaning as in deflateInit. sourceLen is the byte - length of the source buffer. Upon entry, destLen is the total size of the - destination buffer, which must be at least the value returned by - compressBound(sourceLen). Upon exit, destLen is the actual size of the - compressed data. - - compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough - memory, Z_BUF_ERROR if there was not enough room in the output buffer, - Z_STREAM_ERROR if the level parameter is invalid. -*/ - -ZEXTERN uLong ZEXPORT compressBound OF((uLong sourceLen)); -/* - compressBound() returns an upper bound on the compressed size after - compress() or compress2() on sourceLen bytes. It would be used before a - compress() or compress2() call to allocate the destination buffer. -*/ - -ZEXTERN int ZEXPORT uncompress OF((Bytef *dest, uLongf *destLen, - const Bytef *source, uLong sourceLen)); -/* - Decompresses the source buffer into the destination buffer. sourceLen is - the byte length of the source buffer. Upon entry, destLen is the total size - of the destination buffer, which must be large enough to hold the entire - uncompressed data. 
(The size of the uncompressed data must have been saved - previously by the compressor and transmitted to the decompressor by some - mechanism outside the scope of this compression library.) Upon exit, destLen - is the actual size of the uncompressed data. - - uncompress returns Z_OK if success, Z_MEM_ERROR if there was not - enough memory, Z_BUF_ERROR if there was not enough room in the output - buffer, or Z_DATA_ERROR if the input data was corrupted or incomplete. In - the case where there is not enough room, uncompress() will fill the output - buffer with the uncompressed data up to that point. -*/ - -ZEXTERN int ZEXPORT uncompress2 OF((Bytef *dest, uLongf *destLen, - const Bytef *source, uLong *sourceLen)); -/* - Same as uncompress, except that sourceLen is a pointer, where the - length of the source is *sourceLen. On return, *sourceLen is the number of - source bytes consumed. -*/ - - /* gzip file access functions */ - -/* - This library supports reading and writing files in gzip (.gz) format with - an interface similar to that of stdio, using the functions that start with - "gz". The gzip format is different from the zlib format. gzip is a gzip - wrapper, documented in RFC 1952, wrapped around a deflate stream. -*/ - -typedef struct gzFile_s *gzFile; /* semi-opaque gzip file descriptor */ - -/* -ZEXTERN gzFile ZEXPORT gzopen OF((const char *path, const char *mode)); - - Opens a gzip (.gz) file for reading or writing. The mode parameter is as - in fopen ("rb" or "wb") but can also include a compression level ("wb9") or - a strategy: 'f' for filtered data as in "wb6f", 'h' for Huffman-only - compression as in "wb1h", 'R' for run-length encoding as in "wb1R", or 'F' - for fixed code compression as in "wb9F". (See the description of - deflateInit2 for more information about the strategy parameter.) 'T' will - request transparent writing or appending with no compression and not using - the gzip format. 
- - "a" can be used instead of "w" to request that the gzip stream that will - be written be appended to the file. "+" will result in an error, since - reading and writing to the same gzip file is not supported. The addition of - "x" when writing will create the file exclusively, which fails if the file - already exists. On systems that support it, the addition of "e" when - reading or writing will set the flag to close the file on an execve() call. - - These functions, as well as gzip, will read and decode a sequence of gzip - streams in a file. The append function of gzopen() can be used to create - such a file. (Also see gzflush() for another way to do this.) When - appending, gzopen does not test whether the file begins with a gzip stream, - nor does it look for the end of the gzip streams to begin appending. gzopen - will simply append a gzip stream to the existing file. - - gzopen can be used to read a file which is not in gzip format; in this - case gzread will directly read from the file without decompression. When - reading, this will be detected automatically by looking for the magic two- - byte gzip header. - - gzopen returns NULL if the file could not be opened, if there was - insufficient memory to allocate the gzFile state, or if an invalid mode was - specified (an 'r', 'w', or 'a' was not provided, or '+' was provided). - errno can be checked to determine if the reason gzopen failed was that the - file could not be opened. -*/ - -ZEXTERN gzFile ZEXPORT gzdopen OF((int fd, const char *mode)); -/* - gzdopen associates a gzFile with the file descriptor fd. File descriptors - are obtained from calls like open, dup, creat, pipe or fileno (if the file - has been previously opened with fopen). The mode parameter is as in gzopen. - - The next call of gzclose on the returned gzFile will also close the file - descriptor fd, just like fclose(fdopen(fd, mode)) closes the file descriptor - fd. 
If you want to keep fd open, use fd = dup(fd_keep); gz = gzdopen(fd, - mode);. The duplicated descriptor should be saved to avoid a leak, since - gzdopen does not close fd if it fails. If you are using fileno() to get the - file descriptor from a FILE *, then you will have to use dup() to avoid - double-close()ing the file descriptor. Both gzclose() and fclose() will - close the associated file descriptor, so they need to have different file - descriptors. - - gzdopen returns NULL if there was insufficient memory to allocate the - gzFile state, if an invalid mode was specified (an 'r', 'w', or 'a' was not - provided, or '+' was provided), or if fd is -1. The file descriptor is not - used until the next gz* read, write, seek, or close operation, so gzdopen - will not detect if fd is invalid (unless fd is -1). -*/ - -ZEXTERN int ZEXPORT gzbuffer OF((gzFile file, unsigned size)); -/* - Set the internal buffer size used by this library's functions. The - default buffer size is 8192 bytes. This function must be called after - gzopen() or gzdopen(), and before any other calls that read or write the - file. The buffer memory allocation is always deferred to the first read or - write. Three times that size in buffer space is allocated. A larger buffer - size of, for example, 64K or 128K bytes will noticeably increase the speed - of decompression (reading). - - The new buffer size also affects the maximum length for gzprintf(). - - gzbuffer() returns 0 on success, or -1 on failure, such as being called - too late. -*/ - -ZEXTERN int ZEXPORT gzsetparams OF((gzFile file, int level, int strategy)); -/* - Dynamically update the compression level or strategy. See the description - of deflateInit2 for the meaning of these parameters. Previously provided - data is flushed before the parameter change. 
- - gzsetparams returns Z_OK if success, Z_STREAM_ERROR if the file was not - opened for writing, Z_ERRNO if there is an error writing the flushed data, - or Z_MEM_ERROR if there is a memory allocation error. -*/ - -ZEXTERN int ZEXPORT gzread OF((gzFile file, voidp buf, unsigned len)); -/* - Reads the given number of uncompressed bytes from the compressed file. If - the input file is not in gzip format, gzread copies the given number of - bytes into the buffer directly from the file. - - After reaching the end of a gzip stream in the input, gzread will continue - to read, looking for another gzip stream. Any number of gzip streams may be - concatenated in the input file, and will all be decompressed by gzread(). - If something other than a gzip stream is encountered after a gzip stream, - that remaining trailing garbage is ignored (and no error is returned). - - gzread can be used to read a gzip file that is being concurrently written. - Upon reaching the end of the input, gzread will return with the available - data. If the error code returned by gzerror is Z_OK or Z_BUF_ERROR, then - gzclearerr can be used to clear the end of file indicator in order to permit - gzread to be tried again. Z_OK indicates that a gzip stream was completed - on the last gzread. Z_BUF_ERROR indicates that the input file ended in the - middle of a gzip stream. Note that gzread does not return -1 in the event - of an incomplete gzip stream. This error is deferred until gzclose(), which - will return Z_BUF_ERROR if the last gzread ended in the middle of a gzip - stream. Alternatively, gzerror can be used before gzclose to detect this - case. - - gzread returns the number of uncompressed bytes actually read, less than - len for end of file, or -1 for error. If len is too large to fit in an int, - then nothing is read, -1 is returned, and the error state is set to - Z_STREAM_ERROR. 
-*/ - -ZEXTERN z_size_t ZEXPORT gzfread OF((voidp buf, z_size_t size, z_size_t nitems, - gzFile file)); -/* - Read up to nitems items of size size from file to buf, otherwise operating - as gzread() does. This duplicates the interface of stdio's fread(), with - size_t request and return types. If the library defines size_t, then - z_size_t is identical to size_t. If not, then z_size_t is an unsigned - integer type that can contain a pointer. - - gzfread() returns the number of full items read of size size, or zero if - the end of the file was reached and a full item could not be read, or if - there was an error. gzerror() must be consulted if zero is returned in - order to determine if there was an error. If the multiplication of size and - nitems overflows, i.e. the product does not fit in a z_size_t, then nothing - is read, zero is returned, and the error state is set to Z_STREAM_ERROR. - - In the event that the end of file is reached and only a partial item is - available at the end, i.e. the remaining uncompressed data length is not a - multiple of size, then the final partial item is nevetheless read into buf - and the end-of-file flag is set. The length of the partial item read is not - provided, but could be inferred from the result of gztell(). This behavior - is the same as the behavior of fread() implementations in common libraries, - but it prevents the direct use of gzfread() to read a concurrently written - file, reseting and retrying on end-of-file, when size is not 1. -*/ - -ZEXTERN int ZEXPORT gzwrite OF((gzFile file, - voidpc buf, unsigned len)); -/* - Writes the given number of uncompressed bytes into the compressed file. - gzwrite returns the number of uncompressed bytes written or 0 in case of - error. 
-*/ - -ZEXTERN z_size_t ZEXPORT gzfwrite OF((voidpc buf, z_size_t size, - z_size_t nitems, gzFile file)); -/* - gzfwrite() writes nitems items of size size from buf to file, duplicating - the interface of stdio's fwrite(), with size_t request and return types. If - the library defines size_t, then z_size_t is identical to size_t. If not, - then z_size_t is an unsigned integer type that can contain a pointer. - - gzfwrite() returns the number of full items written of size size, or zero - if there was an error. If the multiplication of size and nitems overflows, - i.e. the product does not fit in a z_size_t, then nothing is written, zero - is returned, and the error state is set to Z_STREAM_ERROR. -*/ - -ZEXTERN int ZEXPORTVA gzprintf Z_ARG((gzFile file, const char *format, ...)); -/* - Converts, formats, and writes the arguments to the compressed file under - control of the format string, as in fprintf. gzprintf returns the number of - uncompressed bytes actually written, or a negative zlib error code in case - of error. The number of uncompressed bytes written is limited to 8191, or - one less than the buffer size given to gzbuffer(). The caller should assure - that this limit is not exceeded. If it is exceeded, then gzprintf() will - return an error (0) with nothing written. In this case, there may also be a - buffer overflow with unpredictable consequences, which is possible only if - zlib was compiled with the insecure functions sprintf() or vsprintf() - because the secure snprintf() or vsnprintf() functions were not available. - This can be determined using zlibCompileFlags(). -*/ - -ZEXTERN int ZEXPORT gzputs OF((gzFile file, const char *s)); -/* - Writes the given null-terminated string to the compressed file, excluding - the terminating null character. - - gzputs returns the number of characters written, or -1 in case of error. 
-*/ - -ZEXTERN char * ZEXPORT gzgets OF((gzFile file, char *buf, int len)); -/* - Reads bytes from the compressed file until len-1 characters are read, or a - newline character is read and transferred to buf, or an end-of-file - condition is encountered. If any characters are read or if len == 1, the - string is terminated with a null character. If no characters are read due - to an end-of-file or len < 1, then the buffer is left untouched. - - gzgets returns buf which is a null-terminated string, or it returns NULL - for end-of-file or in case of error. If there was an error, the contents at - buf are indeterminate. -*/ - -ZEXTERN int ZEXPORT gzputc OF((gzFile file, int c)); -/* - Writes c, converted to an unsigned char, into the compressed file. gzputc - returns the value that was written, or -1 in case of error. -*/ - -ZEXTERN int ZEXPORT gzgetc OF((gzFile file)); -/* - Reads one byte from the compressed file. gzgetc returns this byte or -1 - in case of end of file or error. This is implemented as a macro for speed. - As such, it does not do all of the checking the other functions do. I.e. - it does not check to see if file is NULL, nor whether the structure file - points to has been clobbered or not. -*/ - -ZEXTERN int ZEXPORT gzungetc OF((int c, gzFile file)); -/* - Push one character back onto the stream to be read as the first character - on the next read. At least one character of push-back is allowed. - gzungetc() returns the character pushed, or -1 on failure. gzungetc() will - fail if c is -1, and may fail if a character has been pushed but not read - yet. If gzungetc is used immediately after gzopen or gzdopen, at least the - output buffer size of pushed characters is allowed. (See gzbuffer above.) - The pushed character will be discarded if the stream is repositioned with - gzseek() or gzrewind(). -*/ - -ZEXTERN int ZEXPORT gzflush OF((gzFile file, int flush)); -/* - Flushes all pending output into the compressed file. 
The parameter flush - is as in the deflate() function. The return value is the zlib error number - (see function gzerror below). gzflush is only permitted when writing. - - If the flush parameter is Z_FINISH, the remaining data is written and the - gzip stream is completed in the output. If gzwrite() is called again, a new - gzip stream will be started in the output. gzread() is able to read such - concatenated gzip streams. - - gzflush should be called only when strictly necessary because it will - degrade compression if called too often. -*/ - -/* -ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile file, - z_off_t offset, int whence)); - - Sets the starting position for the next gzread or gzwrite on the given - compressed file. The offset represents a number of bytes in the - uncompressed data stream. The whence parameter is defined as in lseek(2); - the value SEEK_END is not supported. - - If the file is opened for reading, this function is emulated but can be - extremely slow. If the file is opened for writing, only forward seeks are - supported; gzseek then compresses a sequence of zeroes up to the new - starting position. - - gzseek returns the resulting offset location as measured in bytes from - the beginning of the uncompressed stream, or -1 in case of error, in - particular if the file is opened for writing and the new starting position - would be before the current position. -*/ - -ZEXTERN int ZEXPORT gzrewind OF((gzFile file)); -/* - Rewinds the given file. This function is supported only for reading. - - gzrewind(file) is equivalent to (int)gzseek(file, 0L, SEEK_SET) -*/ - -/* -ZEXTERN z_off_t ZEXPORT gztell OF((gzFile file)); - - Returns the starting position for the next gzread or gzwrite on the given - compressed file. This position represents a number of bytes in the - uncompressed data stream, and is zero when starting, even if appending or - reading a gzip stream from the middle of a file using gzdopen(). 
- - gztell(file) is equivalent to gzseek(file, 0L, SEEK_CUR) -*/ - -/* -ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile file)); - - Returns the current offset in the file being read or written. This offset - includes the count of bytes that precede the gzip stream, for example when - appending or when using gzdopen() for reading. When reading, the offset - does not include as yet unused buffered input. This information can be used - for a progress indicator. On error, gzoffset() returns -1. -*/ - -ZEXTERN int ZEXPORT gzeof OF((gzFile file)); -/* - Returns true (1) if the end-of-file indicator has been set while reading, - false (0) otherwise. Note that the end-of-file indicator is set only if the - read tried to go past the end of the input, but came up short. Therefore, - just like feof(), gzeof() may return false even if there is no more data to - read, in the event that the last read request was for the exact number of - bytes remaining in the input file. This will happen if the input file size - is an exact multiple of the buffer size. - - If gzeof() returns true, then the read functions will return no more data, - unless the end-of-file indicator is reset by gzclearerr() and the input file - has grown since the previous end of file was detected. -*/ - -ZEXTERN int ZEXPORT gzdirect OF((gzFile file)); -/* - Returns true (1) if file is being copied directly while reading, or false - (0) if file is a gzip stream being decompressed. - - If the input file is empty, gzdirect() will return true, since the input - does not contain a gzip stream. - - If gzdirect() is used immediately after gzopen() or gzdopen() it will - cause buffers to be allocated to allow reading the file to determine if it - is a gzip file. Therefore if gzbuffer() is used, it should be called before - gzdirect(). - - When writing, gzdirect() returns true (1) if transparent writing was - requested ("wT" for the gzopen() mode), or false (0) otherwise. (Note: - gzdirect() is not needed when writing. 
Transparent writing must be - explicitly requested, so the application already knows the answer. When - linking statically, using gzdirect() will include all of the zlib code for - gzip file reading and decompression, which may not be desired.) -*/ - -ZEXTERN int ZEXPORT gzclose OF((gzFile file)); -/* - Flushes all pending output if necessary, closes the compressed file and - deallocates the (de)compression state. Note that once file is closed, you - cannot call gzerror with file, since its structures have been deallocated. - gzclose must not be called more than once on the same file, just as free - must not be called more than once on the same allocation. - - gzclose will return Z_STREAM_ERROR if file is not valid, Z_ERRNO on a - file operation error, Z_MEM_ERROR if out of memory, Z_BUF_ERROR if the - last read ended in the middle of a gzip stream, or Z_OK on success. -*/ - -ZEXTERN int ZEXPORT gzclose_r OF((gzFile file)); -ZEXTERN int ZEXPORT gzclose_w OF((gzFile file)); -/* - Same as gzclose(), but gzclose_r() is only for use when reading, and - gzclose_w() is only for use when writing or appending. The advantage to - using these instead of gzclose() is that they avoid linking in zlib - compression or decompression code that is not used when only reading or only - writing respectively. If gzclose() is used, then both compression and - decompression code will be included the application when linking to a static - zlib library. -*/ - -ZEXTERN const char * ZEXPORT gzerror OF((gzFile file, int *errnum)); -/* - Returns the error message for the last error which occurred on the given - compressed file. errnum is set to zlib error number. If an error occurred - in the file system and not in the compression library, errnum is set to - Z_ERRNO and the application may consult errno to get the exact error code. - - The application must not modify the returned string. Future calls to - this function may invalidate the previously returned string. 
If file is - closed, then the string previously returned by gzerror will no longer be - available. - - gzerror() should be used to distinguish errors from end-of-file for those - functions above that do not distinguish those cases in their return values. -*/ - -ZEXTERN void ZEXPORT gzclearerr OF((gzFile file)); -/* - Clears the error and end-of-file flags for file. This is analogous to the - clearerr() function in stdio. This is useful for continuing to read a gzip - file that is being written concurrently. -*/ - -#endif /* !Z_SOLO */ - - /* checksum functions */ - -/* - These functions are not related to compression but are exported - anyway because they might be useful in applications using the compression - library. -*/ - -ZEXTERN uLong ZEXPORT adler32 OF((uLong adler, const Bytef *buf, uInt len)); -/* - Update a running Adler-32 checksum with the bytes buf[0..len-1] and - return the updated checksum. If buf is Z_NULL, this function returns the - required initial value for the checksum. - - An Adler-32 checksum is almost as reliable as a CRC-32 but can be computed - much faster. - - Usage example: - - uLong adler = adler32(0L, Z_NULL, 0); - - while (read_buffer(buffer, length) != EOF) { - adler = adler32(adler, buffer, length); - } - if (adler != original_adler) error(); -*/ - -ZEXTERN uLong ZEXPORT adler32_z OF((uLong adler, const Bytef *buf, - z_size_t len)); -/* - Same as adler32(), but with a size_t length. -*/ - -/* -ZEXTERN uLong ZEXPORT adler32_combine OF((uLong adler1, uLong adler2, - z_off_t len2)); - - Combine two Adler-32 checksums into one. For two sequences of bytes, seq1 - and seq2 with lengths len1 and len2, Adler-32 checksums were calculated for - each, adler1 and adler2. adler32_combine() returns the Adler-32 checksum of - seq1 and seq2 concatenated, requiring only adler1, adler2, and len2. Note - that the z_off_t type (like off_t) is a signed integer. If len2 is - negative, the result has no meaning or utility. 
-*/ - -ZEXTERN uLong ZEXPORT crc32 OF((uLong crc, const Bytef *buf, uInt len)); -/* - Update a running CRC-32 with the bytes buf[0..len-1] and return the - updated CRC-32. If buf is Z_NULL, this function returns the required - initial value for the crc. Pre- and post-conditioning (one's complement) is - performed within this function so it shouldn't be done by the application. - - Usage example: - - uLong crc = crc32(0L, Z_NULL, 0); - - while (read_buffer(buffer, length) != EOF) { - crc = crc32(crc, buffer, length); - } - if (crc != original_crc) error(); -*/ - -ZEXTERN uLong ZEXPORT crc32_z OF((uLong adler, const Bytef *buf, - z_size_t len)); -/* - Same as crc32(), but with a size_t length. -*/ - -/* -ZEXTERN uLong ZEXPORT crc32_combine OF((uLong crc1, uLong crc2, z_off_t len2)); - - Combine two CRC-32 check values into one. For two sequences of bytes, - seq1 and seq2 with lengths len1 and len2, CRC-32 check values were - calculated for each, crc1 and crc2. crc32_combine() returns the CRC-32 - check value of seq1 and seq2 concatenated, requiring only crc1, crc2, and - len2. 
-*/ - - - /* various hacks, don't look :) */ - -/* deflateInit and inflateInit are macros to allow checking the zlib version - * and the compiler's view of z_stream: - */ -ZEXTERN int ZEXPORT deflateInit_ OF((z_streamp strm, int level, - const char *version, int stream_size)); -ZEXTERN int ZEXPORT inflateInit_ OF((z_streamp strm, - const char *version, int stream_size)); -ZEXTERN int ZEXPORT deflateInit2_ OF((z_streamp strm, int level, int method, - int windowBits, int memLevel, - int strategy, const char *version, - int stream_size)); -ZEXTERN int ZEXPORT inflateInit2_ OF((z_streamp strm, int windowBits, - const char *version, int stream_size)); -ZEXTERN int ZEXPORT inflateBackInit_ OF((z_streamp strm, int windowBits, - unsigned char FAR *window, - const char *version, - int stream_size)); -#ifdef Z_PREFIX_SET -# define z_deflateInit(strm, level) \ - deflateInit_((strm), (level), ZLIB_VERSION, (int)sizeof(z_stream)) -# define z_inflateInit(strm) \ - inflateInit_((strm), ZLIB_VERSION, (int)sizeof(z_stream)) -# define z_deflateInit2(strm, level, method, windowBits, memLevel, strategy) \ - deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\ - (strategy), ZLIB_VERSION, (int)sizeof(z_stream)) -# define z_inflateInit2(strm, windowBits) \ - inflateInit2_((strm), (windowBits), ZLIB_VERSION, \ - (int)sizeof(z_stream)) -# define z_inflateBackInit(strm, windowBits, window) \ - inflateBackInit_((strm), (windowBits), (window), \ - ZLIB_VERSION, (int)sizeof(z_stream)) -#else -# define deflateInit(strm, level) \ - deflateInit_((strm), (level), ZLIB_VERSION, (int)sizeof(z_stream)) -# define inflateInit(strm) \ - inflateInit_((strm), ZLIB_VERSION, (int)sizeof(z_stream)) -# define deflateInit2(strm, level, method, windowBits, memLevel, strategy) \ - deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\ - (strategy), ZLIB_VERSION, (int)sizeof(z_stream)) -# define inflateInit2(strm, windowBits) \ - inflateInit2_((strm), (windowBits), ZLIB_VERSION, \ - 
(int)sizeof(z_stream)) -# define inflateBackInit(strm, windowBits, window) \ - inflateBackInit_((strm), (windowBits), (window), \ - ZLIB_VERSION, (int)sizeof(z_stream)) -#endif - -#ifndef Z_SOLO - -/* gzgetc() macro and its supporting function and exposed data structure. Note - * that the real internal state is much larger than the exposed structure. - * This abbreviated structure exposes just enough for the gzgetc() macro. The - * user should not mess with these exposed elements, since their names or - * behavior could change in the future, perhaps even capriciously. They can - * only be used by the gzgetc() macro. You have been warned. - */ -struct gzFile_s { - unsigned have; - unsigned char *next; - z_off64_t pos; -}; -ZEXTERN int ZEXPORT gzgetc_ OF((gzFile file)); /* backward compatibility */ -#ifdef Z_PREFIX_SET -# undef z_gzgetc -# define z_gzgetc(g) \ - ((g)->have ? ((g)->have--, (g)->pos++, *((g)->next)++) : (gzgetc)(g)) -#else -# define gzgetc(g) \ - ((g)->have ? ((g)->have--, (g)->pos++, *((g)->next)++) : (gzgetc)(g)) -#endif - -/* provide 64-bit offset functions if _LARGEFILE64_SOURCE defined, and/or - * change the regular functions to 64 bits if _FILE_OFFSET_BITS is 64 (if - * both are true, the application gets the *64 functions, and the regular - * functions are changed to 64 bits) -- in case these are set on systems - * without large file support, _LFS64_LARGEFILE must also be true - */ -#ifdef Z_LARGE64 - ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); - ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int)); - ZEXTERN z_off64_t ZEXPORT gztell64 OF((gzFile)); - ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile)); - ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off64_t)); - ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off64_t)); -#endif - -#if !defined(ZLIB_INTERNAL) && defined(Z_WANT64) -# ifdef Z_PREFIX_SET -# define z_gzopen z_gzopen64 -# define z_gzseek z_gzseek64 -# define z_gztell z_gztell64 
-# define z_gzoffset z_gzoffset64 -# define z_adler32_combine z_adler32_combine64 -# define z_crc32_combine z_crc32_combine64 -# else -# define gzopen gzopen64 -# define gzseek gzseek64 -# define gztell gztell64 -# define gzoffset gzoffset64 -# define adler32_combine adler32_combine64 -# define crc32_combine crc32_combine64 -# endif -# ifndef Z_LARGE64 - ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); - ZEXTERN z_off_t ZEXPORT gzseek64 OF((gzFile, z_off_t, int)); - ZEXTERN z_off_t ZEXPORT gztell64 OF((gzFile)); - ZEXTERN z_off_t ZEXPORT gzoffset64 OF((gzFile)); - ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off_t)); - ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off_t)); -# endif -#else - ZEXTERN gzFile ZEXPORT gzopen OF((const char *, const char *)); - ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile, z_off_t, int)); - ZEXTERN z_off_t ZEXPORT gztell OF((gzFile)); - ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile)); - ZEXTERN uLong ZEXPORT adler32_combine OF((uLong, uLong, z_off_t)); - ZEXTERN uLong ZEXPORT crc32_combine OF((uLong, uLong, z_off_t)); -#endif - -#else /* Z_SOLO */ - - ZEXTERN uLong ZEXPORT adler32_combine OF((uLong, uLong, z_off_t)); - ZEXTERN uLong ZEXPORT crc32_combine OF((uLong, uLong, z_off_t)); - -#endif /* !Z_SOLO */ - -/* undocumented functions */ -ZEXTERN const char * ZEXPORT zError OF((int)); -ZEXTERN int ZEXPORT inflateSyncPoint OF((z_streamp)); -ZEXTERN const z_crc_t FAR * ZEXPORT get_crc_table OF((void)); -ZEXTERN int ZEXPORT inflateUndermine OF((z_streamp, int)); -ZEXTERN int ZEXPORT inflateValidate OF((z_streamp, int)); -ZEXTERN unsigned long ZEXPORT inflateCodesUsed OF ((z_streamp)); -ZEXTERN int ZEXPORT inflateResetKeep OF((z_streamp)); -ZEXTERN int ZEXPORT deflateResetKeep OF((z_streamp)); -#if (defined(_WIN32) || defined(__CYGWIN__)) && !defined(Z_SOLO) -ZEXTERN gzFile ZEXPORT gzopen_w OF((const wchar_t *path, - const char *mode)); -#endif -#if defined(STDC) || defined(Z_HAVE_STDARG_H) -# ifndef 
Z_SOLO -ZEXTERN int ZEXPORTVA gzvprintf Z_ARG((gzFile file, - const char *format, - va_list va)); -# endif -#endif - -#ifdef __cplusplus -} -#endif - -#endif /* ZLIB_H */ diff --git a/dist/darwin_amd64/include/zstd.h b/dist/darwin_amd64/include/zstd.h deleted file mode 100644 index 8c6fc6a..0000000 --- a/dist/darwin_amd64/include/zstd.h +++ /dev/null @@ -1,2090 +0,0 @@ -/* - * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ -#if defined (__cplusplus) -extern "C" { -#endif - -#ifndef ZSTD_H_235446 -#define ZSTD_H_235446 - -/* ====== Dependency ======*/ -#include /* INT_MAX */ -#include /* size_t */ - - -/* ===== ZSTDLIB_API : control library symbols visibility ===== */ -#ifndef ZSTDLIB_VISIBILITY -# if defined(__GNUC__) && (__GNUC__ >= 4) -# define ZSTDLIB_VISIBILITY __attribute__ ((visibility ("default"))) -# else -# define ZSTDLIB_VISIBILITY -# endif -#endif -#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) -# define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBILITY -#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) -# define ZSTDLIB_API __declspec(dllimport) ZSTDLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ -#else -# define ZSTDLIB_API ZSTDLIB_VISIBILITY -#endif - - -/******************************************************************************* - Introduction - - zstd, short for Zstandard, is a fast lossless compression algorithm, targeting - real-time compression scenarios at zlib-level and better compression ratios. - The zstd compression library provides in-memory compression and decompression - functions. 
- - The library supports regular compression levels from 1 up to ZSTD_maxCLevel(), - which is currently 22. Levels >= 20, labeled `--ultra`, should be used with - caution, as they require more memory. The library also offers negative - compression levels, which extend the range of speed vs. ratio preferences. - The lower the level, the faster the speed (at the cost of compression). - - Compression can be done in: - - a single step (described as Simple API) - - a single step, reusing a context (described as Explicit context) - - unbounded multiple steps (described as Streaming compression) - - The compression ratio achievable on small data can be highly improved using - a dictionary. Dictionary compression can be performed in: - - a single step (described as Simple dictionary API) - - a single step, reusing a dictionary (described as Bulk-processing - dictionary API) - - Advanced experimental functions can be accessed using - `#define ZSTD_STATIC_LINKING_ONLY` before including zstd.h. - - Advanced experimental APIs should never be used with a dynamically-linked - library. They are not "stable"; their definitions or signatures may change in - the future. Only static linking is allowed. 
-*******************************************************************************/ - -/*------ Version ------*/ -#define ZSTD_VERSION_MAJOR 1 -#define ZSTD_VERSION_MINOR 4 -#define ZSTD_VERSION_RELEASE 5 - -#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE) -ZSTDLIB_API unsigned ZSTD_versionNumber(void); /**< to check runtime library version */ - -#define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE -#define ZSTD_QUOTE(str) #str -#define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str) -#define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION) -ZSTDLIB_API const char* ZSTD_versionString(void); /* requires v1.3.0+ */ - -/* ************************************* - * Default constant - ***************************************/ -#ifndef ZSTD_CLEVEL_DEFAULT -# define ZSTD_CLEVEL_DEFAULT 3 -#endif - -/* ************************************* - * Constants - ***************************************/ - -/* All magic numbers are supposed read/written to/from files/memory using little-endian convention */ -#define ZSTD_MAGICNUMBER 0xFD2FB528 /* valid since v0.8.0 */ -#define ZSTD_MAGIC_DICTIONARY 0xEC30A437 /* valid since v0.7.0 */ -#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50 /* all 16 values, from 0x184D2A50 to 0x184D2A5F, signal the beginning of a skippable frame */ -#define ZSTD_MAGIC_SKIPPABLE_MASK 0xFFFFFFF0 - -#define ZSTD_BLOCKSIZELOG_MAX 17 -#define ZSTD_BLOCKSIZE_MAX (1<= `ZSTD_compressBound(srcSize)`. - * @return : compressed size written into `dst` (<= `dstCapacity), - * or an error code if it fails (which can be tested using ZSTD_isError()). */ -ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - int compressionLevel); - -/*! ZSTD_decompress() : - * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames. - * `dstCapacity` is an upper bound of originalSize to regenerate. 
- * If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data. - * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`), - * or an errorCode if it fails (which can be tested using ZSTD_isError()). */ -ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity, - const void* src, size_t compressedSize); - -/*! ZSTD_getFrameContentSize() : requires v1.3.0+ - * `src` should point to the start of a ZSTD encoded frame. - * `srcSize` must be at least as large as the frame header. - * hint : any size >= `ZSTD_frameHeaderSize_max` is large enough. - * @return : - decompressed size of `src` frame content, if known - * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined - * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) - * note 1 : a 0 return value means the frame is valid but "empty". - * note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode. - * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size. - * In which case, it's necessary to use streaming mode to decompress data. - * Optionally, application can rely on some implicit limit, - * as ZSTD_decompress() only needs an upper bound of decompressed size. - * (For example, data could be necessarily cut into blocks <= 16 KB). - * note 3 : decompressed size is always present when compression is completed using single-pass functions, - * such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict(). - * note 4 : decompressed size can be very large (64-bits value), - * potentially larger than what local system can handle as a single memory segment. - * In which case, it's necessary to use streaming mode to decompress data. - * note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified. - * Always ensure return value fits within application's authorized limits. 
- * Each application can set its own limits. - * note 6 : This function replaces ZSTD_getDecompressedSize() */ -#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1) -#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2) -ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize); - -/*! ZSTD_getDecompressedSize() : - * NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize(). - * Both functions work the same way, but ZSTD_getDecompressedSize() blends - * "empty", "unknown" and "error" results to the same return value (0), - * while ZSTD_getFrameContentSize() gives them separate return values. - * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */ -ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize); - -/*! ZSTD_findFrameCompressedSize() : - * `src` should point to the start of a ZSTD frame or skippable frame. - * `srcSize` must be >= first frame size - * @return : the compressed size of the first frame starting at `src`, - * suitable to pass as `srcSize` to `ZSTD_decompress` or similar, - * or an error code if input is invalid */ -ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize); - - -/*====== Helper functions ======*/ -#define ZSTD_COMPRESSBOUND(srcSize) ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? 
(((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */ -ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */ -ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */ -ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */ -ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed */ -ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */ - - -/*************************************** -* Explicit context -***************************************/ -/*= Compression context - * When compressing many times, - * it is recommended to allocate a context just once, - * and re-use it for each successive compression operation. - * This will make workload friendlier for system's memory. - * Note : re-using context is just a speed / resource optimization. - * It doesn't change the compression ratio, which remains identical. - * Note 2 : In multi-threaded environments, - * use one different context per thread for parallel execution. - */ -typedef struct ZSTD_CCtx_s ZSTD_CCtx; -ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void); -ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); - -/*! ZSTD_compressCCtx() : - * Same as ZSTD_compress(), using an explicit ZSTD_CCtx. - * Important : in order to behave similarly to `ZSTD_compress()`, - * this function compresses at requested compression level, - * __ignoring any other parameter__ . - * If any advanced parameter was set using the advanced API, - * they will all be reset. Only `compressionLevel` remains. 
- */ -ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - int compressionLevel); - -/*= Decompression context - * When decompressing many times, - * it is recommended to allocate a context only once, - * and re-use it for each successive compression operation. - * This will make workload friendlier for system's memory. - * Use one context per thread for parallel execution. */ -typedef struct ZSTD_DCtx_s ZSTD_DCtx; -ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void); -ZSTDLIB_API size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx); - -/*! ZSTD_decompressDCtx() : - * Same as ZSTD_decompress(), - * requires an allocated ZSTD_DCtx. - * Compatible with sticky parameters. - */ -ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize); - - -/*************************************** -* Advanced compression API -***************************************/ - -/* API design : - * Parameters are pushed one by one into an existing context, - * using ZSTD_CCtx_set*() functions. - * Pushed parameters are sticky : they are valid for next compressed frame, and any subsequent frame. - * "sticky" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()` ! - * __They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()__ . - * - * It's possible to reset all parameters to "default" using ZSTD_CCtx_reset(). - * - * This API supercedes all other "advanced" API entry points in the experimental section. - * In the future, we expect to remove from experimental API entry points which are redundant with this API. - */ - - -/* Compression strategies, listed from fastest to strongest */ -typedef enum { ZSTD_fast=1, - ZSTD_dfast=2, - ZSTD_greedy=3, - ZSTD_lazy=4, - ZSTD_lazy2=5, - ZSTD_btlazy2=6, - ZSTD_btopt=7, - ZSTD_btultra=8, - ZSTD_btultra2=9 - /* note : new strategies _might_ be added in the future. 
- Only the order (from fast to strong) is guaranteed */ -} ZSTD_strategy; - - -typedef enum { - - /* compression parameters - * Note: When compressing with a ZSTD_CDict these parameters are superseded - * by the parameters used to construct the ZSTD_CDict. - * See ZSTD_CCtx_refCDict() for more info (superseded-by-cdict). */ - ZSTD_c_compressionLevel=100, /* Set compression parameters according to pre-defined cLevel table. - * Note that exact compression parameters are dynamically determined, - * depending on both compression level and srcSize (when known). - * Default level is ZSTD_CLEVEL_DEFAULT==3. - * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT. - * Note 1 : it's possible to pass a negative compression level. - * Note 2 : setting a level does not automatically set all other compression parameters - * to default. Setting this will however eventually dynamically impact the compression - * parameters which have not been manually set. The manually set - * ones will 'stick'. */ - /* Advanced compression parameters : - * It's possible to pin down compression parameters to some specific values. - * In which case, these values are no longer dynamically selected by the compressor */ - ZSTD_c_windowLog=101, /* Maximum allowed back-reference distance, expressed as power of 2. - * This will set a memory budget for streaming decompression, - * with larger values requiring more memory - * and typically compressing more. - * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX. - * Special: value 0 means "use default windowLog". - * Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT - * requires explicitly allowing such size at streaming decompression stage. */ - ZSTD_c_hashLog=102, /* Size of the initial probe table, as a power of 2. - * Resulting memory usage is (1 << (hashLog+2)). - * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX. 
- * Larger tables improve compression ratio of strategies <= dFast, - * and improve speed of strategies > dFast. - * Special: value 0 means "use default hashLog". */ - ZSTD_c_chainLog=103, /* Size of the multi-probe search table, as a power of 2. - * Resulting memory usage is (1 << (chainLog+2)). - * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX. - * Larger tables result in better and slower compression. - * This parameter is useless for "fast" strategy. - * It's still useful when using "dfast" strategy, - * in which case it defines a secondary probe table. - * Special: value 0 means "use default chainLog". */ - ZSTD_c_searchLog=104, /* Number of search attempts, as a power of 2. - * More attempts result in better and slower compression. - * This parameter is useless for "fast" and "dFast" strategies. - * Special: value 0 means "use default searchLog". */ - ZSTD_c_minMatch=105, /* Minimum size of searched matches. - * Note that Zstandard can still find matches of smaller size, - * it just tweaks its search algorithm to look for this size and larger. - * Larger values increase compression and decompression speed, but decrease ratio. - * Must be clamped between ZSTD_MINMATCH_MIN and ZSTD_MINMATCH_MAX. - * Note that currently, for all strategies < btopt, effective minimum is 4. - * , for all strategies > fast, effective maximum is 6. - * Special: value 0 means "use default minMatchLength". */ - ZSTD_c_targetLength=106, /* Impact of this field depends on strategy. - * For strategies btopt, btultra & btultra2: - * Length of Match considered "good enough" to stop search. - * Larger values make compression stronger, and slower. - * For strategy fast: - * Distance between match sampling. - * Larger values make compression faster, and weaker. - * Special: value 0 means "use default targetLength". */ - ZSTD_c_strategy=107, /* See ZSTD_strategy enum definition. 
- * The higher the value of selected strategy, the more complex it is, - * resulting in stronger and slower compression. - * Special: value 0 means "use default strategy". */ - - /* LDM mode parameters */ - ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching. - * This parameter is designed to improve compression ratio - * for large inputs, by finding large matches at long distance. - * It increases memory usage and window size. - * Note: enabling this parameter increases default ZSTD_c_windowLog to 128 MB - * except when expressly set to a different value. */ - ZSTD_c_ldmHashLog=161, /* Size of the table for long distance matching, as a power of 2. - * Larger values increase memory usage and compression ratio, - * but decrease compression speed. - * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX - * default: windowlog - 7. - * Special: value 0 means "automatically determine hashlog". */ - ZSTD_c_ldmMinMatch=162, /* Minimum match size for long distance matcher. - * Larger/too small values usually decrease compression ratio. - * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX. - * Special: value 0 means "use default value" (default: 64). */ - ZSTD_c_ldmBucketSizeLog=163, /* Log size of each bucket in the LDM hash table for collision resolution. - * Larger values improve collision resolution but decrease compression speed. - * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX. - * Special: value 0 means "use default value" (default: 3). */ - ZSTD_c_ldmHashRateLog=164, /* Frequency of inserting/looking up entries into the LDM hash table. - * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN). - * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage. - * Larger values improve compression speed. - * Deviating far from default value will likely result in a compression ratio decrease. - * Special: value 0 means "automatically determine hashRateLog". 
*/ - - /* frame parameters */ - ZSTD_c_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1) - * Content size must be known at the beginning of compression. - * This is automatically the case when using ZSTD_compress2(), - * For streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */ - ZSTD_c_checksumFlag=201, /* A 32-bits checksum of content is written at end of frame (default:0) */ - ZSTD_c_dictIDFlag=202, /* When applicable, dictionary's ID is written into frame header (default:1) */ - - /* multi-threading parameters */ - /* These parameters are only useful if multi-threading is enabled (compiled with build macro ZSTD_MULTITHREAD). - * They return an error otherwise. */ - ZSTD_c_nbWorkers=400, /* Select how many threads will be spawned to compress in parallel. - * When nbWorkers >= 1, triggers asynchronous mode when used with ZSTD_compressStream*() : - * ZSTD_compressStream*() consumes input and flush output if possible, but immediately gives back control to caller, - * while compression work is performed in parallel, within worker threads. - * (note : a strong exception to this rule is when first invocation of ZSTD_compressStream2() sets ZSTD_e_end : - * in which case, ZSTD_compressStream2() delegates to ZSTD_compress2(), which is always a blocking call). - * More workers improve speed, but also increase memory usage. - * Default value is `0`, aka "single-threaded mode" : no worker is spawned, compression is performed inside Caller's thread, all invocations are blocking */ - ZSTD_c_jobSize=401, /* Size of a compression job. This value is enforced only when nbWorkers >= 1. - * Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads. - * 0 means default, which is dynamically determined based on compression parameters. - * Job size must be a minimum of overlap size, or 1 MB, whichever is largest. 
- * The minimum size is automatically and transparently enforced. */ - ZSTD_c_overlapLog=402, /* Control the overlap size, as a fraction of window size. - * The overlap size is an amount of data reloaded from previous job at the beginning of a new job. - * It helps preserve compression ratio, while each job is compressed in parallel. - * This value is enforced only when nbWorkers >= 1. - * Larger values increase compression ratio, but decrease speed. - * Possible values range from 0 to 9 : - * - 0 means "default" : value will be determined by the library, depending on strategy - * - 1 means "no overlap" - * - 9 means "full overlap", using a full window size. - * Each intermediate rank increases/decreases load size by a factor 2 : - * 9: full window; 8: w/2; 7: w/4; 6: w/8; 5:w/16; 4: w/32; 3:w/64; 2:w/128; 1:no overlap; 0:default - * default value varies between 6 and 9, depending on strategy */ - - /* note : additional experimental parameters are also available - * within the experimental section of the API. - * At the time of this writing, they include : - * ZSTD_c_rsyncable - * ZSTD_c_format - * ZSTD_c_forceMaxWindow - * ZSTD_c_forceAttachDict - * ZSTD_c_literalCompressionMode - * ZSTD_c_targetCBlockSize - * ZSTD_c_srcSizeHint - * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them. - * note : never ever use experimentalParam? names directly; - * also, the enums values themselves are unstable and can still change. - */ - ZSTD_c_experimentalParam1=500, - ZSTD_c_experimentalParam2=10, - ZSTD_c_experimentalParam3=1000, - ZSTD_c_experimentalParam4=1001, - ZSTD_c_experimentalParam5=1002, - ZSTD_c_experimentalParam6=1003, - ZSTD_c_experimentalParam7=1004 -} ZSTD_cParameter; - -typedef struct { - size_t error; - int lowerBound; - int upperBound; -} ZSTD_bounds; - -/*! 
ZSTD_cParam_getBounds() : - * All parameters must belong to an interval with lower and upper bounds, - * otherwise they will either trigger an error or be automatically clamped. - * @return : a structure, ZSTD_bounds, which contains - * - an error status field, which must be tested using ZSTD_isError() - * - lower and upper bounds, both inclusive - */ -ZSTDLIB_API ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter cParam); - -/*! ZSTD_CCtx_setParameter() : - * Set one compression parameter, selected by enum ZSTD_cParameter. - * All parameters have valid bounds. Bounds can be queried using ZSTD_cParam_getBounds(). - * Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter). - * Setting a parameter is generally only possible during frame initialization (before starting compression). - * Exception : when using multi-threading mode (nbWorkers >= 1), - * the following parameters can be updated _during_ compression (within same frame): - * => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy. - * new parameters will be active for next job only (after a flush()). - * @return : an error code (which can be tested using ZSTD_isError()). - */ -ZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value); - -/*! ZSTD_CCtx_setPledgedSrcSize() : - * Total input data size to be compressed as a single frame. - * Value will be written in frame header, unless if explicitly forbidden using ZSTD_c_contentSizeFlag. - * This value will also be controlled at end of frame, and trigger an error if not respected. - * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame. - * In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN. - * ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame. - * Note 2 : pledgedSrcSize is only valid once, for the next frame. 
- * It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN. - * Note 3 : Whenever all input data is provided and consumed in a single round, - * for example with ZSTD_compress2(), - * or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end), - * this value is automatically overridden by srcSize instead. - */ -ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize); - -typedef enum { - ZSTD_reset_session_only = 1, - ZSTD_reset_parameters = 2, - ZSTD_reset_session_and_parameters = 3 -} ZSTD_ResetDirective; - -/*! ZSTD_CCtx_reset() : - * There are 2 different things that can be reset, independently or jointly : - * - The session : will stop compressing current frame, and make CCtx ready to start a new one. - * Useful after an error, or to interrupt any ongoing compression. - * Any internal data not yet flushed is cancelled. - * Compression parameters and dictionary remain unchanged. - * They will be used to compress next frame. - * Resetting session never fails. - * - The parameters : changes all parameters back to "default". - * This removes any reference to any dictionary too. - * Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing) - * otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError()) - * - Both : similar to resetting the session, followed by resetting parameters. - */ -ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset); - -/*! ZSTD_compress2() : - * Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API. - * ZSTD_compress2() always starts a new frame. - * Should cctx hold data from a previously unfinished frame, everything about it is forgotten. - * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*() - * - The function is always blocking, returns when compression is completed. 
- * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`. - * @return : compressed size written into `dst` (<= `dstCapacity), - * or an error code if it fails (which can be tested using ZSTD_isError()). - */ -ZSTDLIB_API size_t ZSTD_compress2( ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize); - - -/*************************************** -* Advanced decompression API -***************************************/ - -/* The advanced API pushes parameters one by one into an existing DCtx context. - * Parameters are sticky, and remain valid for all following frames - * using the same DCtx context. - * It's possible to reset parameters to default values using ZSTD_DCtx_reset(). - * Note : This API is compatible with existing ZSTD_decompressDCtx() and ZSTD_decompressStream(). - * Therefore, no new decompression function is necessary. - */ - -typedef enum { - - ZSTD_d_windowLogMax=100, /* Select a size limit (in power of 2) beyond which - * the streaming API will refuse to allocate memory buffer - * in order to protect the host from unreasonable memory requirements. - * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode. - * By default, a decompression context accepts window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT). - * Special: value 0 means "use default maximum windowLog". */ - - /* note : additional experimental parameters are also available - * within the experimental section of the API. - * At the time of this writing, they include : - * ZSTD_d_format - * ZSTD_d_stableOutBuffer - * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them. - * note : never ever use experimentalParam? names directly - */ - ZSTD_d_experimentalParam1=1000, - ZSTD_d_experimentalParam2=1001 - -} ZSTD_dParameter; - -/*! 
ZSTD_dParam_getBounds() : - * All parameters must belong to an interval with lower and upper bounds, - * otherwise they will either trigger an error or be automatically clamped. - * @return : a structure, ZSTD_bounds, which contains - * - an error status field, which must be tested using ZSTD_isError() - * - both lower and upper bounds, inclusive - */ -ZSTDLIB_API ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam); - -/*! ZSTD_DCtx_setParameter() : - * Set one compression parameter, selected by enum ZSTD_dParameter. - * All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds(). - * Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter). - * Setting a parameter is only possible during frame initialization (before starting decompression). - * @return : 0, or an error code (which can be tested using ZSTD_isError()). - */ -ZSTDLIB_API size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int value); - -/*! ZSTD_DCtx_reset() : - * Return a DCtx to clean state. - * Session and parameters can be reset jointly or separately. - * Parameters can only be reset when no active frame is being decompressed. - * @return : 0, or an error code, which can be tested with ZSTD_isError() - */ -ZSTDLIB_API size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset); - - -/**************************** -* Streaming -****************************/ - -typedef struct ZSTD_inBuffer_s { - const void* src; /**< start of input buffer */ - size_t size; /**< size of input buffer */ - size_t pos; /**< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */ -} ZSTD_inBuffer; - -typedef struct ZSTD_outBuffer_s { - void* dst; /**< start of output buffer */ - size_t size; /**< size of output buffer */ - size_t pos; /**< position where writing stopped. Will be updated. 
Necessarily 0 <= pos <= size */ -} ZSTD_outBuffer; - - - -/*-*********************************************************************** -* Streaming compression - HowTo -* -* A ZSTD_CStream object is required to track streaming operation. -* Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources. -* ZSTD_CStream objects can be reused multiple times on consecutive compression operations. -* It is recommended to re-use ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory. -* -* For parallel execution, use one separate ZSTD_CStream per thread. -* -* note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing. -* -* Parameters are sticky : when starting a new compression on the same context, -* it will re-use the same sticky parameters as previous compression session. -* When in doubt, it's recommended to fully initialize the context before usage. -* Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(), -* ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to -* set more specific parameters, the pledged source size, or load a dictionary. -* -* Use ZSTD_compressStream2() with ZSTD_e_continue as many times as necessary to -* consume input stream. The function will automatically update both `pos` -* fields within `input` and `output`. -* Note that the function may not consume the entire input, for example, because -* the output buffer is already full, in which case `input.pos < input.size`. -* The caller must check if input has been entirely consumed. -* If not, the caller must make some room to receive more compressed data, -* and then present again remaining input data. -* note: ZSTD_e_continue is guaranteed to make some forward progress when called, -* but doesn't guarantee maximal forward progress. This is especially relevant -* when compressing with multiple threads. 
The call won't block if it can -* consume some input, but if it can't it will wait for some, but not all, -* output to be flushed. -* @return : provides a minimum amount of data remaining to be flushed from internal buffers -* or an error code, which can be tested using ZSTD_isError(). -* -* At any moment, it's possible to flush whatever data might remain stuck within internal buffer, -* using ZSTD_compressStream2() with ZSTD_e_flush. `output->pos` will be updated. -* Note that, if `output->size` is too small, a single invocation with ZSTD_e_flush might not be enough (return code > 0). -* In which case, make some room to receive more compressed data, and call again ZSTD_compressStream2() with ZSTD_e_flush. -* You must continue calling ZSTD_compressStream2() with ZSTD_e_flush until it returns 0, at which point you can change the -* operation. -* note: ZSTD_e_flush will flush as much output as possible, meaning when compressing with multiple threads, it will -* block until the flush is complete or the output buffer is full. -* @return : 0 if internal buffers are entirely flushed, -* >0 if some data still present within internal buffer (the value is minimal estimation of remaining size), -* or an error code, which can be tested using ZSTD_isError(). -* -* Calling ZSTD_compressStream2() with ZSTD_e_end instructs to finish a frame. -* It will perform a flush and write frame epilogue. -* The epilogue is required for decoders to consider a frame completed. -* flush operation is the same, and follows same rules as calling ZSTD_compressStream2() with ZSTD_e_flush. -* You must continue calling ZSTD_compressStream2() with ZSTD_e_end until it returns 0, at which point you are free to -* start a new frame. -* note: ZSTD_e_end will flush as much output as possible, meaning when compressing with multiple threads, it will -* block until the flush is complete or the output buffer is full. 
-* @return : 0 if frame fully completed and fully flushed, -* >0 if some data still present within internal buffer (the value is minimal estimation of remaining size), -* or an error code, which can be tested using ZSTD_isError(). -* -* *******************************************************************/ - -typedef ZSTD_CCtx ZSTD_CStream; /**< CCtx and CStream are now effectively same object (>= v1.3.0) */ - /* Continue to distinguish them for compatibility with older versions <= v1.2.0 */ -/*===== ZSTD_CStream management functions =====*/ -ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void); -ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs); - -/*===== Streaming compression functions =====*/ -typedef enum { - ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal compression ratio */ - ZSTD_e_flush=1, /* flush any data provided so far, - * it creates (at least) one new block, that can be decoded immediately on reception; - * frame will continue: any future data can still reference previously compressed data, improving compression. - * note : multithreaded compression will block to flush as much output as possible. */ - ZSTD_e_end=2 /* flush any remaining data _and_ close current frame. - * note that frame is only closed after compressed data is fully flushed (return value == 0). - * After that point, any additional data starts a new frame. - * note : each frame is independent (does not reference any content from previous frame). - : note : multithreaded compression will block to flush as much output as possible. */ -} ZSTD_EndDirective; - -/*! ZSTD_compressStream2() : - * Behaves about the same as ZSTD_compressStream, with additional control on end directive. 
- * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*() - * - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode) - * - output->pos must be <= dstCapacity, input->pos must be <= srcSize - * - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit. - * - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller. - * - When nbWorkers>=1, function is non-blocking : it just acquires a copy of input, and distributes jobs to internal worker threads, flush whatever is available, - * and then immediately returns, just indicating that there is some data remaining to be flushed. - * The function nonetheless guarantees forward progress : it will return only after it reads or write at least 1+ byte. - * - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking. - * - @return provides a minimum amount of data remaining to be flushed from internal buffers - * or an error code, which can be tested using ZSTD_isError(). - * if @return != 0, flush is not fully completed, there is still some data left within internal buffers. - * This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers. - * For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed. - * - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0), - * only ZSTD_e_end or ZSTD_e_flush operations are allowed. - * Before starting a new compression job, or changing compression parameters, - * it is required to fully flush internal buffers. 
- */ -ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx, - ZSTD_outBuffer* output, - ZSTD_inBuffer* input, - ZSTD_EndDirective endOp); - - -/* These buffer sizes are softly recommended. - * They are not required : ZSTD_compressStream*() happily accepts any buffer size, for both input and output. - * Respecting the recommended size just makes it a bit easier for ZSTD_compressStream*(), - * reducing the amount of memory shuffling and buffering, resulting in minor performance savings. - * - * However, note that these recommendations are from the perspective of a C caller program. - * If the streaming interface is invoked from some other language, - * especially managed ones such as Java or Go, through a foreign function interface such as jni or cgo, - * a major performance rule is to reduce crossing such interface to an absolute minimum. - * It's not rare that performance ends being spent more into the interface, rather than compression itself. - * In which cases, prefer using large buffers, as large as practical, - * for both input and output, to reduce the nb of roundtrips. - */ -ZSTDLIB_API size_t ZSTD_CStreamInSize(void); /**< recommended size for input buffer */ -ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /**< recommended size for output buffer. Guarantee to successfully flush at least one complete compressed block. */ - - -/* ***************************************************************************** - * This following is a legacy streaming API. - * It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2(). - * It is redundant, but remains fully supported. - * Advanced parameters and dictionary compression can only be used through the - * new API. - ******************************************************************************/ - -/*! 
- * Equivalent to: - * - * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any) - * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); - */ -ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel); -/*! - * Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue). - * NOTE: The return value is different. ZSTD_compressStream() returns a hint for - * the next read size (if non-zero and not an error). ZSTD_compressStream2() - * returns the minimum nb of bytes left to flush (if non-zero and not an error). - */ -ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input); -/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush). */ -ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output); -/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). */ -ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output); - - -/*-*************************************************************************** -* Streaming decompression - HowTo -* -* A ZSTD_DStream object is required to track streaming operations. -* Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources. -* ZSTD_DStream objects can be re-used multiple times. -* -* Use ZSTD_initDStream() to start a new decompression operation. -* @return : recommended first input size -* Alternatively, use advanced API to set specific properties. -* -* Use ZSTD_decompressStream() repetitively to consume your input. -* The function will update both `pos` fields. -* If `input.pos < input.size`, some input has not been consumed. -* It's up to the caller to present again remaining data. -* The function tries to flush all data decoded immediately, respecting output buffer size. -* If `output.pos < output.size`, decoder has flushed everything it could. 
-* But if `output.pos == output.size`, there might be some data left within internal buffers., -* In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer. -* Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX. -* @return : 0 when a frame is completely decoded and fully flushed, -* or an error code, which can be tested using ZSTD_isError(), -* or any other value > 0, which means there is still some decoding or flushing to do to complete current frame : -* the return value is a suggested next input size (just a hint for better latency) -* that will never request more than the remaining frame size. -* *******************************************************************************/ - -typedef ZSTD_DCtx ZSTD_DStream; /**< DCtx and DStream are now effectively same object (>= v1.3.0) */ - /* For compatibility with versions <= v1.2.0, prefer differentiating them. */ -/*===== ZSTD_DStream management functions =====*/ -ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void); -ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds); - -/*===== Streaming decompression functions =====*/ - -/* This function is redundant with the advanced API and equivalent to: - * - * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); - * ZSTD_DCtx_refDDict(zds, NULL); - */ -ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds); - -ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input); - -ZSTDLIB_API size_t ZSTD_DStreamInSize(void); /*!< recommended size for input buffer */ -ZSTDLIB_API size_t ZSTD_DStreamOutSize(void); /*!< recommended size for output buffer. Guarantee to successfully flush at least one complete block in all circumstances. */ - - -/************************** -* Simple dictionary API -***************************/ -/*! ZSTD_compress_usingDict() : - * Compression at an explicit compression level using a Dictionary. 
- * A dictionary can be any arbitrary data segment (also called a prefix), - * or a buffer with specified information (see dictBuilder/zdict.h). - * Note : This function loads the dictionary, resulting in significant startup delay. - * It's intended for a dictionary used only once. - * Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */ -ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const void* dict,size_t dictSize, - int compressionLevel); - -/*! ZSTD_decompress_usingDict() : - * Decompression using a known Dictionary. - * Dictionary must be identical to the one used during compression. - * Note : This function loads the dictionary, resulting in significant startup delay. - * It's intended for a dictionary used only once. - * Note : When `dict == NULL || dictSize < 8` no dictionary is used. */ -ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const void* dict,size_t dictSize); - - -/*********************************** - * Bulk processing dictionary API - **********************************/ -typedef struct ZSTD_CDict_s ZSTD_CDict; - -/*! ZSTD_createCDict() : - * When compressing multiple messages or blocks using the same dictionary, - * it's recommended to digest the dictionary only once, since it's a costly operation. - * ZSTD_createCDict() will create a state from digesting a dictionary. - * The resulting state can be used for future compression operations with very limited startup cost. - * ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. - * @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict. - * Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content. 
- * Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer, - * in which case the only thing that it transports is the @compressionLevel. - * This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively, - * expecting a ZSTD_CDict parameter with any data, including those without a known dictionary. */ -ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize, - int compressionLevel); - -/*! ZSTD_freeCDict() : - * Function frees memory allocated by ZSTD_createCDict(). */ -ZSTDLIB_API size_t ZSTD_freeCDict(ZSTD_CDict* CDict); - -/*! ZSTD_compress_usingCDict() : - * Compression using a digested Dictionary. - * Recommended when same dictionary is used multiple times. - * Note : compression level is _decided at dictionary creation time_, - * and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */ -ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const ZSTD_CDict* cdict); - - -typedef struct ZSTD_DDict_s ZSTD_DDict; - -/*! ZSTD_createDDict() : - * Create a digested dictionary, ready to start decompression operation without startup delay. - * dictBuffer can be released after DDict creation, as its content is copied inside DDict. */ -ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize); - -/*! ZSTD_freeDDict() : - * Function frees memory allocated with ZSTD_createDDict() */ -ZSTDLIB_API size_t ZSTD_freeDDict(ZSTD_DDict* ddict); - -/*! ZSTD_decompress_usingDDict() : - * Decompression using a digested Dictionary. - * Recommended when same dictionary is used multiple times. */ -ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const ZSTD_DDict* ddict); - - -/******************************** - * Dictionary helper functions - *******************************/ - -/*! 
ZSTD_getDictID_fromDict() : - * Provides the dictID stored within dictionary. - * if @return == 0, the dictionary is not conformant with Zstandard specification. - * It can still be loaded, but as a content-only dictionary. */ -ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize); - -/*! ZSTD_getDictID_fromDDict() : - * Provides the dictID of the dictionary loaded into `ddict`. - * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. - * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ -ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict); - -/*! ZSTD_getDictID_fromFrame() : - * Provides the dictID required to decompressed the frame stored within `src`. - * If @return == 0, the dictID could not be decoded. - * This could for one of the following reasons : - * - The frame does not require a dictionary to be decoded (most common case). - * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden information. - * Note : this use case also happens when using a non-conformant dictionary. - * - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`). - * - This is not a Zstandard frame. - * When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code. */ -ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize); - - -/******************************************************************************* - * Advanced dictionary and prefix API - * - * This API allows dictionaries to be used with ZSTD_compress2(), - * ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and - * only reset with the context is reset with ZSTD_reset_parameters or - * ZSTD_reset_session_and_parameters. Prefixes are single-use. 
- ******************************************************************************/ - - -/*! ZSTD_CCtx_loadDictionary() : - * Create an internal CDict from `dict` buffer. - * Decompression will have to use same dictionary. - * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary, - * meaning "return to no-dictionary mode". - * Note 1 : Dictionary is sticky, it will be used for all future compressed frames. - * To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters). - * Note 2 : Loading a dictionary involves building tables. - * It's also a CPU consuming operation, with non-negligible impact on latency. - * Tables are dependent on compression parameters, and for this reason, - * compression parameters can no longer be changed after loading a dictionary. - * Note 3 :`dict` content will be copied internally. - * Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead. - * In such a case, dictionary buffer must outlive its users. - * Note 4 : Use ZSTD_CCtx_loadDictionary_advanced() - * to precisely select how dictionary content must be interpreted. */ -ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize); - -/*! ZSTD_CCtx_refCDict() : - * Reference a prepared dictionary, to be used for all next compressed frames. - * Note that compression parameters are enforced from within CDict, - * and supersede any compression parameter previously set within CCtx. - * The parameters ignored are labled as "superseded-by-cdict" in the ZSTD_cParameter enum docs. - * The ignored parameters will be used again if the CCtx is returned to no-dictionary mode. - * The dictionary will remain valid for future compressed frames using same CCtx. - * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Special : Referencing a NULL CDict means "return to no-dictionary mode". 
- * Note 1 : Currently, only one dictionary can be managed. - * Referencing a new dictionary effectively "discards" any previous one. - * Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */ -ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); - -/*! ZSTD_CCtx_refPrefix() : - * Reference a prefix (single-usage dictionary) for next compressed frame. - * A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end). - * Decompression will need same prefix to properly regenerate data. - * Compressing with a prefix is similar in outcome as performing a diff and compressing it, - * but performs much faster, especially during decompression (compression speed is tunable with compression level). - * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary - * Note 1 : Prefix buffer is referenced. It **must** outlive compression. - * Its content must remain unmodified during compression. - * Note 2 : If the intention is to diff some large src data blob with some prior version of itself, - * ensure that the window size is large enough to contain the entire source. - * See ZSTD_c_windowLog. - * Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters. - * It's a CPU consuming operation, with non-negligible impact on latency. - * If there is a need to use the same prefix multiple times, consider loadDictionary instead. - * Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent). - * Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */ -ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, - const void* prefix, size_t prefixSize); - -/*! ZSTD_DCtx_loadDictionary() : - * Create an internal DDict from dict buffer, - * to be used to decompress next frames. 
- * The dictionary remains valid for all future frames, until explicitly invalidated. - * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary, - * meaning "return to no-dictionary mode". - * Note 1 : Loading a dictionary involves building tables, - * which has a non-negligible impact on CPU usage and latency. - * It's recommended to "load once, use many times", to amortize the cost - * Note 2 :`dict` content will be copied internally, so `dict` can be released after loading. - * Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead. - * Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of - * how dictionary content is loaded and interpreted. - */ -ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); - -/*! ZSTD_DCtx_refDDict() : - * Reference a prepared dictionary, to be used to decompress next frames. - * The dictionary remains active for decompression of future frames using same DCtx. - * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Note 1 : Currently, only one dictionary can be managed. - * Referencing a new dictionary effectively "discards" any previous one. - * Special: referencing a NULL DDict means "return to no-dictionary mode". - * Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx. - */ -ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict); - -/*! ZSTD_DCtx_refPrefix() : - * Reference a prefix (single-usage dictionary) to decompress next frame. - * This is the reverse operation of ZSTD_CCtx_refPrefix(), - * and must use the same prefix as the one used during compression. - * Prefix is **only used once**. Reference is discarded at end of frame. - * End of frame is reached when ZSTD_decompressStream() returns 0. 
- * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary - * Note 2 : Prefix buffer is referenced. It **must** outlive decompression. - * Prefix buffer must remain unmodified up to the end of frame, - * reached when ZSTD_decompressStream() returns 0. - * Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent). - * Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section) - * Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost. - * A full dictionary is more costly, as it requires building tables. - */ -ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, - const void* prefix, size_t prefixSize); - -/* === Memory management === */ - -/*! ZSTD_sizeof_*() : - * These functions give the _current_ memory usage of selected object. - * Note that object memory usage can evolve (increase or decrease) over time. */ -ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx); -ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx); -ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs); -ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds); -ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict); -ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); - -#endif /* ZSTD_H_235446 */ - - -/* ************************************************************************************** - * ADVANCED AND EXPERIMENTAL FUNCTIONS - **************************************************************************************** - * The definitions in the following section are considered experimental. - * They are provided for advanced scenarios. - * They should never be used with a dynamic library, as prototypes may change in the future. - * Use them only in association with static linking. 
- * ***************************************************************************************/ - -#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY) -#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY - -/**************************************************************************************** - * experimental API (static linking only) - **************************************************************************************** - * The following symbols and constants - * are not planned to join "stable API" status in the near future. - * They can still change in future versions. - * Some of them are planned to remain in the static_only section indefinitely. - * Some of them might be removed in the future (especially when redundant with existing stable functions) - * ***************************************************************************************/ - -#define ZSTD_FRAMEHEADERSIZE_PREFIX(format) ((format) == ZSTD_f_zstd1 ? 5 : 1) /* minimum input size required to query frame header size */ -#define ZSTD_FRAMEHEADERSIZE_MIN(format) ((format) == ZSTD_f_zstd1 ? 6 : 2) -#define ZSTD_FRAMEHEADERSIZE_MAX 18 /* can be useful for static allocation */ -#define ZSTD_SKIPPABLEHEADERSIZE 8 - -/* compression parameter bounds */ -#define ZSTD_WINDOWLOG_MAX_32 30 -#define ZSTD_WINDOWLOG_MAX_64 31 -#define ZSTD_WINDOWLOG_MAX ((int)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64)) -#define ZSTD_WINDOWLOG_MIN 10 -#define ZSTD_HASHLOG_MAX ((ZSTD_WINDOWLOG_MAX < 30) ? ZSTD_WINDOWLOG_MAX : 30) -#define ZSTD_HASHLOG_MIN 6 -#define ZSTD_CHAINLOG_MAX_32 29 -#define ZSTD_CHAINLOG_MAX_64 30 -#define ZSTD_CHAINLOG_MAX ((int)(sizeof(size_t) == 4 ? 
ZSTD_CHAINLOG_MAX_32 : ZSTD_CHAINLOG_MAX_64)) -#define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN -#define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1) -#define ZSTD_SEARCHLOG_MIN 1 -#define ZSTD_MINMATCH_MAX 7 /* only for ZSTD_fast, other strategies are limited to 6 */ -#define ZSTD_MINMATCH_MIN 3 /* only for ZSTD_btopt+, faster strategies are limited to 4 */ -#define ZSTD_TARGETLENGTH_MAX ZSTD_BLOCKSIZE_MAX -#define ZSTD_TARGETLENGTH_MIN 0 /* note : comparing this constant to an unsigned results in a tautological test */ -#define ZSTD_STRATEGY_MIN ZSTD_fast -#define ZSTD_STRATEGY_MAX ZSTD_btultra2 - - -#define ZSTD_OVERLAPLOG_MIN 0 -#define ZSTD_OVERLAPLOG_MAX 9 - -#define ZSTD_WINDOWLOG_LIMIT_DEFAULT 27 /* by default, the streaming decoder will refuse any frame - * requiring larger than (1< 3, then this is seqDef.offset - 3 - * If seqDef.offset < 3, then this is the corresponding repeat offset - * But if seqDef.offset < 3 and litLength == 0, this is the - * repeat offset before the corresponding repeat offset - * And if seqDef.offset == 3 and litLength == 0, this is the - * most recent repeat offset - 1 - */ - unsigned int offset; - unsigned int litLength; /* Literal length */ - unsigned int matchLength; /* Match length */ - /* 0 when seq not rep and seqDef.offset otherwise - * when litLength == 0 this will be <= 4, otherwise <= 3 like normal - */ - unsigned int rep; -} ZSTD_Sequence; - -typedef struct { - unsigned windowLog; /**< largest match distance : larger == more compression, more memory needed during decompression */ - unsigned chainLog; /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */ - unsigned hashLog; /**< dispatch table : larger == faster, more memory */ - unsigned searchLog; /**< nb of searches : larger == more compression, slower */ - unsigned minMatch; /**< match length searched : larger == faster decompression, sometimes less compression */ - unsigned targetLength; /**< acceptable match size for optimal 
parser (only) : larger == more compression, slower */ - ZSTD_strategy strategy; /**< see ZSTD_strategy definition above */ -} ZSTD_compressionParameters; - -typedef struct { - int contentSizeFlag; /**< 1: content size will be in frame header (when known) */ - int checksumFlag; /**< 1: generate a 32-bits checksum using XXH64 algorithm at end of frame, for error detection */ - int noDictIDFlag; /**< 1: no dictID will be saved into frame header (dictID is only useful for dictionary compression) */ -} ZSTD_frameParameters; - -typedef struct { - ZSTD_compressionParameters cParams; - ZSTD_frameParameters fParams; -} ZSTD_parameters; - -typedef enum { - ZSTD_dct_auto = 0, /* dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */ - ZSTD_dct_rawContent = 1, /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */ - ZSTD_dct_fullDict = 2 /* refuses to load a dictionary if it does not respect Zstandard's specification, starting with ZSTD_MAGIC_DICTIONARY */ -} ZSTD_dictContentType_e; - -typedef enum { - ZSTD_dlm_byCopy = 0, /**< Copy dictionary content internally */ - ZSTD_dlm_byRef = 1 /**< Reference dictionary content -- the dictionary buffer must outlive its users. */ -} ZSTD_dictLoadMethod_e; - -typedef enum { - ZSTD_f_zstd1 = 0, /* zstd frame format, specified in zstd_compression_format.md (default) */ - ZSTD_f_zstd1_magicless = 1 /* Variant of zstd frame format, without initial 4-bytes magic number. - * Useful to save 4 bytes per generated frame. - * Decoder cannot recognise automatically this format, requiring this instruction. */ -} ZSTD_format_e; - -typedef enum { - /* Note: this enum and the behavior it controls are effectively internal - * implementation details of the compressor. They are expected to continue - * to evolve and should be considered only in the context of extremely - * advanced performance tuning. 
- * - * Zstd currently supports the use of a CDict in three ways: - * - * - The contents of the CDict can be copied into the working context. This - * means that the compression can search both the dictionary and input - * while operating on a single set of internal tables. This makes - * the compression faster per-byte of input. However, the initial copy of - * the CDict's tables incurs a fixed cost at the beginning of the - * compression. For small compressions (< 8 KB), that copy can dominate - * the cost of the compression. - * - * - The CDict's tables can be used in-place. In this model, compression is - * slower per input byte, because the compressor has to search two sets of - * tables. However, this model incurs no start-up cost (as long as the - * working context's tables can be reused). For small inputs, this can be - * faster than copying the CDict's tables. - * - * - The CDict's tables are not used at all, and instead we use the working - * context alone to reload the dictionary and use params based on the source - * size. See ZSTD_compress_insertDictionary() and ZSTD_compress_usingDict(). - * This method is effective when the dictionary sizes are very small relative - * to the input size, and the input size is fairly large to begin with. - * - * Zstd has a simple internal heuristic that selects which strategy to use - * at the beginning of a compression. However, if experimentation shows that - * Zstd is making poor choices, it is possible to override that choice with - * this enum. - */ - ZSTD_dictDefaultAttach = 0, /* Use the default heuristic. */ - ZSTD_dictForceAttach = 1, /* Never copy the dictionary. */ - ZSTD_dictForceCopy = 2, /* Always copy the dictionary. */ - ZSTD_dictForceLoad = 3 /* Always reload the dictionary */ -} ZSTD_dictAttachPref_e; - -typedef enum { - ZSTD_lcm_auto = 0, /**< Automatically determine the compression mode based on the compression level. 
- * Negative compression levels will be uncompressed, and positive compression - * levels will be compressed. */ - ZSTD_lcm_huffman = 1, /**< Always attempt Huffman compression. Uncompressed literals will still be - * emitted if Huffman compression is not profitable. */ - ZSTD_lcm_uncompressed = 2 /**< Always emit uncompressed literals. */ -} ZSTD_literalCompressionMode_e; - - -/*************************************** -* Frame size functions -***************************************/ - -/*! ZSTD_findDecompressedSize() : - * `src` should point to the start of a series of ZSTD encoded and/or skippable frames - * `srcSize` must be the _exact_ size of this series - * (i.e. there should be a frame boundary at `src + srcSize`) - * @return : - decompressed size of all data in all successive frames - * - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN - * - if an error occurred: ZSTD_CONTENTSIZE_ERROR - * - * note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode. - * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size. - * In which case, it's necessary to use streaming mode to decompress data. - * note 2 : decompressed size is always present when compression is done with ZSTD_compress() - * note 3 : decompressed size can be very large (64-bits value), - * potentially larger than what local system can handle as a single memory segment. - * In which case, it's necessary to use streaming mode to decompress data. - * note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified. - * Always ensure result fits within application's authorized limits. - * Each application can set its own limits. - * note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to - * read each contained frame header. This is fast as most of the data is skipped, - * however it does mean that all frame data must be present and valid. 
*/ -ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize); - -/*! ZSTD_decompressBound() : - * `src` should point to the start of a series of ZSTD encoded and/or skippable frames - * `srcSize` must be the _exact_ size of this series - * (i.e. there should be a frame boundary at `src + srcSize`) - * @return : - upper-bound for the decompressed size of all data in all successive frames - * - if an error occured: ZSTD_CONTENTSIZE_ERROR - * - * note 1 : an error can occur if `src` contains an invalid or incorrectly formatted frame. - * note 2 : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`. - * in this case, `ZSTD_findDecompressedSize` and `ZSTD_decompressBound` return the same value. - * note 3 : when the decompressed size field isn't available, the upper-bound for that frame is calculated by: - * upper-bound = # blocks * min(128 KB, Window_Size) - */ -ZSTDLIB_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize); - -/*! ZSTD_frameHeaderSize() : - * srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX. - * @return : size of the Frame Header, - * or an error code (if srcSize is too small) */ -ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize); - -/*! ZSTD_getSequences() : - * Extract sequences from the sequence store - * zc can be used to insert custom compression params. - * This function invokes ZSTD_compress2 - * @return : number of sequences extracted - */ -ZSTDLIB_API size_t ZSTD_getSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs, - size_t outSeqsSize, const void* src, size_t srcSize); - - -/*************************************** -* Memory management -***************************************/ - -/*! ZSTD_estimate*() : - * These functions make it possible to estimate memory usage - * of a future {D,C}Ctx, before its creation. 
- * - * ZSTD_estimateCCtxSize() will provide a memory budget large enough - * for any compression level up to selected one. - * Note : Unlike ZSTD_estimateCStreamSize*(), this estimate - * does not include space for a window buffer. - * Therefore, the estimation is only guaranteed for single-shot compressions, not streaming. - * The estimate will assume the input may be arbitrarily large, - * which is the worst case. - * - * When srcSize can be bound by a known and rather "small" value, - * this fact can be used to provide a tighter estimation - * because the CCtx compression context will need less memory. - * This tighter estimation can be provided by more advanced functions - * ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(), - * and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter(). - * Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits. - * - * Note 2 : only single-threaded compression is supported. - * ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1. - */ -ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel); -ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams); -ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params); -ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void); - -/*! ZSTD_estimateCStreamSize() : - * ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one. - * It will also consider src size to be arbitrarily "large", which is worst case. - * If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation. - * ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel. 
- * ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1. - * Note : CStream size estimation is only correct for single-threaded compression. - * ZSTD_DStream memory budget depends on window Size. - * This information can be passed manually, using ZSTD_estimateDStreamSize, - * or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame(); - * Note : if streaming is init with function ZSTD_init?Stream_usingDict(), - * an internal ?Dict will be created, which additional size is not estimated here. - * In this case, get total size by adding ZSTD_estimate?DictSize */ -ZSTDLIB_API size_t ZSTD_estimateCStreamSize(int compressionLevel); -ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams); -ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params); -ZSTDLIB_API size_t ZSTD_estimateDStreamSize(size_t windowSize); -ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize); - -/*! ZSTD_estimate?DictSize() : - * ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict(). - * ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced(). - * Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller. - */ -ZSTDLIB_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel); -ZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod); -ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod); - -/*! ZSTD_initStatic*() : - * Initialize an object using a pre-allocated fixed-size buffer. 
- * workspace: The memory area to emplace the object into. - * Provided pointer *must be 8-bytes aligned*. - * Buffer must outlive object. - * workspaceSize: Use ZSTD_estimate*Size() to determine - * how large workspace must be to support target scenario. - * @return : pointer to object (same address as workspace, just different type), - * or NULL if error (size too small, incorrect alignment, etc.) - * Note : zstd will never resize nor malloc() when using a static buffer. - * If the object requires more memory than available, - * zstd will just error out (typically ZSTD_error_memory_allocation). - * Note 2 : there is no corresponding "free" function. - * Since workspace is allocated externally, it must be freed externally too. - * Note 3 : cParams : use ZSTD_getCParams() to convert a compression level - * into its associated cParams. - * Limitation 1 : currently not compatible with internal dictionary creation, triggered by - * ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict(). - * Limitation 2 : static cctx currently not compatible with multi-threading. - * Limitation 3 : static dctx is incompatible with legacy support. 
- */ -ZSTDLIB_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize); -ZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticCCtx() */ - -ZSTDLIB_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize); -ZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticDCtx() */ - -ZSTDLIB_API const ZSTD_CDict* ZSTD_initStaticCDict( - void* workspace, size_t workspaceSize, - const void* dict, size_t dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType, - ZSTD_compressionParameters cParams); - -ZSTDLIB_API const ZSTD_DDict* ZSTD_initStaticDDict( - void* workspace, size_t workspaceSize, - const void* dict, size_t dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType); - - -/*! Custom memory allocation : - * These prototypes make it possible to pass your own allocation/free functions. - * ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below. - * All allocation/free operations will be completed using these custom variants instead of regular ones. 
- */ -typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size); -typedef void (*ZSTD_freeFunction) (void* opaque, void* address); -typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem; -static ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; /**< this constant defers to stdlib's functions */ - -ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem); -ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem); -ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem); -ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem); - -ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType, - ZSTD_compressionParameters cParams, - ZSTD_customMem customMem); - -ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType, - ZSTD_customMem customMem); - - - -/*************************************** -* Advanced compression functions -***************************************/ - -/*! ZSTD_createCDict_byReference() : - * Create a digested dictionary for compression - * Dictionary content is just referenced, not duplicated. - * As a consequence, `dictBuffer` **must** outlive CDict, - * and its content must remain unmodified throughout the lifetime of CDict. - * note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */ -ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel); - -/*! ZSTD_getCParams() : - * @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize. 
- * `estimatedSrcSize` value is optional, select 0 if not known */ -ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize); - -/*! ZSTD_getParams() : - * same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`. - * All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */ -ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize); - -/*! ZSTD_checkCParams() : - * Ensure param values remain within authorized range. - * @return 0 on success, or an error code (can be checked with ZSTD_isError()) */ -ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params); - -/*! ZSTD_adjustCParams() : - * optimize params for a given `srcSize` and `dictSize`. - * `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN. - * `dictSize` must be `0` when there is no dictionary. - * cPar can be invalid : all parameters will be clamped within valid range in the @return struct. - * This function never fails (wide contract) */ -ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize); - -/*! ZSTD_compress_advanced() : - * Note : this function is now DEPRECATED. - * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters. - * This prototype will be marked as deprecated and generate compilation warning on reaching v1.5.x */ -ZSTDLIB_API size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const void* dict,size_t dictSize, - ZSTD_parameters params); - -/*! ZSTD_compress_usingCDict_advanced() : - * Note : this function is now REDUNDANT. 
- * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters. - * This prototype will be marked as deprecated and generate compilation warning in some future version */ -ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const ZSTD_CDict* cdict, - ZSTD_frameParameters fParams); - - -/*! ZSTD_CCtx_loadDictionary_byReference() : - * Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx. - * It saves some memory, but also requires that `dict` outlives its usage within `cctx` */ -ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize); - -/*! ZSTD_CCtx_loadDictionary_advanced() : - * Same as ZSTD_CCtx_loadDictionary(), but gives finer control over - * how to load the dictionary (by copy ? by reference ?) - * and how to interpret it (automatic ? force raw mode ? full mode only ?) */ -ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType); - -/*! ZSTD_CCtx_refPrefix_advanced() : - * Same as ZSTD_CCtx_refPrefix(), but gives finer control over - * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */ -ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType); - -/* === experimental parameters === */ -/* these parameters can be used with ZSTD_setParameter() - * they are not guaranteed to remain supported in the future */ - - /* Enables rsyncable mode, - * which makes compressed files more rsync friendly - * by adding periodic synchronization points to the compressed data. - * The target average block size is ZSTD_c_jobSize / 2. 
- * It's possible to modify the job size to increase or decrease - * the granularity of the synchronization point. - * Once the jobSize is smaller than the window size, - * it will result in compression ratio degradation. - * NOTE 1: rsyncable mode only works when multithreading is enabled. - * NOTE 2: rsyncable performs poorly in combination with long range mode, - * since it will decrease the effectiveness of synchronization points, - * though mileage may vary. - * NOTE 3: Rsyncable mode limits maximum compression speed to ~400 MB/s. - * If the selected compression level is already running significantly slower, - * the overall speed won't be significantly impacted. - */ - #define ZSTD_c_rsyncable ZSTD_c_experimentalParam1 - -/* Select a compression format. - * The value must be of type ZSTD_format_e. - * See ZSTD_format_e enum definition for details */ -#define ZSTD_c_format ZSTD_c_experimentalParam2 - -/* Force back-reference distances to remain < windowSize, - * even when referencing into Dictionary content (default:0) */ -#define ZSTD_c_forceMaxWindow ZSTD_c_experimentalParam3 - -/* Controls whether the contents of a CDict - * are used in place, or copied into the working context. - * Accepts values from the ZSTD_dictAttachPref_e enum. - * See the comments on that enum for an explanation of the feature. */ -#define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4 - -/* Controls how the literals are compressed (default is auto). - * The value must be of type ZSTD_literalCompressionMode_e. - * See ZSTD_literalCompressionMode_t enum definition for details. - */ -#define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5 - -/* Tries to fit compressed block size to be around targetCBlockSize. - * No target when targetCBlockSize == 0. - * There is no guarantee on compressed block size (default:0) */ -#define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6 - -/* User's best guess of source size. - * Hint is not valid when srcSizeHint == 0. 
- * There is no guarantee that hint is close to actual source size, - * but compression ratio may regress significantly if guess considerably underestimates */ -#define ZSTD_c_srcSizeHint ZSTD_c_experimentalParam7 - -/*! ZSTD_CCtx_getParameter() : - * Get the requested compression parameter value, selected by enum ZSTD_cParameter, - * and store it into int* value. - * @return : 0, or an error code (which can be tested with ZSTD_isError()). - */ -ZSTDLIB_API size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value); - - -/*! ZSTD_CCtx_params : - * Quick howto : - * - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure - * - ZSTD_CCtxParams_setParameter() : Push parameters one by one into - * an existing ZSTD_CCtx_params structure. - * This is similar to - * ZSTD_CCtx_setParameter(). - * - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to - * an existing CCtx. - * These parameters will be applied to - * all subsequent frames. - * - ZSTD_compressStream2() : Do compression using the CCtx. - * - ZSTD_freeCCtxParams() : Free the memory. - * - * This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams() - * for static allocation of CCtx for single-threaded compression. - */ -ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void); -ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params); - -/*! ZSTD_CCtxParams_reset() : - * Reset params to default values. - */ -ZSTDLIB_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params); - -/*! ZSTD_CCtxParams_init() : - * Initializes the compression parameters of cctxParams according to - * compression level. All other parameters are reset to their default values. - */ -ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel); - -/*! ZSTD_CCtxParams_init_advanced() : - * Initializes the compression and frame parameters of cctxParams according to - * params. All other parameters are reset to their default values. 
- */ -ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params); - -/*! ZSTD_CCtxParams_setParameter() : - * Similar to ZSTD_CCtx_setParameter. - * Set one compression parameter, selected by enum ZSTD_cParameter. - * Parameters must be applied to a ZSTD_CCtx using ZSTD_CCtx_setParametersUsingCCtxParams(). - * @result : 0, or an error code (which can be tested with ZSTD_isError()). - */ -ZSTDLIB_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value); - -/*! ZSTD_CCtxParams_getParameter() : - * Similar to ZSTD_CCtx_getParameter. - * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter. - * @result : 0, or an error code (which can be tested with ZSTD_isError()). - */ -ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value); - -/*! ZSTD_CCtx_setParametersUsingCCtxParams() : - * Apply a set of ZSTD_CCtx_params to the compression context. - * This can be done even after compression is started, - * if nbWorkers==0, this will have no impact until a new compression is started. - * if nbWorkers>=1, new parameters will be picked up at next job, - * with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated). - */ -ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams( - ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params); - -/*! ZSTD_compressStream2_simpleArgs() : - * Same as ZSTD_compressStream2(), - * but using only integral types as arguments. - * This variant might be helpful for binders from dynamic languages - * which have troubles handling structures containing memory pointers. 
- */ -ZSTDLIB_API size_t ZSTD_compressStream2_simpleArgs ( - ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, size_t* dstPos, - const void* src, size_t srcSize, size_t* srcPos, - ZSTD_EndDirective endOp); - - -/*************************************** -* Advanced decompression functions -***************************************/ - -/*! ZSTD_isFrame() : - * Tells if the content of `buffer` starts with a valid Frame Identifier. - * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. - * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled. - * Note 3 : Skippable Frame Identifiers are considered valid. */ -ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size); - -/*! ZSTD_createDDict_byReference() : - * Create a digested dictionary, ready to start decompression operation without startup delay. - * Dictionary content is referenced, and therefore stays in dictBuffer. - * It is important that dictBuffer outlives DDict, - * it must remain read accessible throughout the lifetime of DDict */ -ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize); - -/*! ZSTD_DCtx_loadDictionary_byReference() : - * Same as ZSTD_DCtx_loadDictionary(), - * but references `dict` content instead of copying it into `dctx`. - * This saves memory if `dict` remains around., - * However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. */ -ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); - -/*! ZSTD_DCtx_loadDictionary_advanced() : - * Same as ZSTD_DCtx_loadDictionary(), - * but gives direct control over - * how to load the dictionary (by copy ? by reference ?) - * and how to interpret it (automatic ? force raw mode ? full mode only ?). 
*/ -ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType); - -/*! ZSTD_DCtx_refPrefix_advanced() : - * Same as ZSTD_DCtx_refPrefix(), but gives finer control over - * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */ -ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType); - -/*! ZSTD_DCtx_setMaxWindowSize() : - * Refuses allocating internal buffers for frames requiring a window size larger than provided limit. - * This protects a decoder context from reserving too much memory for itself (potential attack scenario). - * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode. - * By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) - * @return : 0, or an error code (which can be tested using ZSTD_isError()). - */ -ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize); - -/* ZSTD_d_format - * experimental parameter, - * allowing selection between ZSTD_format_e input compression formats - */ -#define ZSTD_d_format ZSTD_d_experimentalParam1 -/* ZSTD_d_stableOutBuffer - * Experimental parameter. - * Default is 0 == disabled. Set to 1 to enable. - * - * Tells the decompressor that the ZSTD_outBuffer will ALWAYS be the same - * between calls, except for the modifications that zstd makes to pos (the - * caller must not modify pos). This is checked by the decompressor, and - * decompression will fail if it ever changes. Therefore the ZSTD_outBuffer - * MUST be large enough to fit the entire decompressed frame. This will be - * checked when the frame content size is known. 
The data in the ZSTD_outBuffer - * in the range [dst, dst + pos) MUST not be modified during decompression - * or you will get data corruption. - * - * When this flags is enabled zstd won't allocate an output buffer, because - * it can write directly to the ZSTD_outBuffer, but it will still allocate - * an input buffer large enough to fit any compressed block. This will also - * avoid the memcpy() from the internal output buffer to the ZSTD_outBuffer. - * If you need to avoid the input buffer allocation use the buffer-less - * streaming API. - * - * NOTE: So long as the ZSTD_outBuffer always points to valid memory, using - * this flag is ALWAYS memory safe, and will never access out-of-bounds - * memory. However, decompression WILL fail if you violate the preconditions. - * - * WARNING: The data in the ZSTD_outBuffer in the range [dst, dst + pos) MUST - * not be modified during decompression or you will get data corruption. This - * is because zstd needs to reference data in the ZSTD_outBuffer to regenerate - * matches. Normally zstd maintains its own buffer for this purpose, but passing - * this flag tells zstd to use the user provided buffer. - */ -#define ZSTD_d_stableOutBuffer ZSTD_d_experimentalParam2 - -/*! ZSTD_DCtx_setFormat() : - * Instruct the decoder context about what kind of data to decode next. - * This instruction is mandatory to decode data without a fully-formed header, - * such ZSTD_f_zstd1_magicless for example. - * @return : 0, or an error code (which can be tested using ZSTD_isError()). */ -ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format); - -/*! ZSTD_decompressStream_simpleArgs() : - * Same as ZSTD_decompressStream(), - * but using only integral types as arguments. - * This can be helpful for binders from dynamic languages - * which have troubles handling structures containing memory pointers. 
- */ -ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs ( - ZSTD_DCtx* dctx, - void* dst, size_t dstCapacity, size_t* dstPos, - const void* src, size_t srcSize, size_t* srcPos); - - -/******************************************************************** -* Advanced streaming functions -* Warning : most of these functions are now redundant with the Advanced API. -* Once Advanced API reaches "stable" status, -* redundant functions will be deprecated, and then at some point removed. -********************************************************************/ - -/*===== Advanced Streaming compression functions =====*/ -/**! ZSTD_initCStream_srcSize() : - * This function is deprecated, and equivalent to: - * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any) - * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); - * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); - * - * pledgedSrcSize must be correct. If it is not known at init time, use - * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs, - * "0" also disables frame content size field. It may be enabled in the future. - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x - */ -ZSTDLIB_API size_t -ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, - int compressionLevel, - unsigned long long pledgedSrcSize); - -/**! ZSTD_initCStream_usingDict() : - * This function is deprecated, and is equivalent to: - * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); - * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); - * - * Creates of an internal CDict (incompatible with static CCtx), except if - * dict == NULL or dictSize < 8, in which case no dict is used. 
- * Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if - * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy. - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x - */ -ZSTDLIB_API size_t -ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, - const void* dict, size_t dictSize, - int compressionLevel); - -/**! ZSTD_initCStream_advanced() : - * This function is deprecated, and is approximately equivalent to: - * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - * // Pseudocode: Set each zstd parameter and leave the rest as-is. - * for ((param, value) : params) { - * ZSTD_CCtx_setParameter(zcs, param, value); - * } - * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); - * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); - * - * dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy. - * pledgedSrcSize must be correct. - * If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x - */ -ZSTDLIB_API size_t -ZSTD_initCStream_advanced(ZSTD_CStream* zcs, - const void* dict, size_t dictSize, - ZSTD_parameters params, - unsigned long long pledgedSrcSize); - -/**! ZSTD_initCStream_usingCDict() : - * This function is deprecated, and equivalent to: - * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - * ZSTD_CCtx_refCDict(zcs, cdict); - * - * note : cdict will just be referenced, and must outlive compression session - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x - */ -ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict); - -/**! ZSTD_initCStream_usingCDict_advanced() : - * This function is DEPRECATED, and is approximately equivalent to: - * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - * // Pseudocode: Set each zstd frame parameter and leave the rest as-is. 
- * for ((fParam, value) : fParams) { - * ZSTD_CCtx_setParameter(zcs, fParam, value); - * } - * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); - * ZSTD_CCtx_refCDict(zcs, cdict); - * - * same as ZSTD_initCStream_usingCDict(), with control over frame parameters. - * pledgedSrcSize must be correct. If srcSize is not known at init time, use - * value ZSTD_CONTENTSIZE_UNKNOWN. - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x - */ -ZSTDLIB_API size_t -ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, - const ZSTD_CDict* cdict, - ZSTD_frameParameters fParams, - unsigned long long pledgedSrcSize); - -/*! ZSTD_resetCStream() : - * This function is deprecated, and is equivalent to: - * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); - * - * start a new frame, using same parameters from previous frame. - * This is typically useful to skip dictionary loading stage, since it will re-use it in-place. - * Note that zcs must be init at least once before using ZSTD_resetCStream(). - * If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN. - * If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end. - * For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs, - * but it will change to mean "empty" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead. 
- * @return : 0, or an error code (which can be tested using ZSTD_isError()) - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x - */ -ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize); - - -typedef struct { - unsigned long long ingested; /* nb input bytes read and buffered */ - unsigned long long consumed; /* nb input bytes actually compressed */ - unsigned long long produced; /* nb of compressed bytes generated and buffered */ - unsigned long long flushed; /* nb of compressed bytes flushed : not provided; can be tracked from caller side */ - unsigned currentJobID; /* MT only : latest started job nb */ - unsigned nbActiveWorkers; /* MT only : nb of workers actively compressing at probe time */ -} ZSTD_frameProgression; - -/* ZSTD_getFrameProgression() : - * tells how much data has been ingested (read from input) - * consumed (input actually compressed) and produced (output) for current frame. - * Note : (ingested - consumed) is amount of input data buffered internally, not yet compressed. - * Aggregates progression inside active worker threads. - */ -ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx); - -/*! ZSTD_toFlushNow() : - * Tell how many bytes are ready to be flushed immediately. - * Useful for multithreading scenarios (nbWorkers >= 1). - * Probe the oldest active job, defined as oldest job not yet entirely flushed, - * and check its output buffer. - * @return : amount of data stored in oldest job and ready to be flushed immediately. - * if @return == 0, it means either : - * + there is no active job (could be checked with ZSTD_frameProgression()), or - * + oldest job is still actively compressing data, - * but everything it has produced has also been flushed so far, - * therefore flush speed is limited by production speed of oldest job - * irrespective of the speed of concurrent (and newer) jobs. 
- */ -ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx); - - -/*===== Advanced Streaming decompression functions =====*/ -/** - * This function is deprecated, and is equivalent to: - * - * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); - * ZSTD_DCtx_loadDictionary(zds, dict, dictSize); - * - * note: no dictionary will be used if dict == NULL or dictSize < 8 - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x - */ -ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); - -/** - * This function is deprecated, and is equivalent to: - * - * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); - * ZSTD_DCtx_refDDict(zds, ddict); - * - * note : ddict is referenced, it must outlive decompression session - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x - */ -ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); - -/** - * This function is deprecated, and is equivalent to: - * - * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); - * - * re-use decompression parameters from previous init; saves dictionary loading - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x - */ -ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); - - -/********************************************************************* -* Buffer-less and synchronous inner streaming functions -* -* This is an advanced API, giving full control over buffer management, for users which need direct control over memory. -* But it's also a complex one, with several restrictions, documented below. -* Prefer normal streaming API for an easier experience. -********************************************************************* */ - -/** - Buffer-less streaming compression (synchronous mode) - - A ZSTD_CCtx object is required to track streaming operations. 
- Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource. - ZSTD_CCtx object can be re-used multiple times within successive compression operations. - - Start by initializing a context. - Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression, - or ZSTD_compressBegin_advanced(), for finer parameter control. - It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx() - - Then, consume your input using ZSTD_compressContinue(). - There are some important considerations to keep in mind when using this advanced function : - - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only. - - Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks. - - Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario. - Worst case evaluation is provided by ZSTD_compressBound(). - ZSTD_compressContinue() doesn't guarantee recover after a failed compression. - - ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog). - It remembers all previous contiguous blocks, plus one separated memory segment (which can itself consists of multiple contiguous blocks) - - ZSTD_compressContinue() detects that prior input has been overwritten when `src` buffer overlaps. - In which case, it will "discard" the relevant memory section from its history. - - Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum. - It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame. - Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders. - - `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again. 
-*/ - -/*===== Buffer-less streaming compression functions =====*/ -ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel); -ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel); -ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */ -ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */ -ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */ -ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */ - -ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); -ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); - - -/*- - Buffer-less streaming decompression (synchronous mode) - - A ZSTD_DCtx object is required to track streaming operations. - Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it. - A ZSTD_DCtx object can be re-used multiple times. - - First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader(). - Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough. - Data fragment must be large enough to ensure successful decoding. 
- `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough. - @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled. - >0 : `srcSize` is too small, please provide at least @result bytes on next attempt. - errorCode, which can be tested using ZSTD_isError(). - - It fills a ZSTD_frameHeader structure with important information to correctly decode the frame, - such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`). - Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information. - As a consequence, check that values remain within valid application range. - For example, do not allocate memory blindly, check that `windowSize` is within expectation. - Each application can set its own limits, depending on local restrictions. - For extended interoperability, it is recommended to support `windowSize` of at least 8 MB. - - ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes. - ZSTD_decompressContinue() is very sensitive to contiguity, - if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place, - or that previous contiguous segment is large enough to properly handle maximum back-reference distance. - There are multiple ways to guarantee this condition. - - The most memory efficient way is to use a round buffer of sufficient size. - Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(), - which can @return an error code if required value is too large for current system (in 32-bits mode). - In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one, - up to the moment there is not enough room left in the buffer to guarantee decoding another full block, - which maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`. 
- At which point, decoding can resume from the beginning of the buffer. - Note that already decoded data stored in the buffer should be flushed before being overwritten. - - There are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory. - - Finally, if you control the compression process, you can also ignore all buffer size rules, - as long as the encoder and decoder progress in "lock-step", - aka use exactly the same buffer sizes, break contiguity at the same place, etc. - - Once buffers are setup, start decompression, with ZSTD_decompressBegin(). - If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict(). - - Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively. - ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue(). - ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail. - - @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity). - It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item. - It can also be an error code, which can be tested with ZSTD_isError(). - - A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero. - Context can then be reset to start a new decompression. - - Note : it's possible to know if next input to present is a header or a block, using ZSTD_nextInputType(). - This information is not required to properly decode a frame. - - == Special case : skippable frames == - - Skippable frames allow integration of user-defined data into a flow of concatenated frames. - Skippable frames will be ignored (skipped) by decompressor. 
- The format of skippable frames is as follows : - a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F - b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits - c) Frame Content - any content (User Data) of length equal to Frame Size - For skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame. - For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content. -*/ - -/*===== Buffer-less streaming decompression functions =====*/ -typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e; -typedef struct { - unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */ - unsigned long long windowSize; /* can be very large, up to <= frameContentSize */ - unsigned blockSizeMax; - ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */ - unsigned headerSize; - unsigned dictID; - unsigned checksumFlag; -} ZSTD_frameHeader; - -/*! ZSTD_getFrameHeader() : - * decode Frame Header, or requires larger `srcSize`. - * @return : 0, `zfhPtr` is correctly filled, - * >0, `srcSize` is too small, value is wanted `srcSize` amount, - * or an error code, which can be tested using ZSTD_isError() */ -ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /**< doesn't consume input */ -/*! 
ZSTD_getFrameHeader_advanced() : - * same as ZSTD_getFrameHeader(), - * with added capability to select a format (like ZSTD_f_zstd1_magicless) */ -ZSTDLIB_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format); -ZSTDLIB_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */ - -ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx); -ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); -ZSTDLIB_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict); - -ZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx); -ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); - -/* misc */ -ZSTDLIB_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx); -typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e; -ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); - - - - -/* ============================ */ -/** Block level API */ -/* ============================ */ - -/*! - Block functions produce and decode raw zstd blocks, without frame metadata. - Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes). - But users will have to take in charge needed metadata to regenerate data, such as compressed and content sizes. 
- - A few rules to respect : - - Compressing and decompressing require a context structure - + Use ZSTD_createCCtx() and ZSTD_createDCtx() - - It is necessary to init context before starting - + compression : any ZSTD_compressBegin*() variant, including with dictionary - + decompression : any ZSTD_decompressBegin*() variant, including with dictionary - + copyCCtx() and copyDCtx() can be used too - - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB - + If input is larger than a block size, it's necessary to split input data into multiple blocks - + For inputs larger than a single block, consider using regular ZSTD_compress() instead. - Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block. - - When a block is considered not compressible enough, ZSTD_compressBlock() result will be 0 (zero) ! - ===> In which case, nothing is produced into `dst` ! - + User __must__ test for such outcome and deal directly with uncompressed data - + A block cannot be declared incompressible if ZSTD_compressBlock() return value was != 0. - Doing so would mess up with statistics history, leading to potential data corruption. - + ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !! - + In case of multiple successive blocks, should some of them be uncompressed, - decoder must be informed of their existence in order to follow proper history. - Use ZSTD_insertBlock() for such a case. -*/ - -/*===== Raw zstd block functions =====*/ -ZSTDLIB_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx); -ZSTDLIB_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); -ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); -ZSTDLIB_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert uncompressed block into `dctx` history. 
Useful for multi-blocks decompression. */ - - -#endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */ - -#if defined (__cplusplus) -} -#endif diff --git a/dist/darwin_amd64/include/zstd_errors.h b/dist/darwin_amd64/include/zstd_errors.h deleted file mode 100644 index 998398e..0000000 --- a/dist/darwin_amd64/include/zstd_errors.h +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - -#ifndef ZSTD_ERRORS_H_398273423 -#define ZSTD_ERRORS_H_398273423 - -#if defined (__cplusplus) -extern "C" { -#endif - -/*===== dependency =====*/ -#include /* size_t */ - - -/* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */ -#ifndef ZSTDERRORLIB_VISIBILITY -# if defined(__GNUC__) && (__GNUC__ >= 4) -# define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default"))) -# else -# define ZSTDERRORLIB_VISIBILITY -# endif -#endif -#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) -# define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY -#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) -# define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ -#else -# define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY -#endif - -/*-********************************************* - * Error codes list - *-********************************************* - * Error codes _values_ are pinned down since v1.3.1 only. - * Therefore, don't rely on values if you may link to any version < v1.3.1. - * - * Only values < 100 are considered stable. 
- * - * note 1 : this API shall be used with static linking only. - * dynamic linking is not yet officially supported. - * note 2 : Prefer relying on the enum than on its value whenever possible - * This is the only supported way to use the error list < v1.3.1 - * note 3 : ZSTD_isError() is always correct, whatever the library version. - **********************************************/ -typedef enum { - ZSTD_error_no_error = 0, - ZSTD_error_GENERIC = 1, - ZSTD_error_prefix_unknown = 10, - ZSTD_error_version_unsupported = 12, - ZSTD_error_frameParameter_unsupported = 14, - ZSTD_error_frameParameter_windowTooLarge = 16, - ZSTD_error_corruption_detected = 20, - ZSTD_error_checksum_wrong = 22, - ZSTD_error_dictionary_corrupted = 30, - ZSTD_error_dictionary_wrong = 32, - ZSTD_error_dictionaryCreation_failed = 34, - ZSTD_error_parameter_unsupported = 40, - ZSTD_error_parameter_outOfBound = 42, - ZSTD_error_tableLog_tooLarge = 44, - ZSTD_error_maxSymbolValue_tooLarge = 46, - ZSTD_error_maxSymbolValue_tooSmall = 48, - ZSTD_error_stage_wrong = 60, - ZSTD_error_init_missing = 62, - ZSTD_error_memory_allocation = 64, - ZSTD_error_workSpace_tooSmall= 66, - ZSTD_error_dstSize_tooSmall = 70, - ZSTD_error_srcSize_wrong = 72, - ZSTD_error_dstBuffer_null = 74, - /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */ - ZSTD_error_frameIndex_tooLarge = 100, - ZSTD_error_seekableIO = 102, - ZSTD_error_dstBuffer_wrong = 104, - ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */ -} ZSTD_ErrorCode; - -/*! 
ZSTD_getErrorCode() : - convert a `size_t` function result into a `ZSTD_ErrorCode` enum type, - which can be used to compare with enum list published above */ -ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult); -ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */ - - -#if defined (__cplusplus) -} -#endif - -#endif /* ZSTD_ERRORS_H_398273423 */ diff --git a/dist/darwin_amd64/lib/liblz4.a b/dist/darwin_amd64/lib/liblz4.a deleted file mode 100644 index c5bc828..0000000 Binary files a/dist/darwin_amd64/lib/liblz4.a and /dev/null differ diff --git a/dist/darwin_amd64/lib/librocksdb.a b/dist/darwin_amd64/lib/librocksdb.a deleted file mode 100644 index 8f41bf5..0000000 Binary files a/dist/darwin_amd64/lib/librocksdb.a and /dev/null differ diff --git a/dist/darwin_amd64/lib/libsnappy.a b/dist/darwin_amd64/lib/libsnappy.a deleted file mode 100644 index fffdbf9..0000000 Binary files a/dist/darwin_amd64/lib/libsnappy.a and /dev/null differ diff --git a/dist/darwin_amd64/lib/libz.a b/dist/darwin_amd64/lib/libz.a deleted file mode 100644 index 0c292a8..0000000 Binary files a/dist/darwin_amd64/lib/libz.a and /dev/null differ diff --git a/dist/darwin_amd64/lib/libzstd.a b/dist/darwin_amd64/lib/libzstd.a deleted file mode 100644 index 8083621..0000000 Binary files a/dist/darwin_amd64/lib/libzstd.a and /dev/null differ diff --git a/dist/linux_amd64/include/rocksdb/advanced_options.h b/dist/linux_amd64/include/rocksdb/advanced_options.h index c76c604..4f2d376 100644 --- a/dist/linux_amd64/include/rocksdb/advanced_options.h +++ b/dist/linux_amd64/include/rocksdb/advanced_options.h @@ -237,6 +237,7 @@ struct AdvancedColumnFamilyOptions { // achieve point-in-time consistency using snapshot or iterator (assuming // concurrent updates). Hence iterator and multi-get will return results // which are not consistent as of any point-in-time. 
+ // Backward iteration on memtables will not work either. // If inplace_callback function is not set, // Put(key, new_value) will update inplace the existing_value iff // * key exists in current memtable @@ -732,7 +733,8 @@ struct AdvancedColumnFamilyOptions { // only pointers to them are stored in SST files. This can reduce write // amplification for large-value use cases at the cost of introducing a level // of indirection for reads. See also the options min_blob_size, - // blob_file_size, and blob_compression_type below. + // blob_file_size, blob_compression_type, enable_blob_garbage_collection, + // and blob_garbage_collection_age_cutoff below. // // Default: false // @@ -772,6 +774,30 @@ struct AdvancedColumnFamilyOptions { // Dynamically changeable through the SetOptions() API CompressionType blob_compression_type = kNoCompression; + // UNDER CONSTRUCTION -- DO NOT USE + // Enables garbage collection of blobs. Blob GC is performed as part of + // compaction. Valid blobs residing in blob files older than a cutoff get + // relocated to new files as they are encountered during compaction, which + // makes it possible to clean up blob files once they contain nothing but + // obsolete/garbage blobs. See also blob_garbage_collection_age_cutoff below. + // + // Default: false + // + // Dynamically changeable through the SetOptions() API + bool enable_blob_garbage_collection = false; + + // UNDER CONSTRUCTION -- DO NOT USE + // The cutoff in terms of blob file age for garbage collection. Blobs in + // the oldest N blob files will be relocated when encountered during + // compaction, where N = garbage_collection_cutoff * number_of_blob_files. + // Note that enable_blob_garbage_collection has to be set in order for this + // option to have any effect. 
+ // + // Default: 0.25 + // + // Dynamically changeable through the SetOptions() API + double blob_garbage_collection_age_cutoff = 0.25; + // Create ColumnFamilyOptions with default values for all fields AdvancedColumnFamilyOptions(); // Create ColumnFamilyOptions from Options diff --git a/dist/linux_amd64/include/rocksdb/c.h b/dist/linux_amd64/include/rocksdb/c.h index b8c72ae..8c3668e 100644 --- a/dist/linux_amd64/include/rocksdb/c.h +++ b/dist/linux_amd64/include/rocksdb/c.h @@ -71,6 +71,7 @@ extern "C" { typedef struct rocksdb_t rocksdb_t; typedef struct rocksdb_backup_engine_t rocksdb_backup_engine_t; typedef struct rocksdb_backup_engine_info_t rocksdb_backup_engine_info_t; +typedef struct rocksdb_backupable_db_options_t rocksdb_backupable_db_options_t; typedef struct rocksdb_restore_options_t rocksdb_restore_options_t; typedef struct rocksdb_cache_t rocksdb_cache_t; typedef struct rocksdb_compactionfilter_t rocksdb_compactionfilter_t; @@ -145,6 +146,10 @@ extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_as_secondary( extern ROCKSDB_LIBRARY_API rocksdb_backup_engine_t* rocksdb_backup_engine_open( const rocksdb_options_t* options, const char* path, char** errptr); +extern ROCKSDB_LIBRARY_API rocksdb_backup_engine_t* +rocksdb_backup_engine_open_opts(const rocksdb_backupable_db_options_t* options, + rocksdb_env_t* env, char** errptr); + extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_create_new_backup( rocksdb_backup_engine_t* be, rocksdb_t* db, char** errptr); @@ -203,6 +208,100 @@ extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_info_destroy( extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_close( rocksdb_backup_engine_t* be); +/* BackupableDBOptions */ + +extern ROCKSDB_LIBRARY_API rocksdb_backupable_db_options_t* +rocksdb_backupable_db_options_create(const char* backup_dir); + +extern ROCKSDB_LIBRARY_API void rocksdb_backupable_db_options_set_backup_dir( + rocksdb_backupable_db_options_t* options, const char* backup_dir); + +extern 
ROCKSDB_LIBRARY_API void rocksdb_backupable_db_options_set_env( + rocksdb_backupable_db_options_t* options, rocksdb_env_t* env); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backupable_db_options_set_share_table_files( + rocksdb_backupable_db_options_t* options, unsigned char val); + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_backupable_db_options_get_share_table_files( + rocksdb_backupable_db_options_t* options); + +extern ROCKSDB_LIBRARY_API void rocksdb_backupable_db_options_set_sync( + rocksdb_backupable_db_options_t* options, unsigned char val); + +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_backupable_db_options_get_sync( + rocksdb_backupable_db_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backupable_db_options_set_destroy_old_data( + rocksdb_backupable_db_options_t* options, unsigned char val); + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_backupable_db_options_get_destroy_old_data( + rocksdb_backupable_db_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backupable_db_options_set_backup_log_files( + rocksdb_backupable_db_options_t* options, unsigned char val); + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_backupable_db_options_get_backup_log_files( + rocksdb_backupable_db_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backupable_db_options_set_backup_rate_limit( + rocksdb_backupable_db_options_t* options, uint64_t limit); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_backupable_db_options_get_backup_rate_limit( + rocksdb_backupable_db_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backupable_db_options_set_restore_rate_limit( + rocksdb_backupable_db_options_t* options, uint64_t limit); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_backupable_db_options_get_restore_rate_limit( + rocksdb_backupable_db_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backupable_db_options_set_max_background_operations( + rocksdb_backupable_db_options_t* options, 
int val); + +extern ROCKSDB_LIBRARY_API int +rocksdb_backupable_db_options_get_max_background_operations( + rocksdb_backupable_db_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backupable_db_options_set_callback_trigger_interval_size( + rocksdb_backupable_db_options_t* options, uint64_t size); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_backupable_db_options_get_callback_trigger_interval_size( + rocksdb_backupable_db_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backupable_db_options_set_max_valid_backups_to_open( + rocksdb_backupable_db_options_t* options, int val); + +extern ROCKSDB_LIBRARY_API int +rocksdb_backupable_db_options_get_max_valid_backups_to_open( + rocksdb_backupable_db_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backupable_db_options_set_share_files_with_checksum_naming( + rocksdb_backupable_db_options_t* options, int val); + +extern ROCKSDB_LIBRARY_API int +rocksdb_backupable_db_options_get_share_files_with_checksum_naming( + rocksdb_backupable_db_options_t* options); + +extern ROCKSDB_LIBRARY_API void rocksdb_backupable_db_options_destroy( + rocksdb_backupable_db_options_t*); + +/* Checkpoint */ + extern ROCKSDB_LIBRARY_API rocksdb_checkpoint_t* rocksdb_checkpoint_object_create(rocksdb_t* db, char** errptr); @@ -1616,12 +1715,20 @@ extern ROCKSDB_LIBRARY_API rocksdb_env_t* rocksdb_create_default_env(); extern ROCKSDB_LIBRARY_API rocksdb_env_t* rocksdb_create_mem_env(); extern ROCKSDB_LIBRARY_API void rocksdb_env_set_background_threads( rocksdb_env_t* env, int n); +extern ROCKSDB_LIBRARY_API int rocksdb_env_get_background_threads( + rocksdb_env_t* env); extern ROCKSDB_LIBRARY_API void rocksdb_env_set_high_priority_background_threads(rocksdb_env_t* env, int n); +extern ROCKSDB_LIBRARY_API int rocksdb_env_get_high_priority_background_threads( + rocksdb_env_t* env); extern ROCKSDB_LIBRARY_API void rocksdb_env_set_low_priority_background_threads( rocksdb_env_t* env, int n); +extern 
ROCKSDB_LIBRARY_API int rocksdb_env_get_low_priority_background_threads( + rocksdb_env_t* env); extern ROCKSDB_LIBRARY_API void rocksdb_env_set_bottom_priority_background_threads(rocksdb_env_t* env, int n); +extern ROCKSDB_LIBRARY_API int +rocksdb_env_get_bottom_priority_background_threads(rocksdb_env_t* env); extern ROCKSDB_LIBRARY_API void rocksdb_env_join_all_threads( rocksdb_env_t* env); extern ROCKSDB_LIBRARY_API void rocksdb_env_lower_thread_pool_io_priority(rocksdb_env_t* env); @@ -1728,21 +1835,39 @@ rocksdb_universal_compaction_options_create(); extern ROCKSDB_LIBRARY_API void rocksdb_universal_compaction_options_set_size_ratio( rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_size_ratio( + rocksdb_universal_compaction_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_universal_compaction_options_set_min_merge_width( rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_min_merge_width( + rocksdb_universal_compaction_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_universal_compaction_options_set_max_merge_width( rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_max_merge_width( + rocksdb_universal_compaction_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_universal_compaction_options_set_max_size_amplification_percent( rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_max_size_amplification_percent( + rocksdb_universal_compaction_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_universal_compaction_options_set_compression_size_percent( rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_compression_size_percent( + rocksdb_universal_compaction_options_t*); extern ROCKSDB_LIBRARY_API void 
rocksdb_universal_compaction_options_set_stop_style( rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_stop_style( + rocksdb_universal_compaction_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_universal_compaction_options_destroy( rocksdb_universal_compaction_options_t*); @@ -1751,6 +1876,9 @@ rocksdb_fifo_compaction_options_create(); extern ROCKSDB_LIBRARY_API void rocksdb_fifo_compaction_options_set_max_table_files_size( rocksdb_fifo_compaction_options_t* fifo_opts, uint64_t size); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_fifo_compaction_options_get_max_table_files_size( + rocksdb_fifo_compaction_options_t* fifo_opts); extern ROCKSDB_LIBRARY_API void rocksdb_fifo_compaction_options_destroy( rocksdb_fifo_compaction_options_t* fifo_opts); diff --git a/dist/linux_amd64/include/rocksdb/configurable.h b/dist/linux_amd64/include/rocksdb/configurable.h index f4bfbf5..95c5cf4 100644 --- a/dist/linux_amd64/include/rocksdb/configurable.h +++ b/dist/linux_amd64/include/rocksdb/configurable.h @@ -270,11 +270,6 @@ class Configurable { // True once the object is prepared. Once the object is prepared, only // mutable options can be configured. bool prepared_; - // If this class is a wrapper (has-a), this method should be - // over-written to return the inner configurable (like an EnvWrapper). - // This method should NOT recurse, but should instead return the - // direct Inner object. - virtual Configurable* Inner() const { return nullptr; } // Returns the raw pointer for the associated named option. // The name is typically the name of an option registered via the diff --git a/dist/linux_amd64/include/rocksdb/customizable.h b/dist/linux_amd64/include/rocksdb/customizable.h new file mode 100644 index 0000000..366c756 --- /dev/null +++ b/dist/linux_amd64/include/rocksdb/customizable.h @@ -0,0 +1,138 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once + +#include "rocksdb/configurable.h" +#include "rocksdb/status.h" + +namespace ROCKSDB_NAMESPACE { +/** + * Customizable a base class used by the rocksdb that describes a + * standard way of configuring and creating objects. Customizable objects + * are configurable objects that can be created from an ObjectRegistry. + * + * Customizable classes are used when there are multiple potential + * implementations of a class for use by RocksDB (e.g. Table, Cache, + * MergeOperator, etc). The abstract base class is expected to define a method + * declaring its type and a factory method for creating one of these, such as: + * static const char *Type() { return "Table"; } + * static Status CreateFromString(const ConfigOptions& options, + * const std::string& id, + * std::shared_ptr* result); + * The "Type" string is expected to be unique (no two base classes are the same + * type). This factory is expected, based on the options and id, create and + * return the appropriate derived type of the customizable class (e.g. + * BlockBasedTableFactory, PlainTableFactory, etc). For extension developers, + * helper classes and methods are provided for writing this factory. + * + * Instances of a Customizable class need to define: + * - A "static const char *kClassName()" method. This method defines the name + * of the class instance (e.g. BlockBasedTable, LRUCache) and is used by the + * CheckedCast method. + * - The Name() of the object. This name is used when creating and saving + * instances of this class. 
Typically this name will be the same as + * kClassName(). + * + * Additionally, Customizable classes should register any options used to + * configure themselves with the Configurable subsystem. + * + * When a Customizable is being created, the "name" property specifies + * the name of the instance being created. + * For custom objects, their configuration and name can be specified by: + * [prop]={name=X;option 1 = value1[; option2=value2...]} + * + * [prop].name=X + * [prop].option1 = value1 + * + * [prop].name=X + * X.option1 =value1 + */ +class Customizable : public Configurable { + public: + virtual ~Customizable() {} + + // Returns the name of this class of Customizable + virtual const char* Name() const = 0; + + // Returns an identifier for this Customizable. + // This could be its name or something more complex (like its URL/pattern). + // Used for pretty printing. + virtual std::string GetId() const { + std::string id = Name(); + return id; + } + + // This is typically determined by if the input name matches the + // name of this object. + // This method is typically used in conjunction with CheckedCast to find the + // derived class instance from its base. For example, if you have an Env + // and want the "Default" env, you would IsInstanceOf("Default") to get + // the default implementation. This method should be used when you need a + // specific derivative or implementation of a class. + // + // Intermediary caches (such as SharedCache) may wish to override this method + // to check for the intermediary name (SharedCache). Classes with multiple + // potential names (e.g. "PosixEnv", "DefaultEnv") may also wish to override + // this method. + // + // @param name The name of the instance to find. + // Returns true if the class is an instance of the input name. + virtual bool IsInstanceOf(const std::string& name) const { + return name == Name(); + } + + // Returns the named instance of the Customizable as a T*, or nullptr if not + // found. 
This method uses IsInstanceOf to find the appropriate class instance + // and then casts it to the expected return type. + template + const T* CheckedCast() const { + if (IsInstanceOf(T::kClassName())) { + return static_cast(this); + } else { + return nullptr; + } + } + + template + T* CheckedCast() { + if (IsInstanceOf(T::kClassName())) { + return static_cast(this); + } else { + return nullptr; + } + } + + // Checks to see if this Customizable is equivalent to other. + // This method assumes that the two objects are of the same class. + // @param config_options Controls how the options are compared. + // @param other The other object to compare to. + // @param mismatch If the objects do not match, this parameter contains + // the name of the option that triggered the match failure. + // @param True if the objects match, false otherwise. + // @see Configurable::AreEquivalent for more details + bool AreEquivalent(const ConfigOptions& config_options, + const Configurable* other, + std::string* mismatch) const override; +#ifndef ROCKSDB_LITE + // Gets the value of the option associated with the input name + // @see Configurable::GetOption for more details + Status GetOption(const ConfigOptions& config_options, const std::string& name, + std::string* value) const override; + +#endif // ROCKSDB_LITE + protected: + // Given a name (e.g. 
rocksdb.my.type.opt), returns the short name (opt) + std::string GetOptionName(const std::string& long_name) const override; +#ifndef ROCKSDB_LITE + std::string SerializeOptions(const ConfigOptions& options, + const std::string& prefix) const override; +#endif // ROCKSDB_LITE +}; + +} // namespace ROCKSDB_NAMESPACE diff --git a/dist/linux_amd64/include/rocksdb/db.h b/dist/linux_amd64/include/rocksdb/db.h index 6eab3f3..840e1b4 100644 --- a/dist/linux_amd64/include/rocksdb/db.h +++ b/dist/linux_amd64/include/rocksdb/db.h @@ -1443,6 +1443,12 @@ class DB { const ExportImportFilesMetaData& metadata, ColumnFamilyHandle** handle) = 0; + // Verify the checksums of files in db. Currently the whole-file checksum of + // table files are checked. + virtual Status VerifyFileChecksums(const ReadOptions& /*read_options*/) { + return Status::NotSupported("File verification not supported"); + } + // Verify the block checksums of files in db. The block checksums of table // files are checked. virtual Status VerifyChecksum(const ReadOptions& read_options) = 0; @@ -1599,14 +1605,14 @@ class DB { return Status::NotSupported("EndTrace() is not implemented."); } - // StartIOTrace and EndIOTrace are experimental. They are not enabled yet. + // IO Tracing operations. Use EndIOTrace() to stop tracing. virtual Status StartIOTrace(Env* /*env*/, const TraceOptions& /*options*/, std::unique_ptr&& /*trace_writer*/) { - return Status::NotSupported("StartTrace() is not implemented."); + return Status::NotSupported("StartIOTrace() is not implemented."); } virtual Status EndIOTrace() { - return Status::NotSupported("StartTrace() is not implemented."); + return Status::NotSupported("EndIOTrace() is not implemented."); } // Trace block cache accesses. Use EndBlockCacheTrace() to stop tracing. 
diff --git a/dist/linux_amd64/include/rocksdb/env.h b/dist/linux_amd64/include/rocksdb/env.h index 955d591..a129b19 100644 --- a/dist/linux_amd64/include/rocksdb/env.h +++ b/dist/linux_amd64/include/rocksdb/env.h @@ -452,9 +452,15 @@ class Env { // Sleep/delay the thread for the prescribed number of micro-seconds. virtual void SleepForMicroseconds(int micros) = 0; - // Get the current host name. + // Get the current host name as a null terminated string iff the string + // length is < len. The hostname should otherwise be truncated to len. virtual Status GetHostName(char* name, uint64_t len) = 0; + // Get the current hostname from the given env as a std::string in result. + // The result may be truncated if the hostname is too + // long + virtual Status GetHostNameString(std::string* result); + // Get the number of seconds since the Epoch, 1970-01-01 00:00:00 (UTC). // Only overwrites *unix_time on success. virtual Status GetCurrentTime(int64_t* unix_time) = 0; @@ -576,6 +582,9 @@ class Env { // Pointer to the underlying FileSystem implementation std::shared_ptr file_system_; + + private: + static const size_t kMaxHostNameLen = 256; }; // The factory function to construct a ThreadStatusUpdater. Any Env @@ -1042,11 +1051,17 @@ class Logger { virtual void LogHeader(const char* format, va_list ap) { // Default implementation does a simple INFO level log write. // Please override as per the logger class requirement. - Logv(format, ap); + Logv(InfoLogLevel::INFO_LEVEL, format, ap); } // Write an entry to the log file with the specified format. - virtual void Logv(const char* format, va_list ap) = 0; + // + // Users who override the `Logv()` overload taking `InfoLogLevel` do not need + // to implement this, unless they explicitly invoke it in + // `Logv(InfoLogLevel, ...)`. + virtual void Logv(const char* /* format */, va_list /* ap */) { + assert(false); + } // Write an entry to the log file with the specified log level // and format. 
Any log with level under the internal log level diff --git a/dist/linux_amd64/include/rocksdb/file_system.h b/dist/linux_amd64/include/rocksdb/file_system.h index 3683491..e38929d 100644 --- a/dist/linux_amd64/include/rocksdb/file_system.h +++ b/dist/linux_amd64/include/rocksdb/file_system.h @@ -262,7 +262,7 @@ class FileSystem { virtual IOStatus ReopenWritableFile( const std::string& /*fname*/, const FileOptions& /*options*/, std::unique_ptr* /*result*/, IODebugContext* /*dbg*/) { - return IOStatus::NotSupported(); + return IOStatus::NotSupported("ReopenWritableFile"); } // Reuse an existing file by renaming it and opening it as writable. @@ -523,7 +523,7 @@ class FileSystem { const IOOptions& /*options*/, uint64_t* /*diskfree*/, IODebugContext* /*dbg*/) { - return IOStatus::NotSupported(); + return IOStatus::NotSupported("GetFreeSpace"); } virtual IOStatus IsDirectory(const std::string& /*path*/, @@ -584,7 +584,7 @@ class FSSequentialFile { const IOOptions& /*options*/, Slice* /*result*/, char* /*scratch*/, IODebugContext* /*dbg*/) { - return IOStatus::NotSupported(); + return IOStatus::NotSupported("PositionedRead"); } // If you're adding methods here, remember to add them to @@ -638,7 +638,7 @@ class FSRandomAccessFile { virtual IOStatus Prefetch(uint64_t /*offset*/, size_t /*n*/, const IOOptions& /*options*/, IODebugContext* /*dbg*/) { - return IOStatus::NotSupported(); + return IOStatus::NotSupported("Prefetch"); } // Read a bunch of blocks as described by reqs. 
The blocks can @@ -770,7 +770,7 @@ class FSWritableFile { uint64_t /* offset */, const IOOptions& /*options*/, IODebugContext* /*dbg*/) { - return IOStatus::NotSupported(); + return IOStatus::NotSupported("PositionedAppend"); } // EXPERIMENTAL / CURRENTLY UNUSED @@ -782,7 +782,7 @@ class FSWritableFile { const IOOptions& /*options*/, const DataVerificationInfo& /* verification_info */, IODebugContext* /*dbg*/) { - return IOStatus::NotSupported(); + return IOStatus::NotSupported("PositionedAppend"); } // Truncate is necessary to trim the file to the correct size diff --git a/dist/linux_amd64/include/rocksdb/filter_policy.h b/dist/linux_amd64/include/rocksdb/filter_policy.h index 3cd85a2..7829db1 100644 --- a/dist/linux_amd64/include/rocksdb/filter_policy.h +++ b/dist/linux_amd64/include/rocksdb/filter_policy.h @@ -212,4 +212,24 @@ class FilterPolicy { // trailing spaces in keys. extern const FilterPolicy* NewBloomFilterPolicy( double bits_per_key, bool use_block_based_builder = false); + +// An EXPERIMENTAL new Bloom alternative that saves about 30% space +// compared to Bloom filters, with about 3-4x construction time and +// similar query times. For example, if you pass in 10 for +// bloom_equivalent_bits_per_key, you'll get the same 0.95% FP rate +// as Bloom filter but only using about 7 bits per key. (This +// way of configuring the new filter is considered experimental +// and/or transitional, so is expected to go away.) +// +// Ribbon filters are ignored by previous versions of RocksDB, as if +// no filter was used. +// +// Note: this policy can generate Bloom filters in some cases. +// For very small filters (well under 1KB), Bloom fallback is by +// design, as the current Ribbon schema is not optimized to save vs. +// Bloom for such small filters. Other cases of Bloom fallback should +// be exceptional and log an appropriate warning. 
+extern const FilterPolicy* NewExperimentalRibbonFilterPolicy( + double bloom_equivalent_bits_per_key); + } // namespace ROCKSDB_NAMESPACE diff --git a/dist/linux_amd64/include/rocksdb/memory_allocator.h b/dist/linux_amd64/include/rocksdb/memory_allocator.h index 60256a9..5144223 100644 --- a/dist/linux_amd64/include/rocksdb/memory_allocator.h +++ b/dist/linux_amd64/include/rocksdb/memory_allocator.h @@ -45,31 +45,31 @@ struct JemallocAllocatorOptions { bool limit_tcache_size = false; // Lower bound of allocation size to use tcache, if limit_tcache_size=true. - // When used with block cache, it is recommneded to set it to block_size/4. + // When used with block cache, it is recommended to set it to block_size/4. size_t tcache_size_lower_bound = 1024; // Upper bound of allocation size to use tcache, if limit_tcache_size=true. - // When used with block cache, it is recommneded to set it to block_size. + // When used with block cache, it is recommended to set it to block_size. size_t tcache_size_upper_bound = 16 * 1024; }; -// Generate memory allocators which allocates through Jemalloc and utilize -// MADV_DONTDUMP through madvice to exclude cache items from core dump. +// Generate memory allocator which allocates through Jemalloc and utilize +// MADV_DONTDUMP through madvise to exclude cache items from core dump. // Applications can use the allocator with block cache to exclude block cache // usage from core dump. // // Implementation details: -// The JemallocNodumpAllocator creates a delicated jemalloc arena, and all -// allocations of the JemallocNodumpAllocator is through the same arena. -// The memory allocator hooks memory allocation of the arena, and call -// madvice() with MADV_DONTDUMP flag to exclude the piece of memory from -// core dump. Side benefit of using single arena would be reduce of jemalloc -// metadata for some workload. 
+// The JemallocNodumpAllocator creates a dedicated jemalloc arena, and all +// allocations of the JemallocNodumpAllocator are through the same arena. +// The memory allocator hooks memory allocation of the arena, and calls +// madvise() with MADV_DONTDUMP flag to exclude the piece of memory from +// core dump. Side benefit of using single arena would be reduction of jemalloc +// metadata for some workloads. // // To mitigate mutex contention for using one single arena, jemalloc tcache // (thread-local cache) is enabled to cache unused allocations for future use. -// The tcache normally incur 0.5M extra memory usage per-thread. The usage -// can be reduce by limitting allocation sizes to cache. +// The tcache normally incurs 0.5M extra memory usage per-thread. The usage +// can be reduced by limiting allocation sizes to cache. extern Status NewJemallocNodumpAllocator( JemallocAllocatorOptions& options, std::shared_ptr* memory_allocator); diff --git a/dist/linux_amd64/include/rocksdb/options.h b/dist/linux_amd64/include/rocksdb/options.h index 53c444a..79ab8af 100644 --- a/dist/linux_amd64/include/rocksdb/options.h +++ b/dist/linux_amd64/include/rocksdb/options.h @@ -24,6 +24,7 @@ #include "rocksdb/file_checksum.h" #include "rocksdb/listener.h" #include "rocksdb/sst_partitioner.h" +#include "rocksdb/types.h" #include "rocksdb/universal_compaction.h" #include "rocksdb/version.h" #include "rocksdb/write_buffer_manager.h" @@ -348,6 +349,8 @@ struct DbPath { DbPath(const std::string& p, uint64_t t) : path(p), target_size(t) {} }; +extern const char* kHostnameForDbHostId; + struct DBOptions { // The function recovers options to the option as in version 4.6. DBOptions* OldDefaults(int rocksdb_major_version = 4, @@ -390,6 +393,20 @@ struct DBOptions { // Default: true bool paranoid_checks = true; + // If true, track WALs in MANIFEST and verify them on recovery. 
+ // + // If a WAL is tracked in MANIFEST but is missing from disk on recovery, + // or the size of the tracked WAL is larger than the WAL's on-disk size, + // an error is reported and recovery is aborted. + // + // If a WAL is not tracked in MANIFEST, then no verification will happen + // during recovery. + // + // Default: false + // FIXME(cheng): This option is part of a work in progress and does not yet + // work + bool track_and_verify_wals_in_manifest = false; + // Use the specified object to interact with the environment, // e.g. to read/write files, schedule background work, etc. In the near // future, support for doing storage operations such as read/write files @@ -1166,6 +1183,19 @@ struct DBOptions { // // Default: false bool allow_data_in_errors = false; + + // A string identifying the machine hosting the DB. This + // will be written as a property in every SST file written by the DB (or + // by offline writers such as SstFileWriter and RepairDB). It can be useful + // for troubleshooting in memory corruption caused by a failing host when + // writing a file, by tracing back to the writing host. These corruptions + // may not be caught by the checksum since they happen before checksumming. + // If left as default, the table writer will substitute it with the actual + // hostname when writing the SST file. If set to an empty stirng, the + // property will not be written to the SST file. 
+ // + // Default: hostname + std::string db_host_id = kHostnameForDbHostId; }; // Options to control the behavior of a database (passed to DB::Open) diff --git a/dist/linux_amd64/include/rocksdb/table.h b/dist/linux_amd64/include/rocksdb/table.h index 8e0d144..a2bfe3c 100644 --- a/dist/linux_amd64/include/rocksdb/table.h +++ b/dist/linux_amd64/include/rocksdb/table.h @@ -22,7 +22,7 @@ #include #include -#include "rocksdb/configurable.h" +#include "rocksdb/customizable.h" #include "rocksdb/env.h" #include "rocksdb/options.h" #include "rocksdb/status.h" @@ -51,6 +51,55 @@ enum ChecksumType : char { kxxHash64 = 0x3, }; +// `PinningTier` is used to specify which tier of block-based tables should +// be affected by a block cache pinning setting (see +// `MetadataCacheOptions` below). +enum class PinningTier { + // For compatibility, this value specifies to fallback to the behavior + // indicated by the deprecated options, + // `pin_l0_filter_and_index_blocks_in_cache` and + // `pin_top_level_index_and_filter`. + kFallback, + + // This tier contains no block-based tables. + kNone, + + // This tier contains block-based tables that may have originated from a + // memtable flush. In particular, it includes tables from L0 that are smaller + // than 1.5 times the current `write_buffer_size`. Note these criteria imply + // it can include intra-L0 compaction outputs and ingested files, as long as + // they are not abnormally large compared to flushed files in L0. + kFlushedAndSimilar, + + // This tier contains all block-based tables. + kAll, +}; + +// `MetadataCacheOptions` contains members indicating the desired caching +// behavior for the different categories of metadata blocks. +struct MetadataCacheOptions { + // The tier of block-based tables whose top-level index into metadata + // partitions will be pinned. Currently indexes and filters may be + // partitioned. + // + // Note `cache_index_and_filter_blocks` must be true for this option to have + // any effect. 
Otherwise any top-level index into metadata partitions would be + // held in table reader memory, outside the block cache. + PinningTier top_level_index_pinning = PinningTier::kFallback; + + // The tier of block-based tables whose metadata partitions will be pinned. + // Currently indexes and filters may be partitioned. + PinningTier partition_pinning = PinningTier::kFallback; + + // The tier of block-based tables whose unpartitioned metadata blocks will be + // pinned. + // + // Note `cache_index_and_filter_blocks` must be true for this option to have + // any effect. Otherwise the unpartitioned meta-blocks would be held in table + // reader memory, outside the block cache. + PinningTier unpartitioned_pinning = PinningTier::kFallback; +}; + // For advanced user only struct BlockBasedTableOptions { static const char* kName() { return "BlockTableOptions"; }; @@ -79,12 +128,44 @@ struct BlockBasedTableOptions { // than data blocks. bool cache_index_and_filter_blocks_with_high_priority = true; + // DEPRECATED: This option will be removed in a future version. For now, this + // option still takes effect by updating each of the following variables that + // has the default value, `PinningTier::kFallback`: + // + // - `MetadataCacheOptions::partition_pinning` + // - `MetadataCacheOptions::unpartitioned_pinning` + // + // The updated value is chosen as follows: + // + // - `pin_l0_filter_and_index_blocks_in_cache == false` -> + // `PinningTier::kNone` + // - `pin_l0_filter_and_index_blocks_in_cache == true` -> + // `PinningTier::kFlushedAndSimilar` + // + // To migrate away from this flag, explicitly configure + // `MetadataCacheOptions` as described above. + // // if cache_index_and_filter_blocks is true and the below is true, then // filter and index blocks are stored in the cache, but a reference is // held in the "table reader" object so the blocks are pinned and only // evicted from cache when the table reader is freed. 
bool pin_l0_filter_and_index_blocks_in_cache = false; + // DEPRECATED: This option will be removed in a future version. For now, this + // option still takes effect by updating + // `MetadataCacheOptions::top_level_index_pinning` when it has the + // default value, `PinningTier::kFallback`. + // + // The updated value is chosen as follows: + // + // - `pin_top_level_index_and_filter == false` -> + // `PinningTier::kNone` + // - `pin_top_level_index_and_filter == true` -> + // `PinningTier::kAll` + // + // To migrate away from this flag, explicitly configure + // `MetadataCacheOptions` as described above. + // // If cache_index_and_filter_blocks is true and the below is true, then // the top-level index of partitioned filter and index blocks are stored in // the cache, but a reference is held in the "table reader" object so the @@ -92,6 +173,12 @@ struct BlockBasedTableOptions { // freed. This is not limited to l0 in LSM tree. bool pin_top_level_index_and_filter = true; + // The desired block cache pinning behavior for the different categories of + // metadata blocks. While pinning can reduce block cache contention, users + // must take care not to pin excessive amounts of data, which risks + // overflowing block cache. + MetadataCacheOptions metadata_cache_options; + // The index type that will be used for this table. enum IndexType : char { // A space efficient index block that is optimized for @@ -526,7 +613,7 @@ extern TableFactory* NewCuckooTableFactory( class RandomAccessFileReader; // A base class for table factories. -class TableFactory : public Configurable { +class TableFactory : public Customizable { public: virtual ~TableFactory() override {} @@ -540,21 +627,7 @@ class TableFactory : public Configurable { const std::string& id, std::shared_ptr* factory); - // The type of the table. - // - // The client of this package should switch to a new name whenever - // the table format implementation changes. - // - // Names starting with "rocksdb." 
are reserved and should not be used - // by any clients of this package. - virtual const char* Name() const = 0; - - // Returns true if the class is an instance of the input name. - // This is typically determined by if the input name matches the - // name of this object. - virtual bool IsInstanceOf(const std::string& name) const { - return name == Name(); - } + static const char* Type() { return "TableFactory"; } // Returns a Table object table that can fetch data from file specified // in parameter file. It's the caller's responsibility to make sure diff --git a/dist/linux_amd64/include/rocksdb/table_properties.h b/dist/linux_amd64/include/rocksdb/table_properties.h index ba3eca7..c6810aa 100644 --- a/dist/linux_amd64/include/rocksdb/table_properties.h +++ b/dist/linux_amd64/include/rocksdb/table_properties.h @@ -32,6 +32,7 @@ typedef std::map UserCollectedProperties; struct TablePropertiesNames { static const std::string kDbId; static const std::string kDbSessionId; + static const std::string kDbHostId; static const std::string kDataSize; static const std::string kIndexSize; static const std::string kIndexPartitions; @@ -206,6 +207,12 @@ struct TableProperties { // empty string. std::string db_session_id; + // Location of the machine hosting the DB instance + // db_host_id identifies the location of the host in some form + // (hostname by default, but can also be any string of the user's choosing). + // It can potentially change whenever the DB is opened + std::string db_host_id; + // Name of the column family with which this SST file is associated. // If column family is unknown, `column_family_name` will be an empty string. std::string column_family_name; diff --git a/dist/linux_amd64/include/rocksdb/types.h b/dist/linux_amd64/include/rocksdb/types.h index a4ab9c0..d56a7cc 100644 --- a/dist/linux_amd64/include/rocksdb/types.h +++ b/dist/linux_amd64/include/rocksdb/types.h @@ -12,11 +12,29 @@ namespace ROCKSDB_NAMESPACE { // Define all public custom types here. 
+using ColumnFamilyId = uint32_t; + // Represents a sequence number in a WAL file. typedef uint64_t SequenceNumber; const SequenceNumber kMinUnCommittedSeq = 1; // 0 is always committed +// The types of files RocksDB uses in a DB directory. (Available for +// advanced options.) +enum FileType { + kWalFile, + kDBLockFile, + kTableFile, + kDescriptorFile, + kCurrentFile, + kTempFile, + kInfoLogFile, // Either the current one, or an old one + kMetaDatabase, + kIdentityFile, + kOptionsFile, + kBlobFile +}; + // User-oriented representation of internal key types. // Ordering of this enum entries should not change. enum EntryType { diff --git a/dist/linux_amd64/include/rocksdb/utilities/options_type.h b/dist/linux_amd64/include/rocksdb/utilities/options_type.h index 2bd081a..36e1e09 100644 --- a/dist/linux_amd64/include/rocksdb/utilities/options_type.h +++ b/dist/linux_amd64/include/rocksdb/utilities/options_type.h @@ -49,6 +49,7 @@ enum class OptionType { kStruct, kVector, kConfigurable, + kCustomizable, kUnknown, }; @@ -93,13 +94,14 @@ enum class OptionTypeFlags : uint32_t { kCompareLoose = ConfigOptions::kSanityLevelLooselyCompatible, kCompareExact = ConfigOptions::kSanityLevelExactMatch, - kMutable = 0x0100, // Option is mutable - kRawPointer = 0x0200, // The option is stored as a raw pointer - kShared = 0x0400, // The option is stored as a shared_ptr - kUnique = 0x0800, // The option is stored as a unique_ptr - kAllowNull = 0x1000, // The option can be null - kDontSerialize = 0x2000, // Don't serialize the option - kDontPrepare = 0x4000, // Don't prepare or sanitize this option + kMutable = 0x0100, // Option is mutable + kRawPointer = 0x0200, // The option is stored as a raw pointer + kShared = 0x0400, // The option is stored as a shared_ptr + kUnique = 0x0800, // The option is stored as a unique_ptr + kAllowNull = 0x1000, // The option can be null + kDontSerialize = 0x2000, // Don't serialize the option + kDontPrepare = 0x4000, // Don't prepare or sanitize this 
option + kStringNameOnly = 0x8000, // The option serializes to a name only }; inline OptionTypeFlags operator|(const OptionTypeFlags &a, @@ -406,6 +408,103 @@ class OptionTypeInfo { }); } + // Create a new std::shared_ptr OptionTypeInfo + // This function will call the T::CreateFromString method to create a new + // std::shared_ptr object. + // + // @param offset The offset for the Customizable from the base pointer + // @param ovt How to verify this option + // @param flags, Extra flags specifying the behavior of this option + // @param _sfunc Optional function for serializing this option + // @param _efunc Optional function for comparing this option + template + static OptionTypeInfo AsCustomSharedPtr(int offset, + OptionVerificationType ovt, + OptionTypeFlags flags) { + return AsCustomSharedPtr(offset, ovt, flags, nullptr, nullptr); + } + + template + static OptionTypeInfo AsCustomSharedPtr(int offset, + OptionVerificationType ovt, + OptionTypeFlags flags, + const SerializeFunc& serialize_func, + const EqualsFunc& equals_func) { + return OptionTypeInfo( + offset, OptionType::kCustomizable, ovt, + flags | OptionTypeFlags::kShared, + [](const ConfigOptions& opts, const std::string&, + const std::string& value, char* addr) { + auto* shared = reinterpret_cast*>(addr); + return T::CreateFromString(opts, value, shared); + }, + serialize_func, equals_func); + } + + // Create a new std::unique_ptr OptionTypeInfo + // This function will call the T::CreateFromString method to create a new + // std::unique_ptr object. 
+ // + // @param offset The offset for the Customizable from the base pointer + // @param ovt How to verify this option + // @param flags, Extra flags specifying the behavior of this option + // @param _sfunc Optional function for serializing this option + // @param _efunc Optional function for comparing this option + template + static OptionTypeInfo AsCustomUniquePtr(int offset, + OptionVerificationType ovt, + OptionTypeFlags flags) { + return AsCustomUniquePtr(offset, ovt, flags, nullptr, nullptr); + } + + template + static OptionTypeInfo AsCustomUniquePtr(int offset, + OptionVerificationType ovt, + OptionTypeFlags flags, + const SerializeFunc& serialize_func, + const EqualsFunc& equals_func) { + return OptionTypeInfo( + offset, OptionType::kCustomizable, ovt, + flags | OptionTypeFlags::kUnique, + [](const ConfigOptions& opts, const std::string&, + const std::string& value, char* addr) { + auto* unique = reinterpret_cast*>(addr); + return T::CreateFromString(opts, value, unique); + }, + serialize_func, equals_func); + } + + // Create a new Customizable* OptionTypeInfo + // This function will call the T::CreateFromString method to create a new + // T object. 
+ // + // @param _offset The offset for the Customizable from the base pointer + // @param ovt How to verify this option + // @param flags, Extra flags specifying the behavior of this option + // @param _sfunc Optional function for serializing this option + // @param _efunc Optional function for comparing this option + template + static OptionTypeInfo AsCustomRawPtr(int offset, OptionVerificationType ovt, + OptionTypeFlags flags) { + return AsCustomRawPtr(offset, ovt, flags, nullptr, nullptr); + } + + template + static OptionTypeInfo AsCustomRawPtr(int offset, OptionVerificationType ovt, + OptionTypeFlags flags, + const SerializeFunc& serialize_func, + const EqualsFunc& equals_func) { + return OptionTypeInfo( + offset, OptionType::kCustomizable, ovt, + flags | OptionTypeFlags::kRawPointer, + [](const ConfigOptions& opts, const std::string&, + const std::string& value, char* addr) { + auto** pointer = reinterpret_cast(addr); + return T::CreateFromString(opts, value, pointer); + }, + serialize_func, equals_func); + } + bool IsEnabled(OptionTypeFlags otf) const { return (flags_ & otf) == otf; } bool IsMutable() const { return IsEnabled(OptionTypeFlags::kMutable); } @@ -475,7 +574,12 @@ class OptionTypeInfo { bool IsStruct() const { return (type_ == OptionType::kStruct); } - bool IsConfigurable() const { return (type_ == OptionType::kConfigurable); } + bool IsConfigurable() const { + return (type_ == OptionType::kConfigurable || + type_ == OptionType::kCustomizable); + } + + bool IsCustomizable() const { return (type_ == OptionType::kCustomizable); } // Returns the underlying pointer for the type at base_addr // The value returned is the underlying "raw" pointer, offset from base. @@ -660,6 +764,10 @@ Status ParseVector(const ConfigOptions& config_options, result->clear(); Status status; + // Turn off ignore_unknown_objects so we can tell if the returned + // object is valid or not. 
+ ConfigOptions copy = config_options; + copy.ignore_unsupported_options = false; for (size_t start = 0, end = 0; status.ok() && start < value.size() && end != std::string::npos; start = end + 1) { @@ -667,10 +775,15 @@ Status ParseVector(const ConfigOptions& config_options, status = OptionTypeInfo::NextToken(value, separator, start, &end, &token); if (status.ok()) { T elem; - status = elem_info.Parse(config_options, name, token, - reinterpret_cast(&elem)); + status = + elem_info.Parse(copy, name, token, reinterpret_cast(&elem)); if (status.ok()) { result->emplace_back(elem); + } else if (config_options.ignore_unsupported_options && + status.IsNotSupported()) { + // If we were ignoring unsupported options and this one should be + // ignored, ignore it by setting the status to OK + status = Status::OK(); } } } diff --git a/dist/linux_amd64/include/rocksdb/utilities/stackable_db.h b/dist/linux_amd64/include/rocksdb/utilities/stackable_db.h index 93c9e9a..686452b 100644 --- a/dist/linux_amd64/include/rocksdb/utilities/stackable_db.h +++ b/dist/linux_amd64/include/rocksdb/utilities/stackable_db.h @@ -141,6 +141,11 @@ class StackableDB : public DB { import_options, metadata, handle); } + using DB::VerifyFileChecksums; + Status VerifyFileChecksums(const ReadOptions& read_opts) override { + return db_->VerifyFileChecksums(read_opts); + } + virtual Status VerifyChecksum() override { return db_->VerifyChecksum(); } virtual Status VerifyChecksum(const ReadOptions& options) override { @@ -367,6 +372,24 @@ class StackableDB : public DB { using DB::EndBlockCacheTrace; Status EndBlockCacheTrace() override { return db_->EndBlockCacheTrace(); } + using DB::StartIOTrace; + Status StartIOTrace(Env* env, const TraceOptions& options, + std::unique_ptr&& trace_writer) override { + return db_->StartIOTrace(env, options, std::move(trace_writer)); + } + + using DB::EndIOTrace; + Status EndIOTrace() override { return db_->EndIOTrace(); } + + using DB::StartTrace; + Status StartTrace(const 
TraceOptions& options, + std::unique_ptr&& trace_writer) override { + return db_->StartTrace(options, std::move(trace_writer)); + } + + using DB::EndTrace; + Status EndTrace() override { return db_->EndTrace(); } + #endif // ROCKSDB_LITE virtual Status GetLiveFiles(std::vector& vec, uint64_t* mfs, diff --git a/dist/linux_amd64/include/rocksdb/utilities/transaction.h b/dist/linux_amd64/include/rocksdb/utilities/transaction.h index 6ebdbcc..b553100 100644 --- a/dist/linux_amd64/include/rocksdb/utilities/transaction.h +++ b/dist/linux_amd64/include/rocksdb/utilities/transaction.h @@ -24,6 +24,11 @@ using TransactionName = std::string; using TransactionID = uint64_t; +// An endpoint for a range of keys. +class Endpoint { + // TODO +}; + // Provides notification to the caller of SetSnapshotOnNextOperation when // the actual snapshot gets created class TransactionNotifier { diff --git a/dist/linux_amd64/include/rocksdb/utilities/transaction_db.h b/dist/linux_amd64/include/rocksdb/utilities/transaction_db.h index 8967b7e..2e1a0a1 100644 --- a/dist/linux_amd64/include/rocksdb/utilities/transaction_db.h +++ b/dist/linux_amd64/include/rocksdb/utilities/transaction_db.h @@ -202,6 +202,13 @@ struct KeyLockInfo { bool exclusive; }; +struct RangeLockInfo { + Endpoint start; + Endpoint end; + std::vector ids; + bool exclusive; +}; + struct DeadlockInfo { TransactionID m_txn_id; uint32_t m_cf_id; @@ -296,6 +303,7 @@ class TransactionDB : public StackableDB { // The mapping is column family id -> KeyLockInfo virtual std::unordered_multimap GetLockStatusData() = 0; + virtual std::vector GetDeadlockInfoBuffer() = 0; virtual void SetDeadlockInfoBufferSize(uint32_t target_size) = 0; diff --git a/dist/linux_amd64/include/rocksdb/version.h b/dist/linux_amd64/include/rocksdb/version.h index c2631d0..0fc493f 100644 --- a/dist/linux_amd64/include/rocksdb/version.h +++ b/dist/linux_amd64/include/rocksdb/version.h @@ -5,8 +5,8 @@ #pragma once #define ROCKSDB_MAJOR 6 -#define ROCKSDB_MINOR 14 
-#define ROCKSDB_PATCH 6 +#define ROCKSDB_MINOR 15 +#define ROCKSDB_PATCH 2 // Do not use these. We made the mistake of declaring macros starting with // double underscore. Now we have to live with our choice. We'll deprecate these diff --git a/dist/linux_amd64/lib/liblz4.a b/dist/linux_amd64/lib/liblz4.a index c81d167..a5fbb21 100644 Binary files a/dist/linux_amd64/lib/liblz4.a and b/dist/linux_amd64/lib/liblz4.a differ diff --git a/dist/linux_amd64/lib/librocksdb.a b/dist/linux_amd64/lib/librocksdb.a index 97083b0..c125c24 100644 Binary files a/dist/linux_amd64/lib/librocksdb.a and b/dist/linux_amd64/lib/librocksdb.a differ diff --git a/dist/linux_amd64/lib/libsnappy.a b/dist/linux_amd64/lib/libsnappy.a index ea5f217..2837152 100644 Binary files a/dist/linux_amd64/lib/libsnappy.a and b/dist/linux_amd64/lib/libsnappy.a differ diff --git a/dist/linux_amd64/lib/libz.a b/dist/linux_amd64/lib/libz.a index 28534f5..27a0de9 100644 Binary files a/dist/linux_amd64/lib/libz.a and b/dist/linux_amd64/lib/libz.a differ diff --git a/dist/linux_amd64/lib/libzstd.a b/dist/linux_amd64/lib/libzstd.a index 5164ad1..dcb3070 100644 Binary files a/dist/linux_amd64/lib/libzstd.a and b/dist/linux_amd64/lib/libzstd.a differ diff --git a/env.go b/env.go index d119829..93a72ed 100644 --- a/env.go +++ b/env.go @@ -33,6 +33,13 @@ func (env *Env) SetBackgroundThreads(n int) { C.rocksdb_env_set_background_threads(env.c, C.int(n)) } +// GetBackgroundThreads gets the number of background worker threads +// of a specific thread pool for this environment. +// 'LOW' is the default pool. +func (env *Env) GetBackgroundThreads() int { + return int(C.rocksdb_env_get_background_threads(env.c)) +} + // SetHighPriorityBackgroundThreads sets the size of the high priority // thread pool that can be used to prevent compactions from stalling // memtable flushes.
@@ -40,6 +47,13 @@ func (env *Env) SetHighPriorityBackgroundThreads(n int) { C.rocksdb_env_set_high_priority_background_threads(env.c, C.int(n)) } +// GetHighPriorityBackgroundThreads gets the size of the high priority +// thread pool that can be used to prevent compactions from stalling +// memtable flushes. +func (env *Env) GetHighPriorityBackgroundThreads() int { + return int(C.rocksdb_env_get_high_priority_background_threads(env.c)) +} + // SetLowPriorityBackgroundThreads sets the size of the low priority // thread pool that can be used to prevent compactions from stalling // memtable flushes. @@ -47,6 +61,13 @@ func (env *Env) SetLowPriorityBackgroundThreads(n int) { C.rocksdb_env_set_low_priority_background_threads(env.c, C.int(n)) } +// GetLowPriorityBackgroundThreads gets the size of the low priority +// thread pool that can be used to prevent compactions from stalling +// memtable flushes. +func (env *Env) GetLowPriorityBackgroundThreads() int { + return int(C.rocksdb_env_get_low_priority_background_threads(env.c)) +} + // SetBottomPriorityBackgroundThreads sets the size of // thread pool that can be used to prevent bottommost compactions // from stalling memtable flushes. @@ -54,6 +75,13 @@ func (env *Env) SetBottomPriorityBackgroundThreads(n int) { C.rocksdb_env_set_bottom_priority_background_threads(env.c, C.int(n)) } +// GetBottomPriorityBackgroundThreads gets the size of +// thread pool that can be used to prevent bottommost compactions +// from stalling memtable flushes. +func (env *Env) GetBottomPriorityBackgroundThreads() int { + return int(C.rocksdb_env_get_bottom_priority_background_threads(env.c)) +} + // JoinAllThreads wait for all threads started by StartThread to terminate. 
func (env *Env) JoinAllThreads() { C.rocksdb_env_join_all_threads(env.c) diff --git a/env_test.go b/env_test.go index 8ac2005..483d4c9 100644 --- a/env_test.go +++ b/env_test.go @@ -1,15 +1,27 @@ package grocksdb -import "testing" +import ( + "testing" + + "github.com/stretchr/testify/require" +) func TestEnv(t *testing.T) { env := NewDefaultEnv() defer env.Destroy() env.SetBackgroundThreads(2) + require.Equal(t, 2, env.GetBackgroundThreads()) + env.SetHighPriorityBackgroundThreads(5) - env.SetLowPriorityBackgroundThreads(2) + require.Equal(t, 5, env.GetHighPriorityBackgroundThreads()) + + env.SetLowPriorityBackgroundThreads(6) + require.Equal(t, 6, env.GetLowPriorityBackgroundThreads()) + env.SetBottomPriorityBackgroundThreads(14) + require.Equal(t, 14, env.GetBottomPriorityBackgroundThreads()) + env.JoinAllThreads() env.LowerHighPriorityThreadPoolCPUPriority() env.LowerHighPriorityThreadPoolIOPriority() diff --git a/go.sum b/go.sum index c69ae42..783d82f 100644 --- a/go.sum +++ b/go.sum @@ -3,6 +3,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= diff --git a/options_backupabledb.go b/options_backupabledb.go new file mode 100644 index 0000000..ed8b0f2 --- /dev/null +++ b/options_backupabledb.go @@ -0,0 +1,275 @@ +package grocksdb + +// #include "rocksdb/c.h" +// #include "grocksdb.h" +import "C" +import "unsafe" + +// 
ShareFilesNaming describes possible naming schemes for backup +// table file names when the table files are stored in the shared_checksum +// directory (i.e., both share_table_files and share_files_with_checksum +// are true). +type ShareFilesNaming uint32 + +const ( + // LegacyCrc32cAndFileSize indicates backup SST filenames are __.sst + // where is an unsigned decimal integer. This is the + // original/legacy naming scheme for share_files_with_checksum, + // with two problems: + // * At massive scale, collisions on this triple with different file + // contents is plausible. + // * Determining the name to use requires computing the checksum, + // so generally requires reading the whole file even if the file + // is already backed up. + // ** ONLY RECOMMENDED FOR PRESERVING OLD BEHAVIOR ** + LegacyCrc32cAndFileSize ShareFilesNaming = 1 + + // UseDBSessionID indicates backup SST filenames are _s.sst. This + // pair of values should be very strongly unique for a given SST file + // and easily determined before computing a checksum. The 's' indicates + // the value is a DB session id, not a checksum. + // + // Exceptions: + // * For old SST files without a DB session id, kLegacyCrc32cAndFileSize + // will be used instead, matching the names assigned by RocksDB versions + // not supporting the newer naming scheme. + // * See also flags below. + UseDBSessionID ShareFilesNaming = 2 + + MaskNoNamingFlags ShareFilesNaming = 0xffff + + // FlagIncludeFileSize if not already part of the naming scheme, insert + // _ + // before .sst in the name. In case of user code actually parsing the + // last _ before the .sst as the file size, this preserves that + // feature of kLegacyCrc32cAndFileSize. In other words, this option makes + // official that unofficial feature of the backup metadata. + // + // We do not consider SST file sizes to have sufficient entropy to + // contribute significantly to naming uniqueness. 
+ FlagIncludeFileSize ShareFilesNaming = 1 << 31 + + // FlagMatchInterimNaming indicates when encountering an SST file from a Facebook-internal early + // release of 6.12, use the default naming scheme in effect for + // when the SST file was generated (assuming full file checksum + // was not set to GetFileChecksumGenCrc32cFactory()). That naming is + // _.sst + // and ignores kFlagIncludeFileSize setting. + // NOTE: This flag is intended to be temporary and should be removed + // in a later release. + FlagMatchInterimNaming ShareFilesNaming = 1 << 30 + + MaskNamingFlags ShareFilesNaming = ^MaskNoNamingFlags +) + +// BackupableDBOptions represents options for backupable db. +type BackupableDBOptions struct { + c *C.rocksdb_backupable_db_options_t +} + +// NewBackupableDBOptions +func NewBackupableDBOptions(backupDir string) *BackupableDBOptions { + cDir := C.CString(backupDir) + op := C.rocksdb_backupable_db_options_create(cDir) + C.free(unsafe.Pointer(cDir)) + return &BackupableDBOptions{c: op} +} + +// SetBackupDir sets where to keep the backup files. Has to be different than dbname_ +// Best to set this to dbname_ + "/backups". +func (b *BackupableDBOptions) SetBackupDir(dir string) { + cDir := C.CString(dir) + C.rocksdb_backupable_db_options_set_backup_dir(b.c, cDir) + C.free(unsafe.Pointer(cDir)) +} + +// SetEnv to be used for backup file I/O. If it's +// nullptr, backups will be written out using DBs Env. If it's +// non-nullptr, backup's I/O will be performed using this object. +// If you want to have backups on HDFS, use HDFS Env here! +func (b *BackupableDBOptions) SetEnv(env *Env) { + C.rocksdb_backupable_db_options_set_env(b.c, env.c) +} + +// ShareTableFiles if set to true, backup will assume that table files with +// same name have the same contents. This enables incremental backups and +// avoids unnecessary data copies. +// +// If false, each backup will be on its own and will +// not share any data with other backups. 
+// +// Default: true +func (b *BackupableDBOptions) ShareTableFiles(flag bool) { + C.rocksdb_backupable_db_options_set_share_table_files(b.c, boolToChar(flag)) +} + +// IsShareTableFiles returns if backup will assume that table files with +// same name have the same contents. This enables incremental backups and +// avoids unnecessary data copies. +// +// If false, each backup will be on its own and will +// not share any data with other backups. +func (b *BackupableDBOptions) IsShareTableFiles() bool { + return charToBool(C.rocksdb_backupable_db_options_get_share_table_files(b.c)) +} + +// SetSync if true, we can guarantee you'll get consistent backup even +// on a machine crash/reboot. Backup process is slower with sync enabled. +// +// If false, we don't guarantee anything on machine reboot. However, +// chances are some of the backups are consistent. +// +// Default: true +func (b *BackupableDBOptions) SetSync(flag bool) { + C.rocksdb_backupable_db_options_set_sync(b.c, boolToChar(flag)) +} + +// IsSync if true, we can guarantee you'll get consistent backup even +// on a machine crash/reboot. Backup process is slower with sync enabled. +// +// If false, we don't guarantee anything on machine reboot. However, +// chances are some of the backups are consistent. +func (b *BackupableDBOptions) IsSync() bool { + return charToBool(C.rocksdb_backupable_db_options_get_sync(b.c)) +} + +// DestroyOldData if true, it will delete whatever backups there are already +// +// Default: false +func (b *BackupableDBOptions) DestroyOldData(flag bool) { + C.rocksdb_backupable_db_options_set_destroy_old_data(b.c, boolToChar(flag)) +} + +// IsDestroyOldData indicates if we should delete whatever backups there are already. +func (b *BackupableDBOptions) IsDestroyOldData() bool { + return charToBool(C.rocksdb_backupable_db_options_get_destroy_old_data(b.c)) +} + +// BackupLogFiles if false, we won't backup log files. 
This option can be useful for backing +// up in-memory databases where log file are persisted, but table files are in +// memory. +// +// Default: true +func (b *BackupableDBOptions) BackupLogFiles(flag bool) { + C.rocksdb_backupable_db_options_set_backup_log_files(b.c, boolToChar(flag)) +} + +// IsBackupLogFiles if false, we won't backup log files. This option can be useful for backing +// up in-memory databases where log file are persisted, but table files are in +// memory. +func (b *BackupableDBOptions) IsBackupLogFiles() bool { + return charToBool(C.rocksdb_backupable_db_options_get_backup_log_files(b.c)) +} + +// SetBackupRateLimit sets max bytes that can be transferred in a second during backup. +// If 0, go as fast as you can. +// +// Default: 0 +func (b *BackupableDBOptions) SetBackupRateLimit(limit uint64) { + C.rocksdb_backupable_db_options_set_backup_rate_limit(b.c, C.uint64_t(limit)) +} + +// GetBackupRateLimit gets max bytes that can be transferred in a second during backup. +// If 0, go as fast as you can. +func (b *BackupableDBOptions) GetBackupRateLimit() uint64 { + return uint64(C.rocksdb_backupable_db_options_get_backup_rate_limit(b.c)) +} + +// SetRestoreRateLimit sets max bytes that can be transferred in a second during restore. +// If 0, go as fast as you can +// +// Default: 0 +func (b *BackupableDBOptions) SetRestoreRateLimit(limit uint64) { + C.rocksdb_backupable_db_options_set_restore_rate_limit(b.c, C.uint64_t(limit)) +} + +// GetRestoreRateLimit gets max bytes that can be transferred in a second during restore. 
+// If 0, go as fast as you can +func (b *BackupableDBOptions) GetRestoreRateLimit() uint64 { + return uint64(C.rocksdb_backupable_db_options_get_restore_rate_limit(b.c)) +} + +// SetMaxBackgroundOperations sets max number of background threads will copy files for CreateNewBackup() +// and RestoreDBFromBackup() +// +// Default: 1 +func (b *BackupableDBOptions) SetMaxBackgroundOperations(v int) { + C.rocksdb_backupable_db_options_set_max_background_operations(b.c, C.int(v)) +} + +// GetMaxBackgroundOperations gets max number of background threads will copy files for CreateNewBackup() +// and RestoreDBFromBackup() +func (b *BackupableDBOptions) GetMaxBackgroundOperations() int { + return int(C.rocksdb_backupable_db_options_get_max_background_operations(b.c)) +} + +// SetCallbackTriggerIntervalSize sets size (N) during backup user can get callback every time next +// N bytes being copied. +// +// Default: N=4194304 +func (b *BackupableDBOptions) SetCallbackTriggerIntervalSize(size uint64) { + C.rocksdb_backupable_db_options_set_callback_trigger_interval_size(b.c, C.uint64_t(size)) +} + +// GetCallbackTriggerIntervalSize gets size (N) during backup user can get callback every time next +// N bytes being copied. +func (b *BackupableDBOptions) GetCallbackTriggerIntervalSize() uint64 { + return uint64(C.rocksdb_backupable_db_options_get_callback_trigger_interval_size(b.c)) +} + +// SetMaxValidBackupsToOpen sets max number of valid backup to open. +// +// For BackupEngineReadOnly, Open() will open at most this many of the +// latest non-corrupted backups. +// +// Note: this setting is ignored (behaves like INT_MAX) for any kind of +// writable BackupEngine because it would inhibit accounting for shared +// files for proper backup deletion, including purging any incompletely +// created backups on creation of a new backup. 
+// +// Default: INT_MAX +func (b *BackupableDBOptions) SetMaxValidBackupsToOpen(val int) { + C.rocksdb_backupable_db_options_set_max_valid_backups_to_open(b.c, C.int(val)) +} + +// GetMaxValidBackupsToOpen gets max number of valid backup to open. +// +// For BackupEngineReadOnly, Open() will open at most this many of the +// latest non-corrupted backups. +// +// Note: this setting is ignored (behaves like INT_MAX) for any kind of +// writable BackupEngine because it would inhibit accounting for shared +// files for proper backup deletion, including purging any incompletely +// created backups on creation of a new backup. +func (b *BackupableDBOptions) GetMaxValidBackupsToOpen() int { + return int(C.rocksdb_backupable_db_options_get_max_valid_backups_to_open(b.c)) +} + +// SetShareFilesWithChecksumNaming sets naming option for share_files_with_checksum table files. See +// ShareFilesNaming for details. +// +// Modifying this option cannot introduce a downgrade compatibility issue +// because RocksDB can read, restore, and delete backups using different file +// names, and it's OK for a backup directory to use a mixture of table file +// naming schemes. +// +// However, modifying this option and saving more backups to the same +// directory can lead to the same file getting saved again to that +// directory, under the new shared name in addition to the old shared +// name. +// +// Default: UseDBSessionID | FlagIncludeFileSize | FlagMatchInterimNaming +func (b *BackupableDBOptions) SetShareFilesWithChecksumNaming(val ShareFilesNaming) { + C.rocksdb_backupable_db_options_set_share_files_with_checksum_naming(b.c, C.int(val)) +} + +// GetShareFilesWithChecksumNaming gets naming option for share_files_with_checksum table files. See +// ShareFilesNaming for details. 
+func (b *BackupableDBOptions) GetShareFilesWithChecksumNaming() ShareFilesNaming {
+	return ShareFilesNaming(C.rocksdb_backupable_db_options_get_share_files_with_checksum_naming(b.c))
+}
+
+// Destroy releases these options.
+func (b *BackupableDBOptions) Destroy() {
+	C.rocksdb_backupable_db_options_destroy(b.c)
+}
diff --git a/options_backupabledb_test.go b/options_backupabledb_test.go
new file mode 100644
index 0000000..7d743f3
--- /dev/null
+++ b/options_backupabledb_test.go
@@ -0,0 +1,59 @@
+package grocksdb
+
+import (
+	"math"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestBackupableDBOptions(t *testing.T) {
+	opts := NewBackupableDBOptions("/tmp/v1")
+	defer opts.Destroy()
+
+	env := NewDefaultEnv()
+	defer env.Destroy()
+
+	opts.SetEnv(env)
+	opts.SetBackupDir("/tmp/v2")
+
+	require.True(t, opts.IsShareTableFiles()) // check default value
+	opts.ShareTableFiles(false)
+	require.False(t, opts.IsShareTableFiles())
+
+	require.True(t, opts.IsSync())
+	opts.SetSync(false)
+	require.False(t, opts.IsSync())
+
+	require.False(t, opts.IsDestroyOldData())
+	opts.DestroyOldData(true)
+	require.True(t, opts.IsDestroyOldData())
+
+	require.True(t, opts.IsBackupLogFiles())
+	opts.BackupLogFiles(false)
+	require.False(t, opts.IsBackupLogFiles())
+
+	require.EqualValues(t, 0, opts.GetBackupRateLimit())
+	opts.SetBackupRateLimit(531 << 10)
+	require.EqualValues(t, 531<<10, opts.GetBackupRateLimit())
+
+	require.EqualValues(t, 0, opts.GetRestoreRateLimit())
+	opts.SetRestoreRateLimit(53 << 10)
+	require.EqualValues(t, 53<<10, opts.GetRestoreRateLimit())
+
+	require.EqualValues(t, 1, opts.GetMaxBackgroundOperations())
+	opts.SetMaxBackgroundOperations(3)
+	require.EqualValues(t, 3, opts.GetMaxBackgroundOperations())
+
+	require.EqualValues(t, 4194304, opts.GetCallbackTriggerIntervalSize())
+	opts.SetCallbackTriggerIntervalSize(800 << 10)
+	require.EqualValues(t, 800<<10, opts.GetCallbackTriggerIntervalSize())
+
+	require.EqualValues(t, math.MaxInt32, opts.GetMaxValidBackupsToOpen())
+	opts.SetMaxValidBackupsToOpen(29)
+	require.EqualValues(t, 29, opts.GetMaxValidBackupsToOpen())
+
+	require.EqualValues(t, UseDBSessionID|FlagIncludeFileSize|FlagMatchInterimNaming, opts.GetShareFilesWithChecksumNaming())
+	opts.SetShareFilesWithChecksumNaming(UseDBSessionID | LegacyCrc32cAndFileSize)
+	require.EqualValues(t, UseDBSessionID|LegacyCrc32cAndFileSize, opts.GetShareFilesWithChecksumNaming())
+}
diff --git a/options_compaction.go b/options_compaction.go
index c226872..e6a38b6 100644
--- a/options_compaction.go
+++ b/options_compaction.go
@@ -126,6 +126,13 @@ func (opts *FIFOCompactionOptions) SetMaxTableFilesSize(value uint64) {
 	C.rocksdb_fifo_compaction_options_set_max_table_files_size(opts.c, C.uint64_t(value))
 }
 
+// GetMaxTableFilesSize gets the max table file size.
+// Once the total sum of table files reaches this, we will delete the oldest
+// table file.
+func (opts *FIFOCompactionOptions) GetMaxTableFilesSize() uint64 {
+	return uint64(C.rocksdb_fifo_compaction_options_get_max_table_files_size(opts.c))
+}
+
 // Destroy deallocates the FIFOCompactionOptions object.
 func (opts *FIFOCompactionOptions) Destroy() {
 	C.rocksdb_fifo_compaction_options_destroy(opts.c)
@@ -155,17 +162,29 @@ func NewNativeUniversalCompactionOptions(c *C.rocksdb_universal_compaction_optio
 // then include next file into this candidate set.
 //
 // Default: 1
-func (opts *UniversalCompactionOptions) SetSizeRatio(value uint) {
+func (opts *UniversalCompactionOptions) SetSizeRatio(value int) {
 	C.rocksdb_universal_compaction_options_set_size_ratio(opts.c, C.int(value))
 }
 
+// GetSizeRatio gets the percentage flexibility while comparing file size.
+// If the candidate file(s) size is 1% smaller than the next file's size,
+// then include next file into this candidate set.
+func (opts *UniversalCompactionOptions) GetSizeRatio() int {
+	return int(C.rocksdb_universal_compaction_options_get_size_ratio(opts.c))
+}
+
 // SetMinMergeWidth sets the minimum number of files in a single compaction run.
 //
 // Default: 2
-func (opts *UniversalCompactionOptions) SetMinMergeWidth(value uint) {
+func (opts *UniversalCompactionOptions) SetMinMergeWidth(value int) {
 	C.rocksdb_universal_compaction_options_set_min_merge_width(opts.c, C.int(value))
 }
 
+// GetMinMergeWidth gets the minimum number of files in a single compaction run.
+func (opts *UniversalCompactionOptions) GetMinMergeWidth() int {
+	return int(C.rocksdb_universal_compaction_options_get_min_merge_width(opts.c))
+}
+
 // SetMaxMergeWidth sets the maximum number of files in a single compaction run.
 //
 // Default: UINT_MAX
@@ -173,6 +192,11 @@ func (opts *UniversalCompactionOptions) SetMaxMergeWidth(value uint) {
 	C.rocksdb_universal_compaction_options_set_max_merge_width(opts.c, C.int(value))
 }
 
+// GetMaxMergeWidth gets the maximum number of files in a single compaction run.
+func (opts *UniversalCompactionOptions) GetMaxMergeWidth() int {
+	return int(C.rocksdb_universal_compaction_options_get_max_merge_width(opts.c))
+}
+
 // SetMaxSizeAmplificationPercent sets the size amplification.
 // It is defined as the amount (in percentage) of
 // additional storage needed to store a single byte of data in the database.
@@ -185,10 +209,23 @@ func (opts *UniversalCompactionOptions) SetMaxMergeWidth(value uint) {
 //
 // Default: 200, which means that a 100 byte database could require upto
 // 300 bytes of storage.
-func (opts *UniversalCompactionOptions) SetMaxSizeAmplificationPercent(value uint) {
+func (opts *UniversalCompactionOptions) SetMaxSizeAmplificationPercent(value int) {
 	C.rocksdb_universal_compaction_options_set_max_size_amplification_percent(opts.c, C.int(value))
 }
 
+// GetMaxSizeAmplificationPercent gets the size amplification.
+// It is defined as the amount (in percentage) of
+// additional storage needed to store a single byte of data in the database.
+// For example, a size amplification of 2% means that a database that
+// contains 100 bytes of user-data may occupy up to 102 bytes of
+// physical storage. By this definition, a fully compacted database has
+// a size amplification of 0%. Rocksdb uses the following heuristic
+// to calculate size amplification: it assumes that all files excluding
+// the earliest file contribute to the size amplification.
+func (opts *UniversalCompactionOptions) GetMaxSizeAmplificationPercent() int {
+	return int(C.rocksdb_universal_compaction_options_get_max_size_amplification_percent(opts.c))
+}
+
 // SetCompressionSizePercent sets the percentage of compression size.
 //
 // If this option is set to be -1, all the output files
@@ -212,6 +249,27 @@ func (opts *UniversalCompactionOptions) SetCompressionSizePercent(value int) {
 	C.rocksdb_universal_compaction_options_set_compression_size_percent(opts.c, C.int(value))
 }
 
+// GetCompressionSizePercent gets the percentage of compression size.
+//
+// If this option is set to be -1, all the output files
+// will follow compression type specified.
+//
+// If this option is not negative, we will try to make sure compressed
+// size is just above this value. In normal cases, at least this percentage
+// of data will be compressed.
+// When we are compacting to a new file, here is the criteria whether
+// it needs to be compressed: assuming here are the list of files sorted
+// by generation time:
+// A1...An B1...Bm C1...Ct
+// where A1 is the newest and Ct is the oldest, and we are going to compact
+// B1...Bm, we calculate the total size of all the files as total_size, as
+// well as the total size of C1...Ct as total_C, the compaction output file
+// will be compressed iff
+// total_C / total_size < this percentage
+func (opts *UniversalCompactionOptions) GetCompressionSizePercent() int {
+	return int(C.rocksdb_universal_compaction_options_get_compression_size_percent(opts.c))
+}
+
 // SetStopStyle sets the algorithm used to stop picking files into a single compaction run.
 //
 // Default: CompactionStopStyleTotalSize
@@ -219,6 +277,11 @@ func (opts *UniversalCompactionOptions) SetStopStyle(value UniversalCompactionSt
 	C.rocksdb_universal_compaction_options_set_stop_style(opts.c, C.int(value))
 }
 
+// GetStopStyle gets the algorithm used to stop picking files into a single compaction run.
+func (opts *UniversalCompactionOptions) GetStopStyle() UniversalCompactionStopStyle {
+	return UniversalCompactionStopStyle(C.rocksdb_universal_compaction_options_get_stop_style(opts.c))
+}
+
 // Destroy deallocates the UniversalCompactionOptions object.
 func (opts *UniversalCompactionOptions) Destroy() {
 	C.rocksdb_universal_compaction_options_destroy(opts.c)
diff --git a/options_compaction_test.go b/options_compaction_test.go
index 821e1f5..4111932 100644
--- a/options_compaction_test.go
+++ b/options_compaction_test.go
@@ -32,6 +32,7 @@ func TestFifoCompactOption(t *testing.T) {
 	defer fo.Destroy()
 
 	fo.SetMaxTableFilesSize(2 << 10)
+	require.EqualValues(t, 2<<10, fo.GetMaxTableFilesSize())
 }
 
 func TestUniversalCompactOption(t *testing.T) {
@@ -39,9 +40,20 @@ func TestUniversalCompactOption(t *testing.T) {
 	defer uo.Destroy()
 
 	uo.SetSizeRatio(2)
+	require.EqualValues(t, 2, uo.GetSizeRatio())
+
 	uo.SetMinMergeWidth(3)
+	require.EqualValues(t, 3, uo.GetMinMergeWidth())
+
 	uo.SetMaxMergeWidth(123)
+	require.EqualValues(t, 123, uo.GetMaxMergeWidth())
+
 	uo.SetMaxSizeAmplificationPercent(20)
-	uo.SetCompressionSizePercent(15)
+	require.EqualValues(t, 20, uo.GetMaxSizeAmplificationPercent())
+
+	uo.SetCompressionSizePercent(18)
+	require.EqualValues(t, 18, uo.GetCompressionSizePercent())
+
 	uo.SetStopStyle(CompactionStopStyleTotalSize)
+	require.EqualValues(t, CompactionStopStyleTotalSize, uo.GetStopStyle())
 }
diff --git a/options_env_test.go b/options_env_test.go
new file mode 100644
index 0000000..ba9a868
--- /dev/null
+++ b/options_env_test.go
@@ -0,0 +1,8 @@
+package grocksdb
+
+import "testing"
+
+func TestOptEnv(t *testing.T) {
+	opt := NewDefaultEnvOptions()
+	defer opt.Destroy()
+}