diff --git a/.travis.yml b/.travis.yml index ebadb6f..3b378c4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,6 +3,7 @@ language: go go: - 1.13.x - 1.14.x + - 1.15.x install: - go get -u github.com/mattn/goveralls diff --git a/backup.go b/backup.go index 1b98d4a..66c94de 100644 --- a/backup.go +++ b/backup.go @@ -113,20 +113,20 @@ func (b *BackupEngine) RestoreDBFromLatestBackup(dbDir, walDir string, ro *Resto return } -// // RestoreDBFromBackup restores the backup (identified by its id) to dbDir. walDir -// // is where the write ahead logs are restored to and usually the same as dbDir. -// func (b *BackupEngine) RestoreDBFromBackup(dbDir, walDir string, ro *RestoreOptions, backupID uint32) (err error) { -// cDbDir := C.CString(dbDir) -// cWalDir := C.CString(walDir) - -// var cErr *C.char -// C.rocksdb_backup_engine_restore_db_from_backup(b.c, cDbDir, cWalDir, ro.c, C.uint32_t(backupID), &cErr) -// err = fromCError(cErr) - -// C.free(unsafe.Pointer(cDbDir)) -// C.free(unsafe.Pointer(cWalDir)) -// return -// } +// RestoreDBFromBackup restores the backup (identified by its id) to dbDir. walDir +// is where the write ahead logs are restored to and usually the same as dbDir. +func (b *BackupEngine) RestoreDBFromBackup(dbDir, walDir string, ro *RestoreOptions, backupID uint32) (err error) { + cDbDir := C.CString(dbDir) + cWalDir := C.CString(walDir) + + var cErr *C.char + C.rocksdb_backup_engine_restore_db_from_backup(b.c, cDbDir, cWalDir, ro.c, C.uint32_t(backupID), &cErr) + err = fromCError(cErr) + + C.free(unsafe.Pointer(cDbDir)) + C.free(unsafe.Pointer(cWalDir)) + return +} // Close close the backup engine and cleans up state // The backups already taken remain on storage. diff --git a/backup_test.go b/backup_test.go index ee7e42d..a639dbf 100644 --- a/backup_test.go +++ b/backup_test.go @@ -1,115 +1,114 @@ package grocksdb -// import ( -// "io/ioutil" -// "testing" - -// "github.com/facebookgo/ensure" -// "github.com/stretchr/testify/require" -// ) - -// func TestBackupEngine(t *testing.T) { -// db := newTestDB(t, "TestDBBackup", nil) -// defer db.Close() - -// var ( -// givenKey = []byte("hello") -// givenVal1 = []byte("") -// givenVal2 = []byte("world1") -// wo = NewDefaultWriteOptions() -// ro = NewDefaultReadOptions() -// ) -// defer wo.Destroy() -// defer ro.Destroy() - -// // create -// ensure.Nil(t, db.Put(wo, givenKey, givenVal1)) - -// // retrieve -// v1, err := db.Get(ro, givenKey) -// defer v1.Free() -// ensure.Nil(t, err) -// ensure.DeepEqual(t, v1.Data(), givenVal1) - -// // retrieve bytes -// _v1, err := db.GetBytes(ro, givenKey) -// ensure.Nil(t, err) -// ensure.DeepEqual(t, _v1, givenVal1) - -// // update -// ensure.Nil(t, db.Put(wo, givenKey, givenVal2)) -// v2, err := db.Get(ro, givenKey) -// defer v2.Free() -// ensure.Nil(t, err) -// ensure.DeepEqual(t, v2.Data(), givenVal2) - -// // retrieve pinned -// v3, err := db.GetPinned(ro, givenKey) -// defer v3.Destroy() -// ensure.Nil(t, err) -// ensure.DeepEqual(t, v3.Data(), givenVal2) - -// engine, err := CreateBackupEngine(db) -// require.Nil(t, err) -// defer engine.Close() - -// t.Run("createBackupAndVerify", func(t *testing.T) { -// infos := engine.GetInfo() -// require.Empty(t, infos) - -// // create first backup -// require.Nil(t, engine.CreateNewBackup()) - -// // create second backup -// require.Nil(t, engine.CreateNewBackupFlush(true)) - -// infos = engine.GetInfo() -// require.Equal(t, 2, len(infos)) -// for i := range infos { -// require.Nil(t, engine.VerifyBackup(infos[i].ID)) -// require.True(t, infos[i].Size 
> 0) -// require.True(t, infos[i].NumFiles > 0) -// } -// }) - -// t.Run("purge", func(t *testing.T) { -// require.Nil(t, engine.PurgeOldBackups(1)) - -// infos := engine.GetInfo() -// require.Equal(t, 1, len(infos)) -// }) - -// t.Run("restoreFromLatest", func(t *testing.T) { -// dir, err := ioutil.TempDir("", "gorocksdb-restoreFromLatest") -// require.Nil(t, err) - -// ro := NewRestoreOptions() -// defer ro.Destroy() -// require.Nil(t, engine.RestoreDBFromLatestBackup(dir, dir, ro)) -// require.Nil(t, engine.RestoreDBFromLatestBackup(dir, dir, ro)) -// }) - -// t.Run("restoreFromBackup", func(t *testing.T) { -// infos := engine.GetInfo() -// require.Equal(t, 1, len(infos)) - -// dir, err := ioutil.TempDir("", "gorocksdb-restoreFromBackup") -// require.Nil(t, err) - -// ro := NewRestoreOptions() -// defer ro.Destroy() -// require.Nil(t, engine.RestoreDBFromBackup(dir, dir, ro, infos[0].ID)) - -// // try to reopen restored db -// backupDB, err := OpenDb(db.opts, dir) -// require.Nil(t, err) - -// r := NewDefaultReadOptions() -// defer r.Destroy() - -// v3, err := backupDB.GetPinned(r, givenKey) -// defer v3.Destroy() -// ensure.Nil(t, err) -// ensure.DeepEqual(t, v3.Data(), givenVal2) -// }) -// } +import ( + "io/ioutil" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestBackupEngine(t *testing.T) { + db := newTestDB(t, "TestDBBackup", nil) + defer db.Close() + + var ( + givenKey = []byte("hello") + givenVal1 = []byte("") + givenVal2 = []byte("world1") + wo = NewDefaultWriteOptions() + ro = NewDefaultReadOptions() + ) + defer wo.Destroy() + defer ro.Destroy() + + // create + require.Nil(t, db.Put(wo, givenKey, givenVal1)) + + // retrieve + v1, err := db.Get(ro, givenKey) + defer v1.Free() + require.Nil(t, err) + require.EqualValues(t, v1.Data(), givenVal1) + + // retrieve bytes + _v1, err := db.GetBytes(ro, givenKey) + require.Nil(t, err) + require.EqualValues(t, _v1, givenVal1) + + // update + require.Nil(t, db.Put(wo, givenKey, givenVal2)) + v2, err := db.Get(ro, givenKey) + defer v2.Free() + require.Nil(t, err) + require.EqualValues(t, v2.Data(), givenVal2) + + // retrieve pinned + v3, err := db.GetPinned(ro, givenKey) + defer v3.Destroy() + require.Nil(t, err) + require.EqualValues(t, v3.Data(), givenVal2) + + engine, err := CreateBackupEngine(db) + require.Nil(t, err) + defer engine.Close() + + t.Run("createBackupAndVerify", func(t *testing.T) { + infos := engine.GetInfo() + require.Empty(t, infos) + + // create first backup + require.Nil(t, engine.CreateNewBackup()) + + // create second backup + require.Nil(t, engine.CreateNewBackupFlush(true)) + + infos = engine.GetInfo() + require.Equal(t, 2, len(infos)) + for i := range infos { + require.Nil(t, engine.VerifyBackup(infos[i].ID)) + require.True(t, infos[i].Size > 0) + require.True(t, infos[i].NumFiles > 0) + } + }) + + t.Run("purge", func(t *testing.T) { + require.Nil(t, engine.PurgeOldBackups(1)) + + infos := engine.GetInfo() + require.Equal(t, 1, len(infos)) + }) + + t.Run("restoreFromLatest", func(t *testing.T) { + dir, err := ioutil.TempDir("", "gorocksdb-restoreFromLatest") + require.Nil(t, err) + + ro := NewRestoreOptions() + defer ro.Destroy() + require.Nil(t, engine.RestoreDBFromLatestBackup(dir, dir, ro)) + require.Nil(t, engine.RestoreDBFromLatestBackup(dir, dir, ro)) + }) + + t.Run("restoreFromBackup", func(t *testing.T) { + infos := engine.GetInfo() + require.Equal(t, 1, len(infos)) + + dir, err := ioutil.TempDir("", "gorocksdb-restoreFromBackup") + require.Nil(t, err) + + ro := NewRestoreOptions() + 
defer ro.Destroy() + require.Nil(t, engine.RestoreDBFromBackup(dir, dir, ro, infos[0].ID)) + + // try to reopen restored db + backupDB, err := OpenDb(db.opts, dir) + require.Nil(t, err) + + r := NewDefaultReadOptions() + defer r.Destroy() + + v3, err := backupDB.GetPinned(r, givenKey) + defer v3.Destroy() + require.Nil(t, err) + require.EqualValues(t, v3.Data(), givenVal2) + }) +} diff --git a/build.sh b/build.sh index a96af11..8ff5aca 100644 --- a/build.sh +++ b/build.sh @@ -28,7 +28,7 @@ cd $BUILD_PATH && wget https://github.com/facebook/zstd/releases/download/v1.4.5 $CMAKE_REQUIRED_PARAMS -DZSTD_ZLIB_SUPPORT=ON -DZSTD_LZMA_SUPPORT=OFF -DCMAKE_BUILD_TYPE=Release .. && make -j16 install && \ cd $BUILD_PATH && rm -rf * -cd $BUILD_PATH && wget https://github.com/facebook/rocksdb/archive/v6.11.6.tar.gz && tar xzf v6.11.6.tar.gz && cd rocksdb-6.11.6/ && \ +cd $BUILD_PATH && wget https://github.com/facebook/rocksdb/archive/v6.12.7.tar.gz && tar xzf v6.12.7.tar.gz && cd rocksdb-6.12.7/ && \ mkdir -p build_place && cd build_place && cmake -DCMAKE_BUILD_TYPE=Release $CMAKE_REQUIRED_PARAMS -DCMAKE_PREFIX_PATH=$INSTALL_PREFIX -DWITH_TESTS=OFF -DWITH_GFLAGS=OFF \ -DWITH_BENCHMARK_TOOLS=OFF -DWITH_TOOLS=OFF -DWITH_MD_LIBRARY=OFF -DWITH_RUNTIME_DEBUG=OFF -DROCKSDB_BUILD_SHARED=OFF -DWITH_SNAPPY=ON -DWITH_LZ4=ON -DWITH_ZLIB=ON \ -DWITH_ZSTD=ON -DWITH_BZ2=OFF -WITH_GFLAGS=OFF .. && make -j16 install/strip && \ diff --git a/cf_test.go b/cf_test.go index 9d8a98a..23a09c0 100644 --- a/cf_test.go +++ b/cf_test.go @@ -4,12 +4,12 @@ import ( "io/ioutil" "testing" - "github.com/facebookgo/ensure" + "github.com/stretchr/testify/require" ) func TestColumnFamilyOpen(t *testing.T) { dir, err := ioutil.TempDir("", "gorocksdb-TestColumnFamilyOpen") - ensure.Nil(t, err) + require.Nil(t, err) givenNames := []string{"default", "guide"} opts := NewDefaultOptions() @@ -17,55 +17,55 @@ func TestColumnFamilyOpen(t *testing.T) { opts.SetCreateIfMissing(true) opts.SetCompression(LZ4Compression) db, cfh, err := OpenDbColumnFamilies(opts, dir, givenNames, []*Options{opts, opts}) - ensure.Nil(t, err) + require.Nil(t, err) defer db.Close() - ensure.DeepEqual(t, len(cfh), 2) + require.EqualValues(t, len(cfh), 2) cfh[0].Destroy() cfh[1].Destroy() actualNames, err := ListColumnFamilies(opts, dir) - ensure.Nil(t, err) - ensure.SameElements(t, actualNames, givenNames) + require.Nil(t, err) + require.EqualValues(t, actualNames, givenNames) } func TestColumnFamilyCreateDrop(t *testing.T) { dir, err := ioutil.TempDir("", "gorocksdb-TestColumnFamilyCreate") - ensure.Nil(t, err) + require.Nil(t, err) opts := NewDefaultOptions() opts.SetCreateIfMissingColumnFamilies(true) opts.SetCreateIfMissing(true) opts.SetCompression(LZ4HCCompression) db, err := OpenDb(opts, dir) - ensure.Nil(t, err) + require.Nil(t, err) defer db.Close() cf, err := db.CreateColumnFamily(opts, "guide") - ensure.Nil(t, err) + require.Nil(t, err) defer cf.Destroy() actualNames, err := ListColumnFamilies(opts, dir) - ensure.Nil(t, err) - ensure.SameElements(t, actualNames, []string{"default", "guide"}) + require.Nil(t, err) + require.EqualValues(t, actualNames, []string{"default", "guide"}) - ensure.Nil(t, db.DropColumnFamily(cf)) + require.Nil(t, db.DropColumnFamily(cf)) actualNames, err = ListColumnFamilies(opts, dir) - ensure.Nil(t, err) - ensure.SameElements(t, actualNames, []string{"default"}) + require.Nil(t, err) + require.EqualValues(t, actualNames, []string{"default"}) } func TestColumnFamilyBatchPutGet(t *testing.T) { dir, err := 
ioutil.TempDir("", "gorocksdb-TestColumnFamilyPutGet") - ensure.Nil(t, err) + require.Nil(t, err) givenNames := []string{"default", "guide"} opts := NewDefaultOptions() opts.SetCreateIfMissingColumnFamilies(true) opts.SetCreateIfMissing(true) db, cfh, err := OpenDbColumnFamilies(opts, dir, givenNames, []*Options{opts, opts}) - ensure.Nil(t, err) + require.Nil(t, err) defer db.Close() - ensure.DeepEqual(t, len(cfh), 2) + require.EqualValues(t, len(cfh), 2) defer cfh[0].Destroy() defer cfh[1].Destroy() @@ -82,35 +82,41 @@ func TestColumnFamilyBatchPutGet(t *testing.T) { b0 := NewWriteBatch() defer b0.Destroy() b0.PutCF(cfh[0], givenKey0, givenVal0) - ensure.Nil(t, db.Write(wo, b0)) + require.Nil(t, db.Write(wo, b0)) actualVal0, err := db.GetCF(ro, cfh[0], givenKey0) defer actualVal0.Free() - ensure.Nil(t, err) - ensure.DeepEqual(t, actualVal0.Data(), givenVal0) + require.Nil(t, err) + require.EqualValues(t, actualVal0.Data(), givenVal0) b1 := NewWriteBatch() defer b1.Destroy() b1.PutCF(cfh[1], givenKey1, givenVal1) - ensure.Nil(t, db.Write(wo, b1)) + require.Nil(t, db.Write(wo, b1)) actualVal1, err := db.GetCF(ro, cfh[1], givenKey1) defer actualVal1.Free() - ensure.Nil(t, err) - ensure.DeepEqual(t, actualVal1.Data(), givenVal1) + require.Nil(t, err) + require.EqualValues(t, actualVal1.Data(), givenVal1) actualVal, err := db.GetCF(ro, cfh[0], givenKey1) - ensure.Nil(t, err) - ensure.DeepEqual(t, actualVal.Size(), 0) + require.Nil(t, err) + require.EqualValues(t, actualVal.Size(), 0) actualVal, err = db.GetCF(ro, cfh[1], givenKey0) - ensure.Nil(t, err) - ensure.DeepEqual(t, actualVal.Size(), 0) + require.Nil(t, err) + require.EqualValues(t, actualVal.Size(), 0) + + { + v := db.KeyMayExistsCF(ro, cfh[0], givenKey0, "") + defer v.Free() + require.True(t, v.Size() > 0) + } // trigger flush - ensure.Nil(t, db.FlushCF(cfh[0], NewDefaultFlushOptions())) + require.Nil(t, db.FlushCF(cfh[0], NewDefaultFlushOptions())) } func TestColumnFamilyPutGetDelete(t *testing.T) { dir, err := ioutil.TempDir("", "gorocksdb-TestColumnFamilyPutGet") - ensure.Nil(t, err) + require.Nil(t, err) givenNames := []string{"default", "guide"} opts := NewDefaultOptions() @@ -118,9 +124,9 @@ func TestColumnFamilyPutGetDelete(t *testing.T) { opts.SetCreateIfMissing(true) opts.SetCompression(SnappyCompression) db, cfh, err := OpenDbColumnFamilies(opts, dir, givenNames, []*Options{opts, opts}) - ensure.Nil(t, err) + require.Nil(t, err) defer db.Close() - ensure.DeepEqual(t, len(cfh), 2) + require.EqualValues(t, len(cfh), 2) defer cfh[0].Destroy() defer cfh[1].Destroy() @@ -135,53 +141,58 @@ func TestColumnFamilyPutGetDelete(t *testing.T) { givenVal1 := []byte("world1") { - ensure.Nil(t, db.PutCF(wo, cfh[0], givenKey0, givenVal0)) + require.Nil(t, db.PutCF(wo, cfh[0], givenKey0, givenVal0)) actualVal0, err := db.GetCF(ro, cfh[0], givenKey0) defer actualVal0.Free() - ensure.Nil(t, err) - ensure.DeepEqual(t, actualVal0.Data(), givenVal0) + require.Nil(t, err) + require.EqualValues(t, actualVal0.Data(), givenVal0) - ensure.Nil(t, db.PutCF(wo, cfh[1], givenKey1, givenVal1)) + require.Nil(t, db.PutCF(wo, cfh[1], givenKey1, givenVal1)) actualVal1, err := db.GetCF(ro, cfh[1], givenKey1) defer actualVal1.Free() - ensure.Nil(t, err) - ensure.DeepEqual(t, actualVal1.Data(), givenVal1) + require.Nil(t, err) + require.EqualValues(t, actualVal1.Data(), givenVal1) actualVal, err := db.GetCF(ro, cfh[0], givenKey1) - ensure.Nil(t, err) - ensure.DeepEqual(t, actualVal.Size(), 0) + require.Nil(t, err) + require.EqualValues(t, actualVal.Size(), 0) 
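The block added above is the first caller of the restored KeyMayExistsCF. A minimal sketch of how it might be used outside the test harness (probeCF is a hypothetical helper; a nil result only means RocksDB could not produce the value without extra IO, not that the key is absent):

```go
package example

import "github.com/linxGnu/grocksdb"

// probeCF sketches a cheap existence probe with the restored KeyMayExistsCF.
// A non-nil Slice carries a value RocksDB could produce without extra IO and
// must be freed; nil only means no value was immediately available.
func probeCF(db *grocksdb.DB, cf *grocksdb.ColumnFamilyHandle, key []byte) []byte {
	ro := grocksdb.NewDefaultReadOptions()
	defer ro.Destroy()

	v := db.KeyMayExistsCF(ro, cf, key, "" /* empty timestamp, as in the tests */)
	if v == nil {
		return nil
	}
	defer v.Free()

	// Copy the bytes out before the Slice is freed.
	return append([]byte(nil), v.Data()...)
}
```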
actualVal, err = db.GetCF(ro, cfh[1], givenKey0) - ensure.Nil(t, err) - ensure.DeepEqual(t, actualVal.Size(), 0) + require.Nil(t, err) + require.EqualValues(t, actualVal.Size(), 0) - ensure.Nil(t, db.DeleteCF(wo, cfh[0], givenKey0)) + require.Nil(t, db.DeleteCF(wo, cfh[0], givenKey0)) actualVal, err = db.GetCF(ro, cfh[0], givenKey0) - ensure.Nil(t, err) - ensure.DeepEqual(t, actualVal.Size(), 0) + require.Nil(t, err) + require.EqualValues(t, actualVal.Size(), 0) + + { + v := db.KeyMayExistsCF(ro, cfh[0], givenKey0, "") + defer v.Free() + } } { - ensure.Nil(t, db.PutCF(wo, cfh[0], givenKey0, givenVal0)) + require.Nil(t, db.PutCF(wo, cfh[0], givenKey0, givenVal0)) actualVal0, err := db.GetCF(ro, cfh[0], givenKey0) defer actualVal0.Free() - ensure.Nil(t, err) - ensure.DeepEqual(t, actualVal0.Data(), givenVal0) + require.Nil(t, err) + require.EqualValues(t, actualVal0.Data(), givenVal0) - ensure.Nil(t, db.DeleteRangeCF(wo, cfh[0], givenKey0, givenKey1)) + require.Nil(t, db.DeleteRangeCF(wo, cfh[0], givenKey0, givenKey1)) actualVal, err := db.GetCF(ro, cfh[0], givenKey0) - ensure.Nil(t, err) - ensure.DeepEqual(t, actualVal.Size(), 0) + require.Nil(t, err) + require.EqualValues(t, actualVal.Size(), 0) actualVal1, err := db.GetCF(ro, cfh[1], givenKey1) defer actualVal1.Free() - ensure.Nil(t, err) - ensure.DeepEqual(t, actualVal1.Data(), givenVal1) + require.Nil(t, err) + require.EqualValues(t, actualVal1.Data(), givenVal1) } } func newTestDBCF(t *testing.T, name string) (db *DB, cfh []*ColumnFamilyHandle, cleanup func()) { dir, err := ioutil.TempDir("", "gorocksdb-TestColumnFamilyPutGet") - ensure.Nil(t, err) + require.Nil(t, err) givenNames := []string{"default", "guide"} opts := NewDefaultOptions() @@ -189,7 +200,7 @@ func newTestDBCF(t *testing.T, name string) (db *DB, cfh []*ColumnFamilyHandle, opts.SetCreateIfMissing(true) opts.SetCompression(ZLibCompression) db, cfh, err = OpenDbColumnFamilies(opts, dir, givenNames, []*Options{opts, opts}) - ensure.Nil(t, err) + require.Nil(t, err) cleanup = func() { for _, cf := range cfh { cf.Destroy() @@ -215,31 +226,31 @@ func TestColumnFamilyMultiGet(t *testing.T) { ) // create - ensure.Nil(t, db.PutCF(wo, cfh[0], givenKey1, givenVal1)) - ensure.Nil(t, db.PutCF(wo, cfh[1], givenKey2, givenVal2)) - ensure.Nil(t, db.PutCF(wo, cfh[1], givenKey3, givenVal3)) + require.Nil(t, db.PutCF(wo, cfh[0], givenKey1, givenVal1)) + require.Nil(t, db.PutCF(wo, cfh[1], givenKey2, givenVal2)) + require.Nil(t, db.PutCF(wo, cfh[1], givenKey3, givenVal3)) // column family 0 only has givenKey1 values, err := db.MultiGetCF(ro, cfh[0], []byte("noexist"), givenKey1, givenKey2, givenKey3) defer values.Destroy() - ensure.Nil(t, err) - ensure.DeepEqual(t, len(values), 4) + require.Nil(t, err) + require.EqualValues(t, len(values), 4) - ensure.DeepEqual(t, values[0].Data(), []byte(nil)) - ensure.DeepEqual(t, values[1].Data(), givenVal1) - ensure.DeepEqual(t, values[2].Data(), []byte(nil)) - ensure.DeepEqual(t, values[3].Data(), []byte(nil)) + require.EqualValues(t, values[0].Data(), []byte(nil)) + require.EqualValues(t, values[1].Data(), givenVal1) + require.EqualValues(t, values[2].Data(), []byte(nil)) + require.EqualValues(t, values[3].Data(), []byte(nil)) // column family 1 only has givenKey2 and givenKey3 values, err = db.MultiGetCF(ro, cfh[1], []byte("noexist"), givenKey1, givenKey2, givenKey3) defer values.Destroy() - ensure.Nil(t, err) - ensure.DeepEqual(t, len(values), 4) + require.Nil(t, err) + require.EqualValues(t, len(values), 4) - ensure.DeepEqual(t, values[0].Data(), 
[]byte(nil)) - ensure.DeepEqual(t, values[1].Data(), []byte(nil)) - ensure.DeepEqual(t, values[2].Data(), givenVal2) - ensure.DeepEqual(t, values[3].Data(), givenVal3) + require.EqualValues(t, values[0].Data(), []byte(nil)) + require.EqualValues(t, values[1].Data(), []byte(nil)) + require.EqualValues(t, values[2].Data(), givenVal2) + require.EqualValues(t, values[3].Data(), givenVal3) // getting them all from the right CF should return them all values, err = db.MultiGetCFMultiCF(ro, @@ -247,10 +258,10 @@ func TestColumnFamilyMultiGet(t *testing.T) { [][]byte{givenKey1, givenKey2, givenKey3}, ) defer values.Destroy() - ensure.Nil(t, err) - ensure.DeepEqual(t, len(values), 3) + require.Nil(t, err) + require.EqualValues(t, len(values), 3) - ensure.DeepEqual(t, values[0].Data(), givenVal1) - ensure.DeepEqual(t, values[1].Data(), givenVal2) - ensure.DeepEqual(t, values[2].Data(), givenVal3) + require.EqualValues(t, values[0].Data(), givenVal1) + require.EqualValues(t, values[1].Data(), givenVal2) + require.EqualValues(t, values[2].Data(), givenVal3) } diff --git a/checkpoint_test.go b/checkpoint_test.go index 09ed41f..d9074ef 100644 --- a/checkpoint_test.go +++ b/checkpoint_test.go @@ -5,16 +5,16 @@ import ( "os" "testing" - "github.com/facebookgo/ensure" + "github.com/stretchr/testify/require" ) func TestCheckpoint(t *testing.T) { suffix := "checkpoint" dir, err := ioutil.TempDir("", "gorocksdb-"+suffix) - ensure.Nil(t, err) + require.Nil(t, err) err = os.RemoveAll(dir) - ensure.Nil(t, err) + require.Nil(t, err) db := newTestDB(t, "TestCheckpoint", nil) defer db.Close() @@ -24,7 +24,7 @@ func TestCheckpoint(t *testing.T) { givenVal := []byte("val") wo := NewDefaultWriteOptions() for _, k := range givenKeys { - ensure.Nil(t, db.Put(wo, k, givenVal)) + require.Nil(t, db.Put(wo, k, givenVal)) } var dbCheck *DB @@ -32,16 +32,16 @@ func TestCheckpoint(t *testing.T) { checkpoint, err = db.NewCheckpoint() defer checkpoint.Destroy() - ensure.NotNil(t, checkpoint) - ensure.Nil(t, err) + require.NotNil(t, checkpoint) + require.Nil(t, err) err = checkpoint.CreateCheckpoint(dir, 0) - ensure.Nil(t, err) + require.Nil(t, err) opts := NewDefaultOptions() opts.SetCreateIfMissing(true) dbCheck, err = OpenDb(opts, dir) - ensure.Nil(t, err) + require.Nil(t, err) defer dbCheck.Close() // test keys @@ -49,8 +49,8 @@ func TestCheckpoint(t *testing.T) { ro := NewDefaultReadOptions() for _, k := range givenKeys { value, err = dbCheck.Get(ro, k) - ensure.Nil(t, err) - ensure.DeepEqual(t, value.Data(), givenVal) + require.Nil(t, err) + require.EqualValues(t, value.Data(), givenVal) value.Free() } } diff --git a/compaction_filter_test.go b/compaction_filter_test.go index c1646ad..9a7d4b0 100644 --- a/compaction_filter_test.go +++ b/compaction_filter_test.go @@ -4,7 +4,7 @@ import ( "bytes" "testing" - "github.com/facebookgo/ensure" + "github.com/stretchr/testify/require" ) func TestCompactionFilter(t *testing.T) { @@ -32,8 +32,8 @@ func TestCompactionFilter(t *testing.T) { // insert the test keys wo := NewDefaultWriteOptions() - ensure.Nil(t, db.Put(wo, changeKey, changeValOld)) - ensure.Nil(t, db.Put(wo, deleteKey, changeValNew)) + require.Nil(t, db.Put(wo, changeKey, changeValOld)) + require.Nil(t, db.Put(wo, deleteKey, changeValNew)) // trigger a compaction db.CompactRange(Range{nil, nil}) @@ -42,13 +42,13 @@ func TestCompactionFilter(t *testing.T) { ro := NewDefaultReadOptions() v1, err := db.Get(ro, changeKey) defer v1.Free() - ensure.Nil(t, err) - ensure.DeepEqual(t, v1.Data(), changeValNew) + require.Nil(t, err) 
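This test drives a user-supplied filter through a full compaction: one key's value is rewritten, another key is dropped. For reference, a minimal filter in the same shape as the mockCompactionFilter defined further down (the interface, Name plus Filter returning (remove, newVal), is assumed from the gorocksdb lineage this library derives from):

```go
package example

import "bytes"

// ttlishFilter is a hypothetical filter in the same shape as the test's
// mockCompactionFilter: drop transient keys, rewrite one known key.
type ttlishFilter struct{}

func (ttlishFilter) Name() string { return "grocksdb.ttlish" }

func (ttlishFilter) Filter(level int, key, val []byte) (remove bool, newVal []byte) {
	if bytes.HasPrefix(key, []byte("tmp-")) {
		return true, nil // drop transient entries during compaction
	}
	if bytes.Equal(key, []byte("key_old")) {
		return false, []byte("val_new") // keep the key, rewrite its value
	}
	return false, nil // keep the entry unchanged
}
```

Such a filter would be installed before opening the DB, assuming the gorocksdb-style hook: opts.SetCompactionFilter(ttlishFilter{}).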
+ require.EqualValues(t, v1.Data(), changeValNew) // ensure that the key is deleted after compaction v2, err := db.Get(ro, deleteKey) - ensure.Nil(t, err) - ensure.True(t, v2.Data() == nil) + require.Nil(t, err) + require.Nil(t, v2.Data()) } type mockCompactionFilter struct { diff --git a/comparator_test.go b/comparator_test.go index d6072ba..a478237 100644 --- a/comparator_test.go +++ b/comparator_test.go @@ -3,7 +3,7 @@ package grocksdb import ( "testing" - "github.com/facebookgo/ensure" + "github.com/stretchr/testify/require" ) func TestComparator(t *testing.T) { @@ -16,7 +16,7 @@ func TestComparator(t *testing.T) { givenKeys := [][]byte{[]byte("key1"), []byte("key2"), []byte("key3")} wo := NewDefaultWriteOptions() for _, k := range givenKeys { - ensure.Nil(t, db.Put(wo, k, []byte("val"))) + require.Nil(t, db.Put(wo, k, []byte("val"))) } // create a iterator to collect the keys @@ -32,8 +32,8 @@ func TestComparator(t *testing.T) { copy(key, iter.Key().Data()) actualKeys = append(actualKeys, key) } - ensure.Nil(t, iter.Err()) + require.Nil(t, iter.Err()) // ensure that the order is correct - ensure.DeepEqual(t, actualKeys, givenKeys) + require.EqualValues(t, actualKeys, givenKeys) } diff --git a/cow_test.go b/cow_test.go index 61ccae2..cabd617 100644 --- a/cow_test.go +++ b/cow_test.go @@ -5,7 +5,7 @@ import ( "sync" "testing" - "github.com/facebookgo/ensure" + "github.com/stretchr/testify/require" ) func TestCOWList(t *testing.T) { @@ -13,9 +13,9 @@ func TestCOWList(t *testing.T) { cl.Append("hello") cl.Append("world") cl.Append("!") - ensure.DeepEqual(t, cl.Get(0), "hello") - ensure.DeepEqual(t, cl.Get(1), "world") - ensure.DeepEqual(t, cl.Get(2), "!") + require.EqualValues(t, cl.Get(0), "hello") + require.EqualValues(t, cl.Get(1), "world") + require.EqualValues(t, cl.Get(2), "!") } func TestCOWListMT(t *testing.T) { @@ -32,7 +32,7 @@ func TestCOWListMT(t *testing.T) { } wg.Wait() for i, v := range expectedRes { - ensure.DeepEqual(t, cl.Get(i), v) + require.EqualValues(t, cl.Get(i), v) } } diff --git a/db.go b/db.go index d58077b..a518a5b 100755 --- a/db.go +++ b/db.go @@ -404,58 +404,58 @@ func (db *DB) Name() string { return db.name } -// // KeyMayExists the value is only allocated (using malloc) and returned if it is found and -// // value_found isn't NULL. In that case the user is responsible for freeing it. -// func (db *DB) KeyMayExists(opts *ReadOptions, key []byte, timestamp string) (slice *Slice) { -// t := []byte(timestamp) +// KeyMayExists the value is only allocated (using malloc) and returned if it is found and +// value_found isn't NULL. In that case the user is responsible for freeing it. 
+func (db *DB) KeyMayExists(opts *ReadOptions, key []byte, timestamp string) (slice *Slice) { + t := []byte(timestamp) -// var ( -// cValue *C.char -// cValLen C.size_t -// cKey = byteToChar(key) -// cFound C.uchar -// cTimestamp = byteToChar(t) -// ) + var ( + cValue *C.char + cValLen C.size_t + cKey = byteToChar(key) + cFound C.uchar = 0 + cTimestamp = byteToChar(t) + ) -// C.rocksdb_key_may_exist(db.c, opts.c, -// cKey, C.size_t(len(key)), -// &cValue, &cValLen, -// cTimestamp, C.size_t(len(t)), -// &cFound) + C.rocksdb_key_may_exist(db.c, opts.c, + cKey, C.size_t(len(key)), + &cValue, &cValLen, + cTimestamp, C.size_t(len(t)), + &cFound) -// if charToBool(cFound) { -// slice = NewSlice(cValue, cValLen) -// } + if charToBool(cFound) { + slice = NewSlice(cValue, cValLen) + } -// return -// } + return +} -// // KeyMayExistsCF the value is only allocated (using malloc) and returned if it is found and -// // value_found isn't NULL. In that case the user is responsible for freeing it. -// func (db *DB) KeyMayExistsCF(opts *ReadOptions, cf *ColumnFamilyHandle, key []byte, timestamp string) (slice *Slice) { -// t := []byte(timestamp) +// KeyMayExistsCF the value is only allocated (using malloc) and returned if it is found and +// value_found isn't NULL. In that case the user is responsible for freeing it. +func (db *DB) KeyMayExistsCF(opts *ReadOptions, cf *ColumnFamilyHandle, key []byte, timestamp string) (slice *Slice) { + t := []byte(timestamp) -// var ( -// cValue *C.char -// cValLen C.size_t -// cKey = byteToChar(key) -// cFound C.uchar -// cTimestamp = byteToChar(t) -// ) + var ( + cValue *C.char + cValLen C.size_t + cKey = byteToChar(key) + cFound C.uchar = 0 + cTimestamp = byteToChar(t) + ) -// C.rocksdb_key_may_exist_cf(db.c, opts.c, -// cf.c, -// cKey, C.size_t(len(key)), -// &cValue, &cValLen, -// cTimestamp, C.size_t(len(t)), -// &cFound) + C.rocksdb_key_may_exist_cf(db.c, opts.c, + cf.c, + cKey, C.size_t(len(key)), + &cValue, &cValLen, + cTimestamp, C.size_t(len(t)), + &cFound) -// if charToBool(cFound) { -// slice = NewSlice(cValue, cValLen) -// } + if charToBool(cFound) { + slice = NewSlice(cValue, cValLen) + } -// return -// } + return +} // Get returns the data associated with the key from the database. 
func (db *DB) Get(opts *ReadOptions, key []byte) (slice *Slice, err error) { diff --git a/db_external_file_test.go b/db_external_file_test.go index b618244..a3f06a3 100644 --- a/db_external_file_test.go +++ b/db_external_file_test.go @@ -5,7 +5,7 @@ import ( "os" "testing" - "github.com/facebookgo/ensure" + "github.com/stretchr/testify/require" ) func TestExternalFile(t *testing.T) { @@ -18,40 +18,40 @@ func TestExternalFile(t *testing.T) { defer w.Destroy() filePath, err := ioutil.TempFile("", "sst-file-test") - ensure.Nil(t, err) + require.Nil(t, err) defer os.Remove(filePath.Name()) err = w.Open(filePath.Name()) - ensure.Nil(t, err) + require.Nil(t, err) err = w.Add([]byte("aaa"), []byte("aaaValue")) - ensure.Nil(t, err) + require.Nil(t, err) err = w.Add([]byte("bbb"), []byte("bbbValue")) - ensure.Nil(t, err) + require.Nil(t, err) err = w.Add([]byte("ccc"), []byte("cccValue")) - ensure.Nil(t, err) + require.Nil(t, err) err = w.Add([]byte("ddd"), []byte("dddValue")) - ensure.Nil(t, err) + require.Nil(t, err) err = w.Finish() - ensure.Nil(t, err) + require.Nil(t, err) ingestOpts := NewDefaultIngestExternalFileOptions() err = db.IngestExternalFile([]string{filePath.Name()}, ingestOpts) - ensure.Nil(t, err) + require.Nil(t, err) readOpts := NewDefaultReadOptions() v1, err := db.Get(readOpts, []byte("aaa")) - ensure.Nil(t, err) - ensure.DeepEqual(t, v1.Data(), []byte("aaaValue")) + require.Nil(t, err) + require.EqualValues(t, v1.Data(), []byte("aaaValue")) v2, err := db.Get(readOpts, []byte("bbb")) - ensure.Nil(t, err) - ensure.DeepEqual(t, v2.Data(), []byte("bbbValue")) + require.Nil(t, err) + require.EqualValues(t, v2.Data(), []byte("bbbValue")) v3, err := db.Get(readOpts, []byte("ccc")) - ensure.Nil(t, err) - ensure.DeepEqual(t, v3.Data(), []byte("cccValue")) + require.Nil(t, err) + require.EqualValues(t, v3.Data(), []byte("cccValue")) v4, err := db.Get(readOpts, []byte("ddd")) - ensure.Nil(t, err) - ensure.DeepEqual(t, v4.Data(), []byte("dddValue")) + require.Nil(t, err) + require.EqualValues(t, v4.Data(), []byte("dddValue")) } diff --git a/db_test.go b/db_test.go index d51c32f..61df886 100755 --- a/db_test.go +++ b/db_test.go @@ -5,16 +5,16 @@ import ( "strconv" "testing" - "github.com/facebookgo/ensure" + "github.com/stretchr/testify/require" ) func TestOpenDb(t *testing.T) { db := newTestDB(t, "TestOpenDb", nil) defer db.Close() - ensure.DeepEqual(t, "0", db.GetProperty("rocksdb.num-immutable-mem-table")) + require.EqualValues(t, "0", db.GetProperty("rocksdb.num-immutable-mem-table")) v, success := db.GetIntProperty("rocksdb.num-immutable-mem-table") - ensure.DeepEqual(t, uint64(0), v) - ensure.True(t, success) + require.EqualValues(t, uint64(0), v) + require.True(t, success) } func TestDBCRUD(t *testing.T) { @@ -30,43 +30,43 @@ func TestDBCRUD(t *testing.T) { ) // create - ensure.Nil(t, db.Put(wo, givenKey, givenVal1)) + require.Nil(t, db.Put(wo, givenKey, givenVal1)) // retrieve v1, err := db.Get(ro, givenKey) defer v1.Free() - ensure.Nil(t, err) - ensure.DeepEqual(t, v1.Data(), givenVal1) + require.Nil(t, err) + require.EqualValues(t, v1.Data(), givenVal1) // retrieve bytes _v1, err := db.GetBytes(ro, givenKey) - ensure.Nil(t, err) - ensure.DeepEqual(t, _v1, givenVal1) + require.Nil(t, err) + require.EqualValues(t, _v1, givenVal1) // update - ensure.Nil(t, db.Put(wo, givenKey, givenVal2)) + require.Nil(t, db.Put(wo, givenKey, givenVal2)) v2, err := db.Get(ro, givenKey) defer v2.Free() - ensure.Nil(t, err) - ensure.DeepEqual(t, v2.Data(), givenVal2) + require.Nil(t, err) + 
require.EqualValues(t, v2.Data(), givenVal2) // retrieve pinned v3, err := db.GetPinned(ro, givenKey) defer v3.Destroy() - ensure.Nil(t, err) - ensure.DeepEqual(t, v3.Data(), givenVal2) + require.Nil(t, err) + require.EqualValues(t, v3.Data(), givenVal2) // delete - ensure.Nil(t, db.Delete(wo, givenKey)) + require.Nil(t, db.Delete(wo, givenKey)) v4, err := db.Get(ro, givenKey) - ensure.Nil(t, err) - ensure.True(t, v4.Data() == nil) + require.Nil(t, err) + require.True(t, v4.Data() == nil) // retrieve missing pinned v5, err := db.GetPinned(ro, givenKey) defer v5.Destroy() - ensure.Nil(t, err) - ensure.True(t, v5.Data() == nil) + require.Nil(t, err) + require.True(t, v5.Data() == nil) } func TestDBCRUDDBPaths(t *testing.T) { @@ -93,48 +93,54 @@ func TestDBCRUDDBPaths(t *testing.T) { // retrieve before create noexist, err := db.Get(ro, givenKey) defer noexist.Free() - ensure.Nil(t, err) - ensure.False(t, noexist.Exists()) - ensure.DeepEqual(t, noexist.Data(), []byte(nil)) + require.Nil(t, err) + require.False(t, noexist.Exists()) + require.EqualValues(t, noexist.Data(), []byte(nil)) // create - ensure.Nil(t, db.Put(wo, givenKey, givenVal1)) + require.Nil(t, db.Put(wo, givenKey, givenVal1)) // retrieve v1, err := db.Get(ro, givenKey) defer v1.Free() - ensure.Nil(t, err) - ensure.True(t, v1.Exists()) - ensure.DeepEqual(t, v1.Data(), givenVal1) + require.Nil(t, err) + require.True(t, v1.Exists()) + require.EqualValues(t, v1.Data(), givenVal1) // update - ensure.Nil(t, db.Put(wo, givenKey, givenVal2)) + require.Nil(t, db.Put(wo, givenKey, givenVal2)) v2, err := db.Get(ro, givenKey) defer v2.Free() - ensure.Nil(t, err) - ensure.True(t, v2.Exists()) - ensure.DeepEqual(t, v2.Data(), givenVal2) + require.Nil(t, err) + require.True(t, v2.Exists()) + require.EqualValues(t, v2.Data(), givenVal2) // update - ensure.Nil(t, db.Put(wo, givenKey, givenVal3)) + require.Nil(t, db.Put(wo, givenKey, givenVal3)) v3, err := db.Get(ro, givenKey) defer v3.Free() - ensure.Nil(t, err) - ensure.True(t, v3.Exists()) - ensure.DeepEqual(t, v3.Data(), givenVal3) + require.Nil(t, err) + require.True(t, v3.Exists()) + require.EqualValues(t, v3.Data(), givenVal3) + + { + v4 := db.KeyMayExists(ro, givenKey, "") + defer v4.Free() + require.True(t, v4.Size() > 0) + } // delete - ensure.Nil(t, db.Delete(wo, givenKey)) + require.Nil(t, db.Delete(wo, givenKey)) v4, err := db.Get(ro, givenKey) defer v4.Free() - ensure.Nil(t, err) - ensure.False(t, v4.Exists()) - ensure.DeepEqual(t, v4.Data(), []byte(nil)) + require.Nil(t, err) + require.False(t, v4.Exists()) + require.EqualValues(t, v4.Data(), []byte(nil)) } func newTestDB(t *testing.T, name string, applyOpts func(opts *Options)) *DB { dir, err := ioutil.TempDir("", "gorocksdb-"+name) - ensure.Nil(t, err) + require.Nil(t, err) opts := NewDefaultOptions() // test the ratelimiter @@ -146,14 +152,14 @@ func newTestDB(t *testing.T, name string, applyOpts func(opts *Options)) *DB { applyOpts(opts) } db, err := OpenDb(opts, dir) - ensure.Nil(t, err) + require.Nil(t, err) return db } func newTestDBMultiCF(t *testing.T, name string, columns []string, applyOpts func(opts *Options)) (db *DB, cfh []*ColumnFamilyHandle, cleanup func()) { dir, err := ioutil.TempDir("", "gorocksdb-"+name) - ensure.Nil(t, err) + require.Nil(t, err) opts := NewDefaultOptions() opts.SetCreateIfMissingColumnFamilies(true) @@ -169,7 +175,7 @@ func newTestDBMultiCF(t *testing.T, name string, columns []string, applyOpts fun } db, cfh, err = OpenDbColumnFamilies(opts, dir, columns, options) - ensure.Nil(t, err) + 
require.Nil(t, err) cleanup = func() { for _, cf := range cfh { cf.Destroy() @@ -180,16 +186,16 @@ func newTestDBMultiCF(t *testing.T, name string, columns []string, applyOpts fun } func newTestDBPathNames(t *testing.T, name string, names []string, targetSizes []uint64, applyOpts func(opts *Options)) *DB { - ensure.DeepEqual(t, len(targetSizes), len(names)) - ensure.NotDeepEqual(t, len(names), 0) + require.EqualValues(t, len(targetSizes), len(names)) + require.True(t, len(names) > 0) dir, err := ioutil.TempDir("", "gorocksdb-"+name) - ensure.Nil(t, err) + require.Nil(t, err) paths := make([]string, len(names)) for i, name := range names { dir, err := ioutil.TempDir("", "gorocksdb-"+name) - ensure.Nil(t, err) + require.Nil(t, err) paths[i] = dir } @@ -206,7 +212,7 @@ func newTestDBPathNames(t *testing.T, name string, names []string, targetSizes [ applyOpts(opts) } db, err := OpenDb(opts, dir) - ensure.Nil(t, err) + require.Nil(t, err) return db } @@ -227,20 +233,20 @@ func TestDBMultiGet(t *testing.T) { ) // create - ensure.Nil(t, db.Put(wo, givenKey1, givenVal1)) - ensure.Nil(t, db.Put(wo, givenKey2, givenVal2)) - ensure.Nil(t, db.Put(wo, givenKey3, givenVal3)) + require.Nil(t, db.Put(wo, givenKey1, givenVal1)) + require.Nil(t, db.Put(wo, givenKey2, givenVal2)) + require.Nil(t, db.Put(wo, givenKey3, givenVal3)) // retrieve values, err := db.MultiGet(ro, []byte("noexist"), givenKey1, givenKey2, givenKey3) defer values.Destroy() - ensure.Nil(t, err) - ensure.DeepEqual(t, len(values), 4) + require.Nil(t, err) + require.EqualValues(t, len(values), 4) - ensure.DeepEqual(t, values[0].Data(), []byte(nil)) - ensure.DeepEqual(t, values[1].Data(), givenVal1) - ensure.DeepEqual(t, values[2].Data(), givenVal2) - ensure.DeepEqual(t, values[3].Data(), givenVal3) + require.EqualValues(t, values[0].Data(), []byte(nil)) + require.EqualValues(t, values[1].Data(), givenVal1) + require.EqualValues(t, values[2].Data(), givenVal2) + require.EqualValues(t, values[3].Data(), givenVal3) } func TestDBGetApproximateSizes(t *testing.T) { @@ -249,15 +255,15 @@ func TestDBGetApproximateSizes(t *testing.T) { // no ranges sizes := db.GetApproximateSizes(nil) - ensure.DeepEqual(t, len(sizes), 0) + require.EqualValues(t, len(sizes), 0) // range will nil start and limit sizes = db.GetApproximateSizes([]Range{{Start: nil, Limit: nil}}) - ensure.DeepEqual(t, sizes, []uint64{0}) + require.EqualValues(t, sizes, []uint64{0}) // valid range sizes = db.GetApproximateSizes([]Range{{Start: []byte{0x00}, Limit: []byte{0xFF}}}) - ensure.DeepEqual(t, sizes, []uint64{0}) + require.EqualValues(t, sizes, []uint64{0}) } func TestDBGetApproximateSizesCF(t *testing.T) { @@ -267,17 +273,17 @@ func TestDBGetApproximateSizesCF(t *testing.T) { o := NewDefaultOptions() cf, err := db.CreateColumnFamily(o, "other") - ensure.Nil(t, err) + require.Nil(t, err) // no ranges sizes := db.GetApproximateSizesCF(cf, nil) - ensure.DeepEqual(t, len(sizes), 0) + require.EqualValues(t, len(sizes), 0) // range will nil start and limit sizes = db.GetApproximateSizesCF(cf, []Range{{Start: nil, Limit: nil}}) - ensure.DeepEqual(t, sizes, []uint64{0}) + require.EqualValues(t, sizes, []uint64{0}) // valid range sizes = db.GetApproximateSizesCF(cf, []Range{{Start: []byte{0x00}, Limit: []byte{0xFF}}}) - ensure.DeepEqual(t, sizes, []uint64{0}) + require.EqualValues(t, sizes, []uint64{0}) } diff --git a/dist/darwin_amd64/include/rocksdb/advanced_options.h b/dist/darwin_amd64/include/rocksdb/advanced_options.h index 574e939..5c3a19a 100644 --- 
a/dist/darwin_amd64/include/rocksdb/advanced_options.h +++ b/dist/darwin_amd64/include/rocksdb/advanced_options.h @@ -643,6 +643,8 @@ struct AdvancedColumnFamilyOptions { bool optimize_filters_for_hits = false; // After writing every SST file, reopen it and read all the keys. + // Checks the hash of all of the keys and values written versus the + // keys in the file and signals a corruption if they do not match // // Default: false // diff --git a/dist/darwin_amd64/include/rocksdb/c.h b/dist/darwin_amd64/include/rocksdb/c.h index 0d07e30..37da5f1 100644 --- a/dist/darwin_amd64/include/rocksdb/c.h +++ b/dist/darwin_amd64/include/rocksdb/c.h @@ -171,6 +171,11 @@ rocksdb_backup_engine_restore_db_from_latest_backup( rocksdb_backup_engine_t* be, const char* db_dir, const char* wal_dir, const rocksdb_restore_options_t* restore_options, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_restore_db_from_backup( + rocksdb_backup_engine_t* be, const char* db_dir, const char* wal_dir, + const rocksdb_restore_options_t* restore_options, const uint32_t backup_id, + char** errptr); + extern ROCKSDB_LIBRARY_API const rocksdb_backup_engine_info_t* rocksdb_backup_engine_get_backup_info(rocksdb_backup_engine_t* be); @@ -320,6 +325,21 @@ extern ROCKSDB_LIBRARY_API void rocksdb_multi_get_cf( const size_t* keys_list_sizes, char** values_list, size_t* values_list_sizes, char** errs); +// The value is only allocated (using malloc) and returned if it is found and +// value_found isn't NULL. In that case the user is responsible for freeing it. +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_key_may_exist( + rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key, + size_t key_len, char** value, size_t* val_len, const char* timestamp, + size_t timestamp_len, unsigned char* value_found); + +// The value is only allocated (using malloc) and returned if it is found and +// value_found isn't NULL. In that case the user is responsible for freeing it. 
+extern ROCKSDB_LIBRARY_API unsigned char rocksdb_key_may_exist_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t key_len, char** value, size_t* val_len, const char* timestamp, + size_t timestamp_len, unsigned char* value_found); + extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* rocksdb_create_iterator( rocksdb_t* db, const rocksdb_readoptions_t* options); @@ -966,106 +986,192 @@ extern ROCKSDB_LIBRARY_API uint32_t rocksdb_options_get_max_subcompactions(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_jobs( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_background_jobs( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_compactions( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_background_compactions( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_base_background_compactions( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_base_background_compactions( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_flushes( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_background_flushes( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_log_file_size( rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_max_log_file_size(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_log_file_time_to_roll( rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_log_file_time_to_roll(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_keep_log_file_num( rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_keep_log_file_num(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_recycle_log_file_num( rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_recycle_log_file_num(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_soft_rate_limit( rocksdb_options_t*, double); +extern ROCKSDB_LIBRARY_API double rocksdb_options_get_soft_rate_limit( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hard_rate_limit( rocksdb_options_t*, double); +extern ROCKSDB_LIBRARY_API double rocksdb_options_get_hard_rate_limit( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_soft_pending_compaction_bytes_limit( rocksdb_options_t* opt, size_t v); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_soft_pending_compaction_bytes_limit(rocksdb_options_t* opt); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hard_pending_compaction_bytes_limit( rocksdb_options_t* opt, size_t v); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_hard_pending_compaction_bytes_limit(rocksdb_options_t* opt); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_rate_limit_delay_max_milliseconds(rocksdb_options_t*, unsigned int); +extern ROCKSDB_LIBRARY_API unsigned int +rocksdb_options_get_rate_limit_delay_max_milliseconds(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_manifest_file_size( rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_max_manifest_file_size(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void 
rocksdb_options_set_table_cache_numshardbits( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_table_cache_numshardbits( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_table_cache_remove_scan_count_limit(rocksdb_options_t*, int); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_arena_block_size( rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_arena_block_size(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_fsync( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_use_fsync( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_db_log_dir( rocksdb_options_t*, const char*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_dir(rocksdb_options_t*, const char*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_WAL_ttl_seconds( rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_WAL_ttl_seconds(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_WAL_size_limit_MB( rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_WAL_size_limit_MB(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_manifest_preallocation_size( rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_manifest_preallocation_size(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_purge_redundant_kvs_while_flush(rocksdb_options_t*, unsigned char); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_mmap_reads( rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_allow_mmap_reads( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_mmap_writes( rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_allow_mmap_writes( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_direct_reads( rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_use_direct_reads( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_direct_io_for_flush_and_compaction(rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_use_direct_io_for_flush_and_compaction(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_is_fd_close_on_exec( rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_is_fd_close_on_exec(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_skip_log_error_on_recovery( rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_skip_log_error_on_recovery(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_stats_dump_period_sec( rocksdb_options_t*, unsigned int); +extern ROCKSDB_LIBRARY_API unsigned int +rocksdb_options_get_stats_dump_period_sec(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_stats_persist_period_sec( + rocksdb_options_t*, unsigned int); +extern ROCKSDB_LIBRARY_API unsigned int +rocksdb_options_get_stats_persist_period_sec(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_advise_random_on_open( rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_advise_random_on_open(rocksdb_options_t*); 
extern ROCKSDB_LIBRARY_API void rocksdb_options_set_access_hint_on_compaction_start(rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_options_get_access_hint_on_compaction_start(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_adaptive_mutex( rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_use_adaptive_mutex( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bytes_per_sync( rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_bytes_per_sync(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_bytes_per_sync( rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_wal_bytes_per_sync(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_writable_file_max_buffer_size(rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_writable_file_max_buffer_size(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_concurrent_memtable_write(rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_allow_concurrent_memtable_write(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_enable_write_thread_adaptive_yield(rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_enable_write_thread_adaptive_yield(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_sequential_skip_in_iterations(rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_max_sequential_skip_in_iterations(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_disable_auto_compactions( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_disable_auto_compactions(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_optimize_filters_for_hits( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_optimize_filters_for_hits(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_delete_obsolete_files_period_micros(rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_delete_obsolete_files_period_micros(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_prepare_for_bulk_load( rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_memtable_vector_rep( rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_memtable_prefix_bloom_size_ratio( rocksdb_options_t*, double); +extern ROCKSDB_LIBRARY_API double +rocksdb_options_get_memtable_prefix_bloom_size_ratio(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_compaction_bytes( rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_max_compaction_bytes(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hash_skip_list_rep( rocksdb_options_t*, size_t, int32_t, int32_t); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hash_link_list_rep( @@ -1078,17 +1184,29 @@ extern ROCKSDB_LIBRARY_API void rocksdb_options_set_min_level_to_compress( extern ROCKSDB_LIBRARY_API void rocksdb_options_set_memtable_huge_page_size( rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_memtable_huge_page_size(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API 
void rocksdb_options_set_max_successive_merges( rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_max_successive_merges(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bloom_locality( rocksdb_options_t*, uint32_t); +extern ROCKSDB_LIBRARY_API uint32_t +rocksdb_options_get_bloom_locality(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_inplace_update_support( rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_inplace_update_support(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_inplace_update_num_locks( rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_inplace_update_num_locks(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_report_bg_io_stats( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_report_bg_io_stats( + rocksdb_options_t*); enum { rocksdb_tolerate_corrupted_tail_records_recovery = 0, @@ -1098,6 +1216,8 @@ enum { }; extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_recovery_mode( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_wal_recovery_mode( + rocksdb_options_t*); enum { rocksdb_no_compression = 0, @@ -1111,8 +1231,12 @@ enum { }; extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compression( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_compression( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bottommost_compression( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_bottommost_compression( + rocksdb_options_t*); enum { rocksdb_level_compaction = 0, @@ -1121,6 +1245,8 @@ enum { }; extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compaction_style( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_compaction_style( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_universal_compaction_options( rocksdb_options_t*, rocksdb_universal_compaction_options_t*); @@ -1130,6 +1256,8 @@ extern ROCKSDB_LIBRARY_API void rocksdb_options_set_ratelimiter( rocksdb_options_t* opt, rocksdb_ratelimiter_t* limiter); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_atomic_flush( rocksdb_options_t* opt, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_atomic_flush( + rocksdb_options_t* opt); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_row_cache( rocksdb_options_t* opt, rocksdb_cache_t* cache diff --git a/dist/darwin_amd64/include/rocksdb/compaction_filter.h b/dist/darwin_amd64/include/rocksdb/compaction_filter.h index 9765078..ed17889 100644 --- a/dist/darwin_amd64/include/rocksdb/compaction_filter.h +++ b/dist/darwin_amd64/include/rocksdb/compaction_filter.h @@ -45,6 +45,8 @@ class CompactionFilter { kRemove, kChangeValue, kRemoveAndSkipUntil, + kChangeBlobIndex, // used internally by BlobDB. + kIOError, // used internally by BlobDB. 
}; enum class BlobDecision { kKeep, kChangeValue, kCorruption, kIOError }; diff --git a/dist/darwin_amd64/include/rocksdb/db.h b/dist/darwin_amd64/include/rocksdb/db.h index 08609f3..5d69a2a 100644 --- a/dist/darwin_amd64/include/rocksdb/db.h +++ b/dist/darwin_amd64/include/rocksdb/db.h @@ -1354,6 +1354,9 @@ class DB { virtual void GetLiveFilesMetaData( std::vector* /*metadata*/) {} + // Return a list of all table checksum info + virtual Status GetLiveFilesChecksumInfo(FileChecksumList* checksum_list) = 0; + // Obtains the meta data of the specified column family of the DB. virtual void GetColumnFamilyMetaData(ColumnFamilyHandle* /*column_family*/, ColumnFamilyMetaData* /*metadata*/) {} @@ -1543,6 +1546,13 @@ class DB { // Returns Status::OK if identity could be set properly virtual Status GetDbIdentity(std::string& identity) const = 0; + // Return a unique identifier for each DB object that is opened + // This DB session ID should be unique among all open DB instances on all + // hosts, and should be unique among re-openings of the same or other DBs. + // (Two open DBs have the same identity from other function GetDbIdentity when + // one is physically copied from the other.) + virtual Status GetDbSessionId(std::string& session_id) const = 0; + // Returns default column family handle virtual ColumnFamilyHandle* DefaultColumnFamily() const = 0; diff --git a/dist/darwin_amd64/include/rocksdb/env.h b/dist/darwin_amd64/include/rocksdb/env.h index e1c54d7..4af2171 100644 --- a/dist/darwin_amd64/include/rocksdb/env.h +++ b/dist/darwin_amd64/include/rocksdb/env.h @@ -61,6 +61,13 @@ class FileSystem; const size_t kDefaultPageSize = 4 * 1024; +enum class CpuPriority { + kIdle = 0, + kLow = 1, + kNormal = 2, + kHigh = 3, +}; + // Options while opening a file to read/write struct EnvOptions { // Construct with default Options @@ -474,6 +481,13 @@ class Env { // Lower IO priority for threads from the specified pool. virtual void LowerThreadPoolIOPriority(Priority /*pool*/ = LOW) {} + // Lower CPU priority for threads from the specified pool. + virtual Status LowerThreadPoolCPUPriority(Priority /*pool*/, + CpuPriority /*pri*/) { + return Status::NotSupported( + "Env::LowerThreadPoolCPUPriority(Priority, CpuPriority) not supported"); + } + // Lower CPU priority for threads from the specified pool. 
virtual void LowerThreadPoolCPUPriority(Priority /*pool*/ = LOW) {} @@ -1355,14 +1369,18 @@ class EnvWrapper : public Env { return target_->IncBackgroundThreadsIfNeeded(num, pri); } - void LowerThreadPoolIOPriority(Priority pool = LOW) override { + void LowerThreadPoolIOPriority(Priority pool) override { target_->LowerThreadPoolIOPriority(pool); } - void LowerThreadPoolCPUPriority(Priority pool = LOW) override { + void LowerThreadPoolCPUPriority(Priority pool) override { target_->LowerThreadPoolCPUPriority(pool); } + Status LowerThreadPoolCPUPriority(Priority pool, CpuPriority pri) override { + return target_->LowerThreadPoolCPUPriority(pool, pri); + } + std::string TimeToString(uint64_t time) override { return target_->TimeToString(time); } diff --git a/dist/darwin_amd64/include/rocksdb/env_encryption.h b/dist/darwin_amd64/include/rocksdb/env_encryption.h index a4db10f..e7c7e35 100644 --- a/dist/darwin_amd64/include/rocksdb/env_encryption.h +++ b/dist/darwin_amd64/include/rocksdb/env_encryption.h @@ -169,8 +169,10 @@ class CTREncryptionProvider : public EncryptionProvider { virtual ~CTREncryptionProvider() {} // GetPrefixLength returns the length of the prefix that is added to every - // file and used for storing encryption options. For optimal performance, the - // prefix length should be a multiple of the page size. + // file + // and used for storing encryption options. + // For optimal performance, the prefix length should be a multiple of + // the page size. virtual size_t GetPrefixLength() override; // CreateNewPrefix initialized an allocated block of prefix memory @@ -194,13 +196,243 @@ class CTREncryptionProvider : public EncryptionProvider { size_t blockSize); // CreateCipherStreamFromPrefix creates a block access cipher stream for a - // file given given name and options. The given prefix is already decrypted. + // file given + // given name and options. The given prefix is already decrypted. virtual Status CreateCipherStreamFromPrefix( const std::string& fname, const EnvOptions& options, uint64_t initialCounter, const Slice& iv, const Slice& prefix, std::unique_ptr* result); }; +class EncryptedSequentialFile : public SequentialFile { + protected: + std::unique_ptr file_; + std::unique_ptr stream_; + uint64_t offset_; + size_t prefixLength_; + + public: + // Default ctor. Given underlying sequential file is supposed to be at + // offset == prefixLength. + EncryptedSequentialFile(std::unique_ptr&& f, + std::unique_ptr&& s, + size_t prefixLength) + : file_(std::move(f)), + stream_(std::move(s)), + offset_(prefixLength), + prefixLength_(prefixLength) {} + + // Read up to "n" bytes from the file. "scratch[0..n-1]" may be + // written by this routine. Sets "*result" to the data that was + // read (including if fewer than "n" bytes were successfully read). + // May set "*result" to point at data in "scratch[0..n-1]", so + // "scratch[0..n-1]" must be live when "*result" is used. + // If an error was encountered, returns a non-OK status. + // + // REQUIRES: External synchronization + virtual Status Read(size_t n, Slice* result, char* scratch) override; + + // Skip "n" bytes from the file. This is guaranteed to be no + // slower that reading the same data, but may be faster. + // + // If end of file is reached, skipping will stop at the end of the + // file, and Skip will return OK. + // + // REQUIRES: External synchronization + virtual Status Skip(uint64_t n) override; + + // Indicates the upper layers if the current SequentialFile implementation + // uses direct IO. 
+ virtual bool use_direct_io() const override; + + // Use the returned alignment value to allocate + // aligned buffer for Direct I/O + virtual size_t GetRequiredBufferAlignment() const override; + + // Remove any kind of caching of data from the offset to offset+length + // of this file. If the length is 0, then it refers to the end of file. + // If the system is not caching the file contents, then this is a noop. + virtual Status InvalidateCache(size_t offset, size_t length) override; + + // Positioned Read for direct I/O + // If Direct I/O enabled, offset, n, and scratch should be properly aligned + virtual Status PositionedRead(uint64_t offset, size_t n, Slice* result, + char* scratch) override; +}; + +// A file abstraction for randomly reading the contents of a file. +class EncryptedRandomAccessFile : public RandomAccessFile { + protected: + std::unique_ptr file_; + std::unique_ptr stream_; + size_t prefixLength_; + + public: + EncryptedRandomAccessFile(std::unique_ptr&& f, + std::unique_ptr&& s, + size_t prefixLength) + : file_(std::move(f)), + stream_(std::move(s)), + prefixLength_(prefixLength) {} + + // Read up to "n" bytes from the file starting at "offset". + // "scratch[0..n-1]" may be written by this routine. Sets "*result" + // to the data that was read (including if fewer than "n" bytes were + // successfully read). May set "*result" to point at data in + // "scratch[0..n-1]", so "scratch[0..n-1]" must be live when + // "*result" is used. If an error was encountered, returns a non-OK + // status. + // + // Safe for concurrent use by multiple threads. + // If Direct I/O enabled, offset, n, and scratch should be aligned properly. + virtual Status Read(uint64_t offset, size_t n, Slice* result, + char* scratch) const override; + + // Readahead the file starting from offset by n bytes for caching. + virtual Status Prefetch(uint64_t offset, size_t n) override; + + // Tries to get an unique ID for this file that will be the same each time + // the file is opened (and will stay the same while the file is open). + // Furthermore, it tries to make this ID at most "max_size" bytes. If such an + // ID can be created this function returns the length of the ID and places it + // in "id"; otherwise, this function returns 0, in which case "id" + // may not have been modified. + // + // This function guarantees, for IDs from a given environment, two unique ids + // cannot be made equal to each other by adding arbitrary bytes to one of + // them. That is, no unique ID is the prefix of another. + // + // This function guarantees that the returned ID will not be interpretable as + // a single varint. + // + // Note: these IDs are only valid for the duration of the process. + virtual size_t GetUniqueId(char* id, size_t max_size) const override; + + virtual void Hint(AccessPattern pattern) override; + + // Indicates the upper layers if the current RandomAccessFile implementation + // uses direct IO. + virtual bool use_direct_io() const override; + + // Use the returned alignment value to allocate + // aligned buffer for Direct I/O + virtual size_t GetRequiredBufferAlignment() const override; + + // Remove any kind of caching of data from the offset to offset+length + // of this file. If the length is 0, then it refers to the end of file. + // If the system is not caching the file contents, then this is a noop. + virtual Status InvalidateCache(size_t offset, size_t length) override; +}; + +// A file abstraction for sequential writing. 
The implementation +// must provide buffering since callers may append small fragments +// at a time to the file. +class EncryptedWritableFile : public WritableFileWrapper { + protected: + std::unique_ptr file_; + std::unique_ptr stream_; + size_t prefixLength_; + + public: + // Default ctor. Prefix is assumed to be written already. + EncryptedWritableFile(std::unique_ptr&& f, + std::unique_ptr&& s, + size_t prefixLength) + : WritableFileWrapper(f.get()), + file_(std::move(f)), + stream_(std::move(s)), + prefixLength_(prefixLength) {} + + Status Append(const Slice& data) override; + + Status PositionedAppend(const Slice& data, uint64_t offset) override; + + // Indicates the upper layers if the current WritableFile implementation + // uses direct IO. + virtual bool use_direct_io() const override; + + // Use the returned alignment value to allocate + // aligned buffer for Direct I/O + virtual size_t GetRequiredBufferAlignment() const override; + + /* + * Get the size of valid data in the file. + */ + virtual uint64_t GetFileSize() override; + + // Truncate is necessary to trim the file to the correct size + // before closing. It is not always possible to keep track of the file + // size due to whole pages writes. The behavior is undefined if called + // with other writes to follow. + virtual Status Truncate(uint64_t size) override; + + // Remove any kind of caching of data from the offset to offset+length + // of this file. If the length is 0, then it refers to the end of file. + // If the system is not caching the file contents, then this is a noop. + // This call has no effect on dirty pages in the cache. + virtual Status InvalidateCache(size_t offset, size_t length) override; + + // Sync a file range with disk. + // offset is the starting byte of the file range to be synchronized. + // nbytes specifies the length of the range to be synchronized. + // This asks the OS to initiate flushing the cached data to disk, + // without waiting for completion. + // Default implementation does nothing. + virtual Status RangeSync(uint64_t offset, uint64_t nbytes) override; + + // PrepareWrite performs any necessary preparation for a write + // before the write actually occurs. This allows for pre-allocation + // of space on devices where it can result in less file + // fragmentation and/or less waste from over-zealous filesystem + // pre-allocation. + virtual void PrepareWrite(size_t offset, size_t len) override; + + // Pre-allocates space for a file. + virtual Status Allocate(uint64_t offset, uint64_t len) override; +}; + +// A file abstraction for random reading and writing. +class EncryptedRandomRWFile : public RandomRWFile { + protected: + std::unique_ptr file_; + std::unique_ptr stream_; + size_t prefixLength_; + + public: + EncryptedRandomRWFile(std::unique_ptr&& f, + std::unique_ptr&& s, + size_t prefixLength) + : file_(std::move(f)), + stream_(std::move(s)), + prefixLength_(prefixLength) {} + + // Indicates if the class makes use of direct I/O + // If false you must pass aligned buffer to Write() + virtual bool use_direct_io() const override; + + // Use the returned alignment value to allocate + // aligned buffer for Direct I/O + virtual size_t GetRequiredBufferAlignment() const override; + + // Write bytes in `data` at offset `offset`, Returns Status::OK() on success. + // Pass aligned buffer when use_direct_io() returns true. 
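The Encrypted* file classes in this hunk are now exposed in the public env_encryption.h. They are normally not constructed directly; a hedged sketch of the usual entry point, assuming the NewEncryptedEnv() factory and the demo ROT13BlockCipher that ship in this header (substitute a real BlockCipher in production; the path is a placeholder):

    #include <memory>

    #include "rocksdb/db.h"
    #include "rocksdb/env_encryption.h"

    using namespace ROCKSDB_NAMESPACE;

    int main() {
      ROT13BlockCipher cipher(/*blockSize=*/32);  // demo cipher only
      CTREncryptionProvider provider(cipher);
      std::unique_ptr<Env> enc_env(NewEncryptedEnv(Env::Default(), &provider));

      Options options;
      options.create_if_missing = true;
      options.env = enc_env.get();  // route all file IO through the cipher

      DB* db = nullptr;
      Status s = DB::Open(options, "/tmp/encrypted_demo", &db);
      if (s.ok()) delete db;
      return s.ok() ? 0 : 1;
    }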
+ virtual Status Write(uint64_t offset, const Slice& data) override; + + // Read up to `n` bytes starting from offset `offset` and store them in + // result, provided `scratch` size should be at least `n`. + // Returns Status::OK() on success. + virtual Status Read(uint64_t offset, size_t n, Slice* result, + char* scratch) const override; + + virtual Status Flush() override; + + virtual Status Sync() override; + + virtual Status Fsync() override; + + virtual Status Close() override; +}; + } // namespace ROCKSDB_NAMESPACE #endif // !defined(ROCKSDB_LITE) diff --git a/dist/darwin_amd64/include/rocksdb/file_system.h b/dist/darwin_amd64/include/rocksdb/file_system.h index 13a2602..8292f0e 100644 --- a/dist/darwin_amd64/include/rocksdb/file_system.h +++ b/dist/darwin_amd64/include/rocksdb/file_system.h @@ -869,7 +869,8 @@ class FSWritableFile { size_t num_spanned_blocks = new_last_preallocated_block - last_preallocated_block_; Allocate(block_size * last_preallocated_block_, - block_size * num_spanned_blocks, options, dbg); + block_size * num_spanned_blocks, options, dbg) + .PermitUncheckedError(); last_preallocated_block_ = new_last_preallocated_block; } } @@ -1212,8 +1213,9 @@ class FileSystemWrapper : public FileSystem { class FSSequentialFileWrapper : public FSSequentialFile { public: - explicit FSSequentialFileWrapper(FSSequentialFile* target) - : target_(target) {} + explicit FSSequentialFileWrapper(FSSequentialFile* t) : target_(t) {} + + FSSequentialFile* target() const { return target_; } IOStatus Read(size_t n, const IOOptions& options, Slice* result, char* scratch, IODebugContext* dbg) override { @@ -1239,8 +1241,9 @@ class FSSequentialFileWrapper : public FSSequentialFile { class FSRandomAccessFileWrapper : public FSRandomAccessFile { public: - explicit FSRandomAccessFileWrapper(FSRandomAccessFile* target) - : target_(target) {} + explicit FSRandomAccessFileWrapper(FSRandomAccessFile* t) : target_(t) {} + + FSRandomAccessFile* target() const { return target_; } IOStatus Read(uint64_t offset, size_t n, const IOOptions& options, Slice* result, char* scratch, @@ -1275,6 +1278,8 @@ class FSWritableFileWrapper : public FSWritableFile { public: explicit FSWritableFileWrapper(FSWritableFile* t) : target_(t) {} + FSWritableFile* target() const { return target_; } + IOStatus Append(const Slice& data, const IOOptions& options, IODebugContext* dbg) override { return target_->Append(data, options, dbg); @@ -1358,7 +1363,9 @@ class FSWritableFileWrapper : public FSWritableFile { class FSRandomRWFileWrapper : public FSRandomRWFile { public: - explicit FSRandomRWFileWrapper(FSRandomRWFile* target) : target_(target) {} + explicit FSRandomRWFileWrapper(FSRandomRWFile* t) : target_(t) {} + + FSRandomRWFile* target() const { return target_; } bool use_direct_io() const override { return target_->use_direct_io(); } size_t GetRequiredBufferAlignment() const override { @@ -1392,7 +1399,7 @@ class FSRandomRWFileWrapper : public FSRandomRWFile { class FSDirectoryWrapper : public FSDirectory { public: - explicit FSDirectoryWrapper(FSDirectory* target) : target_(target) {} + explicit FSDirectoryWrapper(FSDirectory* t) : target_(t) {} IOStatus Fsync(const IOOptions& options, IODebugContext* dbg) override { return target_->Fsync(options, dbg); diff --git a/dist/darwin_amd64/include/rocksdb/listener.h b/dist/darwin_amd64/include/rocksdb/listener.h index 9757071..0d3c76d 100644 --- a/dist/darwin_amd64/include/rocksdb/listener.h +++ b/dist/darwin_amd64/include/rocksdb/listener.h @@ -149,19 +149,49 @@ struct 
TableFileDeletionInfo {
   Status status;
 };

-struct FileOperationInfo {
-  using TimePoint = std::chrono::time_point<std::chrono::system_clock,
-                                            std::chrono::nanoseconds>;
+enum class FileOperationType {
+  kRead,
+  kWrite,
+  kTruncate,
+  kClose,
+  kFlush,
+  kSync,
+  kFsync,
+  kRangeSync
+};

+struct FileOperationInfo {
+  using Duration = std::chrono::nanoseconds;
+  using SteadyTimePoint =
+      std::chrono::time_point<std::chrono::steady_clock, Duration>;
+  using SystemTimePoint =
+      std::chrono::time_point<std::chrono::system_clock, Duration>;
+  using StartTimePoint = std::pair<SystemTimePoint, SteadyTimePoint>;
+  using FinishTimePoint = SteadyTimePoint;
+
+  FileOperationType type;
   const std::string& path;
   uint64_t offset;
   size_t length;
-  const TimePoint& start_timestamp;
-  const TimePoint& finish_timestamp;
+  const Duration duration;
+  const SystemTimePoint& start_ts;
   Status status;

-  FileOperationInfo(const std::string& _path, const TimePoint& start,
-                    const TimePoint& finish)
-      : path(_path), start_timestamp(start), finish_timestamp(finish) {}
+  FileOperationInfo(const FileOperationType _type, const std::string& _path,
+                    const StartTimePoint& _start_ts,
+                    const FinishTimePoint& _finish_ts, const Status& _status)
+      : type(_type),
+        path(_path),
+        duration(std::chrono::duration_cast<std::chrono::nanoseconds>(
+            _finish_ts - _start_ts.second)),
+        start_ts(_start_ts.first),
+        status(_status) {}
+
+  static StartTimePoint StartNow() {
+    return std::make_pair(
+        std::chrono::system_clock::now(), std::chrono::steady_clock::now());
+  }
+
+  static FinishTimePoint FinishNow() {
+    return std::chrono::steady_clock::now();
+  }
 };

 struct FlushJobInfo {
@@ -460,7 +490,27 @@ class EventListener {
   // operation finishes.
   virtual void OnFileWriteFinish(const FileOperationInfo& /* info */) {}

-  // If true, the OnFileReadFinish and OnFileWriteFinish will be called. If
+  // A callback function for RocksDB which will be called whenever a file flush
+  // operation finishes.
+  virtual void OnFileFlushFinish(const FileOperationInfo& /* info */) {}
+
+  // A callback function for RocksDB which will be called whenever a file sync
+  // operation finishes.
+  virtual void OnFileSyncFinish(const FileOperationInfo& /* info */) {}
+
+  // A callback function for RocksDB which will be called whenever a file
+  // rangeSync operation finishes.
+  virtual void OnFileRangeSyncFinish(const FileOperationInfo& /* info */) {}
+
+  // A callback function for RocksDB which will be called whenever a file
+  // truncate operation finishes.
+  virtual void OnFileTruncateFinish(const FileOperationInfo& /* info */) {}
+
+  // A callback function for RocksDB which will be called whenever a file close
+  // operation finishes.
+  virtual void OnFileCloseFinish(const FileOperationInfo& /* info */) {}
+
+  // If true, the OnFile*Finish functions will be called. If
   // false, then they won't be called.
   virtual bool ShouldBeNotifiedOnFileIO() { return false; }

diff --git a/dist/darwin_amd64/include/rocksdb/metadata.h b/dist/darwin_amd64/include/rocksdb/metadata.h
index f1a9ee6..9a64a7a 100644
--- a/dist/darwin_amd64/include/rocksdb/metadata.h
+++ b/dist/darwin_amd64/include/rocksdb/metadata.h
@@ -62,7 +62,9 @@ struct SstFileMetaData {
         being_compacted(false),
         num_entries(0),
         num_deletions(0),
-        oldest_blob_file_number(0) {}
+        oldest_blob_file_number(0),
+        oldest_ancester_time(0),
+        file_creation_time(0) {}

   SstFileMetaData(const std::string& _file_name, uint64_t _file_number,
                   const std::string& _path, size_t _size,
@@ -117,6 +119,8 @@ struct SstFileMetaData {
   // oldest SST file that is the compaction ancester of this file.
   // The timestamp is provided by Env::GetCurrentTime().
   // 0 if the information is not available.
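The listener.h changes above give FileOperationInfo an operation type and a nanosecond duration, and add per-operation completion callbacks. A small sketch of a listener that uses them; the class name and the logging choice are illustrative:

    #include <iostream>

    #include "rocksdb/listener.h"

    using namespace ROCKSDB_NAMESPACE;

    // Opt in to file IO notifications and log flush/sync latencies using the
    // new duration field.
    class FileIoLogger : public EventListener {
     public:
      bool ShouldBeNotifiedOnFileIO() override { return true; }

      void OnFileFlushFinish(const FileOperationInfo& info) override {
        std::cerr << "flush " << info.path << " "
                  << info.duration.count() << " ns\n";
      }
      void OnFileSyncFinish(const FileOperationInfo& info) override {
        std::cerr << "sync " << info.path << " "
                  << info.duration.count() << " ns\n";
      }
    };

    // Wire-up: options.listeners.emplace_back(std::make_shared<FileIoLogger>());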
+  //
+  // Note: for TTL blob files, it contains the start of the expiration range.
   uint64_t oldest_ancester_time;
   // Timestamp when the SST file is created, provided by Env::GetCurrentTime().
   // 0 if the information is not available.

diff --git a/dist/darwin_amd64/include/rocksdb/options.h b/dist/darwin_amd64/include/rocksdb/options.h
index 040892e..5c75bd0 100644
--- a/dist/darwin_amd64/include/rocksdb/options.h
+++ b/dist/darwin_amd64/include/rocksdb/options.h
@@ -10,6 +10,7 @@
 #include
 #include
+#include
 #include
 #include
@@ -21,6 +22,7 @@
 #include "rocksdb/env.h"
 #include "rocksdb/file_checksum.h"
 #include "rocksdb/listener.h"
+#include "rocksdb/sst_partitioner.h"
 #include "rocksdb/universal_compaction.h"
 #include "rocksdb/version.h"
 #include "rocksdb/write_buffer_manager.h"
@@ -51,13 +53,6 @@ class InternalKeyComparator;
 class WalFilter;
 class FileSystem;

-enum class CpuPriority {
-  kIdle = 0,
-  kLow = 1,
-  kNormal = 2,
-  kHigh = 3,
-};
-
 // DB contents are stored in a set of blocks, each of which holds a
 // sequence of key,value pairs. Each block may be compressed before
 // being stored in a file. The following enum describes which
@@ -315,6 +310,15 @@ struct ColumnFamilyOptions : public AdvancedColumnFamilyOptions {
   // Default: nullptr
   std::shared_ptr<ConcurrentTaskLimiter> compaction_thread_limiter = nullptr;

+  // If non-nullptr, use the specified factory for a function to determine the
+  // partitioning of sst files. This helps compaction to split the files
+  // on interesting boundaries (key prefixes) to make propagation of sst
+  // files less write amplifying (covering the whole key space).
+  // THE FEATURE IS STILL EXPERIMENTAL
+  //
+  // Default: nullptr
+  std::shared_ptr<SstPartitionerFactory> sst_partitioner_factory = nullptr;
+
   // Create ColumnFamilyOptions with default values for all fields
   ColumnFamilyOptions();
   // Create ColumnFamilyOptions from Options
@@ -565,6 +569,8 @@ struct DBOptions {
   // concurrently perform a compaction job by breaking it into multiple,
   // smaller ones that are run simultaneously.
   // Default: 1 (i.e. no subcompactions)
+  //
+  // Dynamically changeable through SetDBOptions() API.
   uint32_t max_subcompactions = 1;

   // NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the
@@ -1144,6 +1150,23 @@
   // not be used for recovery if best_efforts_recovery is true.
   // Default: false
   bool best_efforts_recovery = false;
+
+  // It defines how many times db resume is called by a separate thread when
+  // background retryable IO Error happens. When background retryable IO
+  // Error happens, SetBGError is called to deal with the error. If the error
+  // can be auto-recovered (e.g., retryable IO Error during Flush or WAL
+  // write), then db resume is called in background to recover from the
+  // error. If this value is 0 or negative, db resume will not be called.
+  //
+  // Default: INT_MAX
+  int max_bgerror_resume_count = INT_MAX;
+
+  // If max_bgerror_resume_count is >= 2, db resume is called multiple times.
+  // This option decides how long to wait to retry the next resume if the
+  // previous resume fails and the redo-resume conditions are satisfied.
+  //
+  // Default: 1000000 (microseconds).
+  uint64_t bgerror_resume_retry_interval = 1000000;
 };

 // Options to control the behavior of a database (passed to DB::Open)

diff --git a/dist/darwin_amd64/include/rocksdb/persistent_cache.h b/dist/darwin_amd64/include/rocksdb/persistent_cache.h
index 9651812..e2dcfca 100644
--- a/dist/darwin_amd64/include/rocksdb/persistent_cache.h
+++ b/dist/darwin_amd64/include/rocksdb/persistent_cache.h
@@ -56,6 +56,12 @@ class PersistentCache {
   virtual StatsType Stats() = 0;

   virtual std::string GetPrintableOptions() const = 0;
+
+  // Return a new numeric id. May be used by multiple clients who are
+  // sharding the same persistent cache to partition the key space. Typically
+  // the client will allocate a new id at startup and prepend the id to its
+  // cache keys.
+  virtual uint64_t NewId() = 0;
 };

 // Factory method to create a new persistent cache

diff --git a/dist/darwin_amd64/include/rocksdb/sst_partitioner.h b/dist/darwin_amd64/include/rocksdb/sst_partitioner.h
new file mode 100644
index 0000000..5d18195
--- /dev/null
+++ b/dist/darwin_amd64/include/rocksdb/sst_partitioner.h
@@ -0,0 +1,135 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+
+#pragma once
+
+#include
+#include
+
+#include "rocksdb/rocksdb_namespace.h"
+#include "rocksdb/slice.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+class Slice;
+
+enum PartitionerResult : char {
+  // Partitioner does not require the creation of a new file
+  kNotRequired = 0x0,
+  // Partitioner forcefully requests the creation of a new file
+  kRequired = 0x1
+  // Additional constants can be added
+};
+
+struct PartitionerRequest {
+  PartitionerRequest(const Slice& prev_user_key_,
+                     const Slice& current_user_key_,
+                     uint64_t current_output_file_size_)
+      : prev_user_key(&prev_user_key_),
+        current_user_key(&current_user_key_),
+        current_output_file_size(current_output_file_size_) {}
+  const Slice* prev_user_key;
+  const Slice* current_user_key;
+  uint64_t current_output_file_size;
+};
+
+/*
+ * An SstPartitioner is a generic pluggable way of defining the partition
+ * of SST files. The compaction job will split the SST files on partition
+ * boundaries to lower the write amplification when an SST file is promoted
+ * to a higher level.
+ */
+class SstPartitioner {
+ public:
+  virtual ~SstPartitioner() {}
+
+  // Return the name of this partitioner.
+  virtual const char* Name() const = 0;
+
+  // Called for all keys in a compaction. When the partitioner wants to
+  // create a new SST file it returns kRequired: the compaction job will
+  // finish the current SST file, whose last key is the "prev_user_key"
+  // parameter, and start a new SST file whose first key is
+  // "current_user_key". Returns a decision on whether a partition boundary
+  // was detected and compaction should create a new file.
+  virtual PartitionerResult ShouldPartition(
+      const PartitionerRequest& request) = 0;
+
+  // Called with the smallest and largest keys in an SST file when compaction
+  // tries to do a trivial move. Returns true if the partitioner allows the
+  // trivial move.
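Two usage sketches for the options.h and sst_partitioner.h additions above. First, the background-error auto-resume knobs; the helper name and values are arbitrary:

    #include "rocksdb/options.h"

    ROCKSDB_NAMESPACE::Options MakeResilientOptions() {
      ROCKSDB_NAMESPACE::Options options;
      // Retry auto-recovery from retryable background IO errors at most five
      // times, waiting two seconds between attempts.
      options.max_bgerror_resume_count = 5;
      options.bgerror_resume_retry_interval = 2 * 1000 * 1000;  // microseconds
      return options;
    }

Second, plugging the fixed-prefix partitioner factory declared later in this header into the new sst_partitioner_factory option; the prefix length and path are illustrative:

    #include "rocksdb/db.h"
    #include "rocksdb/options.h"
    #include "rocksdb/sst_partitioner.h"

    using namespace ROCKSDB_NAMESPACE;

    int main() {
      Options options;
      options.create_if_missing = true;
      // Cut compaction output files whenever the first four key bytes change.
      options.sst_partitioner_factory = NewSstPartitionerFixedPrefixFactory(4);

      DB* db = nullptr;
      Status s = DB::Open(options, "/tmp/partitioned_demo", &db);
      if (s.ok()) delete db;
      return s.ok() ? 0 : 1;
    }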
+ virtual bool CanDoTrivialMove(const Slice& smallest_user_key, + const Slice& largest_user_key) = 0; + + // Context information of a compaction run + struct Context { + // Does this compaction run include all data files + bool is_full_compaction; + // Is this compaction requested by the client (true), + // or is it occurring as an automatic compaction process + bool is_manual_compaction; + // Output level for this compaction + int output_level; + // Smallest key for compaction + Slice smallest_user_key; + // Largest key for compaction + Slice largest_user_key; + }; +}; + +class SstPartitionerFactory { + public: + virtual ~SstPartitionerFactory() {} + + virtual std::unique_ptr CreatePartitioner( + const SstPartitioner::Context& context) const = 0; + + // Returns a name that identifies this partitioner factory. + virtual const char* Name() const = 0; +}; + +/* + * Fixed key prefix partitioner. It splits the output SST files when prefix + * defined by size changes. + */ +class SstPartitionerFixedPrefix : public SstPartitioner { + public: + explicit SstPartitionerFixedPrefix(size_t len) : len_(len) {} + + virtual ~SstPartitionerFixedPrefix() override {} + + const char* Name() const override { return "SstPartitionerFixedPrefix"; } + + PartitionerResult ShouldPartition(const PartitionerRequest& request) override; + + bool CanDoTrivialMove(const Slice& smallest_user_key, + const Slice& largest_user_key) override; + + private: + size_t len_; +}; + +/* + * Factory for fixed prefix partitioner. + */ +class SstPartitionerFixedPrefixFactory : public SstPartitionerFactory { + public: + explicit SstPartitionerFixedPrefixFactory(size_t len) : len_(len) {} + + virtual ~SstPartitionerFixedPrefixFactory() {} + + const char* Name() const override { + return "SstPartitionerFixedPrefixFactory"; + } + + std::unique_ptr CreatePartitioner( + const SstPartitioner::Context& /* context */) const override; + + private: + size_t len_; +}; + +extern std::shared_ptr +NewSstPartitionerFixedPrefixFactory(size_t prefix_len); + +} // namespace ROCKSDB_NAMESPACE diff --git a/dist/darwin_amd64/include/rocksdb/table.h b/dist/darwin_amd64/include/rocksdb/table.h index 47bf60e..95cdb8d 100644 --- a/dist/darwin_amd64/include/rocksdb/table.h +++ b/dist/darwin_amd64/include/rocksdb/table.h @@ -200,6 +200,40 @@ struct BlockBasedTableOptions { // incompatible with block-based filters. bool partition_filters = false; + // EXPERIMENTAL Option to generate Bloom filters that minimize memory + // internal fragmentation. + // + // When false, malloc_usable_size is not available, or format_version < 5, + // filters are generated without regard to internal fragmentation when + // loaded into memory (historical behavior). When true (and + // malloc_usable_size is available and format_version >= 5), then Bloom + // filters are generated to "round up" and "round down" their sizes to + // minimize internal fragmentation when loaded into memory, assuming the + // reading DB has the same memory allocation characteristics as the + // generating DB. This option does not break forward or backward + // compatibility. + // + // While individual filters will vary in bits/key and false positive rate + // when setting is true, the implementation attempts to maintain a weighted + // average FP rate for filters consistent with this option set to false. 
+ // + // With Jemalloc for example, this setting is expected to save about 10% of + // the memory footprint and block cache charge of filters, while increasing + // disk usage of filters by about 1-2% due to encoding efficiency losses + // with variance in bits/key. + // + // NOTE: Because some memory counted by block cache might be unmapped pages + // within internal fragmentation, this option can increase observed RSS + // memory usage. With cache_index_and_filter_blocks=true, this option makes + // the block cache better at using space it is allowed. + // + // NOTE: Do not set to true if you do not trust malloc_usable_size. With + // this option, RocksDB might access an allocated memory object beyond its + // original size if malloc_usable_size says it is safe to do so. While this + // can be considered bad practice, it should not produce undefined behavior + // unless malloc_usable_size is buggy or broken. + bool optimize_filters_for_memory = false; + // Use delta encoding to compress keys in blocks. // ReadOptions::pin_data requires this option to be disabled. // @@ -522,7 +556,19 @@ class TableFactory { const TableReaderOptions& table_reader_options, std::unique_ptr&& file, uint64_t file_size, std::unique_ptr* table_reader, - bool prefetch_index_and_filter_in_cache = true) const = 0; + bool prefetch_index_and_filter_in_cache = true) const { + ReadOptions ro; + return NewTableReader(ro, table_reader_options, std::move(file), file_size, + table_reader, prefetch_index_and_filter_in_cache); + } + + // Overload of the above function that allows the caller to pass in a + // ReadOptions + virtual Status NewTableReader( + const ReadOptions& ro, const TableReaderOptions& table_reader_options, + std::unique_ptr&& file, uint64_t file_size, + std::unique_ptr* table_reader, + bool prefetch_index_and_filter_in_cache) const = 0; // Return a table builder to write to a file for this table type. // diff --git a/dist/darwin_amd64/include/rocksdb/table_properties.h b/dist/darwin_amd64/include/rocksdb/table_properties.h index 213896f..ba3eca7 100644 --- a/dist/darwin_amd64/include/rocksdb/table_properties.h +++ b/dist/darwin_amd64/include/rocksdb/table_properties.h @@ -30,6 +30,8 @@ typedef std::map UserCollectedProperties; // table properties' human-readable names in the property block. struct TablePropertiesNames { + static const std::string kDbId; + static const std::string kDbSessionId; static const std::string kDataSize; static const std::string kIndexSize; static const std::string kIndexPartitions; @@ -193,6 +195,17 @@ struct TableProperties { // Actual SST file creation time. 0 means unknown. uint64_t file_creation_time = 0; + // DB identity + // db_id is an identifier generated the first time the DB is created + // If DB identity is unset or unassigned, `db_id` will be an empty string. + std::string db_id; + + // DB session identity + // db_session_id is an identifier that gets reset every time the DB is opened + // If DB session identity is unset or unassigned, `db_session_id` will be an + // empty string. + std::string db_session_id; + // Name of the column family with which this SST file is associated. // If column family is unknown, `column_family_name` will be an empty string. 
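A sketch of enabling the experimental optimize_filters_for_memory option from the table.h hunk above; per the comments it only takes effect with format_version >= 5 and a malloc that reports usable size, and the bits-per-key value here is arbitrary:

    #include "rocksdb/filter_policy.h"
    #include "rocksdb/options.h"
    #include "rocksdb/table.h"

    using namespace ROCKSDB_NAMESPACE;

    Options MakeFilterFriendlyOptions() {
      BlockBasedTableOptions table_options;
      table_options.format_version = 5;  // required for the size rounding
      table_options.filter_policy.reset(NewBloomFilterPolicy(10.0));
      table_options.optimize_filters_for_memory = true;  // EXPERIMENTAL

      Options options;
      options.table_factory.reset(NewBlockBasedTableFactory(table_options));
      return options;
    }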
std::string column_family_name; diff --git a/dist/darwin_amd64/include/rocksdb/utilities/backupable_db.h b/dist/darwin_amd64/include/rocksdb/utilities/backupable_db.h index 3f7ec99..5bea203 100644 --- a/dist/darwin_amd64/include/rocksdb/utilities/backupable_db.h +++ b/dist/darwin_amd64/include/rocksdb/utilities/backupable_db.h @@ -24,6 +24,11 @@ namespace ROCKSDB_NAMESPACE { +// The default DB file checksum function name. +constexpr char kDbFileChecksumFuncName[] = "FileChecksumCrc32c"; +// The default BackupEngine file checksum function name. +constexpr char kBackupFileChecksumFuncName[] = "crc32c"; + struct BackupableDBOptions { // Where to keep the backup files. Has to be different than dbname_ // Best to set this to dbname_ + "/backups" @@ -87,12 +92,13 @@ struct BackupableDBOptions { // Default: nullptr std::shared_ptr restore_rate_limiter{nullptr}; - // Only used if share_table_files is set to true. If true, will consider that - // backups can come from different databases, hence a sst is not uniquely - // identifed by its name, but by the triple (file name, crc32c, file length) + // Only used if share_table_files is set to true. If true, will consider + // that backups can come from different databases, even differently mutated + // databases with the same DB ID. See share_files_with_checksum_naming and + // ShareFilesNaming for details on how table files names are made + // unique between databases. + // // Default: false - // Note: this is an experimental option, and you'll need to set it manually - // *turn it on only if you know what you're doing* bool share_files_with_checksum; // Up to this many background threads will copy files for CreateNewBackup() @@ -116,6 +122,80 @@ struct BackupableDBOptions { // Default: INT_MAX int max_valid_backups_to_open; + // ShareFilesNaming describes possible naming schemes for backup + // table file names when the table files are stored in the shared_checksum + // directory (i.e., both share_table_files and share_files_with_checksum + // are true). + enum ShareFilesNaming : int { + // Backup SST filenames are __.sst + // where is an unsigned decimal integer. This is the + // original/legacy naming scheme for share_files_with_checksum, + // with two problems: + // * At massive scale, collisions on this triple with different file + // contents is plausible. + // * Determining the name to use requires computing the checksum, + // so generally requires reading the whole file even if the file + // is already backed up. + // ** ONLY RECOMMENDED FOR PRESERVING OLD BEHAVIOR ** + kLegacyCrc32cAndFileSize = 1, + + // Backup SST filenames are _s.sst. This + // pair of values should be very strongly unique for a given SST file + // and easily determined before computing a checksum. The 's' indicates + // the value is a DB session id, not a checksum. + // + // Exceptions: + // * For old SST files without a DB session id, kLegacyCrc32cAndFileSize + // will be used instead, matching the names assigned by RocksDB versions + // not supporting the newer naming scheme. + // * See also flags below. + kUseDbSessionId = 2, + + kMaskNoNamingFlags = 0xffff, + + // If not already part of the naming scheme, insert + // _ + // before .sst in the name. In case of user code actually parsing the + // last _ before the .sst as the file size, this preserves that + // feature of kLegacyCrc32cAndFileSize. In other words, this option makes + // official that unofficial feature of the backup metadata. 
+ // + // We do not consider SST file sizes to have sufficient entropy to + // contribute significantly to naming uniqueness. + kFlagIncludeFileSize = 1 << 31, + + // When encountering an SST file from a Facebook-internal early + // release of 6.12, use the default naming scheme in effect for + // when the SST file was generated (assuming full file checksum + // was not set to GetFileChecksumGenCrc32cFactory()). That naming is + // _.sst + // and ignores kFlagIncludeFileSize setting. + // NOTE: This flag is intended to be temporary and should be removed + // in a later release. + kFlagMatchInterimNaming = 1 << 30, + + kMaskNamingFlags = ~kMaskNoNamingFlags, + }; + + // Naming option for share_files_with_checksum table files. See + // ShareFilesNaming for details. + // + // Modifying this option cannot introduce a downgrade compatibility issue + // because RocksDB can read, restore, and delete backups using different file + // names, and it's OK for a backup directory to use a mixture of table file + // naming schemes. + // + // However, modifying this option and saving more backups to the same + // directory can lead to the same file getting saved again to that + // directory, under the new shared name in addition to the old shared + // name. + // + // Default: kUseDbSessionId | kFlagIncludeFileSize | kFlagMatchInterimNaming + // + // Note: This option comes into effect only if both share_files_with_checksum + // and share_table_files are true. + ShareFilesNaming share_files_with_checksum_naming; + void Dump(Logger* logger) const; explicit BackupableDBOptions( @@ -125,7 +205,10 @@ struct BackupableDBOptions { bool _backup_log_files = true, uint64_t _backup_rate_limit = 0, uint64_t _restore_rate_limit = 0, int _max_background_operations = 1, uint64_t _callback_trigger_interval_size = 4 * 1024 * 1024, - int _max_valid_backups_to_open = INT_MAX) + int _max_valid_backups_to_open = INT_MAX, + ShareFilesNaming _share_files_with_checksum_naming = + static_cast(kUseDbSessionId | kFlagIncludeFileSize | + kFlagMatchInterimNaming)) : backup_dir(_backup_dir), backup_env(_backup_env), share_table_files(_share_table_files), @@ -138,18 +221,39 @@ struct BackupableDBOptions { share_files_with_checksum(false), max_background_operations(_max_background_operations), callback_trigger_interval_size(_callback_trigger_interval_size), - max_valid_backups_to_open(_max_valid_backups_to_open) { + max_valid_backups_to_open(_max_valid_backups_to_open), + share_files_with_checksum_naming(_share_files_with_checksum_naming) { assert(share_table_files || !share_files_with_checksum); + assert((share_files_with_checksum_naming & kMaskNoNamingFlags) != 0); } }; +inline BackupableDBOptions::ShareFilesNaming operator&( + BackupableDBOptions::ShareFilesNaming lhs, + BackupableDBOptions::ShareFilesNaming rhs) { + int l = static_cast(lhs); + int r = static_cast(rhs); + assert(r == BackupableDBOptions::kMaskNoNamingFlags || + (r & BackupableDBOptions::kMaskNoNamingFlags) == 0); + return static_cast(l & r); +} + +inline BackupableDBOptions::ShareFilesNaming operator|( + BackupableDBOptions::ShareFilesNaming lhs, + BackupableDBOptions::ShareFilesNaming rhs) { + int l = static_cast(lhs); + int r = static_cast(rhs); + assert((r & BackupableDBOptions::kMaskNoNamingFlags) == 0); + return static_cast(l | r); +} + struct CreateBackupOptions { // Flush will always trigger if 2PC is enabled. // If write-ahead logs are disabled, set flush_before_backup=true to // avoid losing unflushed key/value pairs from the memtable. 
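Two sketches against the backupable_db.h additions. Configuring the new naming scheme, using the operator| defined above to combine the base scheme with a flag (the backup directory is a placeholder):

    #include "rocksdb/utilities/backupable_db.h"

    ROCKSDB_NAMESPACE::BackupableDBOptions MakeBackupOptions() {
      ROCKSDB_NAMESPACE::BackupableDBOptions opts("/backups/mydb");
      opts.share_table_files = true;
      opts.share_files_with_checksum = true;
      // Session-id based names that also keep the file size in the name.
      opts.share_files_with_checksum_naming =
          ROCKSDB_NAMESPACE::BackupableDBOptions::kUseDbSessionId |
          ROCKSDB_NAMESPACE::BackupableDBOptions::kFlagIncludeFileSize;
      return opts;
    }

And the strengthened verification entry point declared below, using the new verify_with_checksum flag (false preserves the old size-only check):

    #include <vector>

    #include "rocksdb/utilities/backupable_db.h"

    using namespace ROCKSDB_NAMESPACE;

    // Verify the most recent backup byte-for-byte.
    Status VerifyNewestBackup(BackupEngine* engine) {
      std::vector<BackupInfo> backups;
      engine->GetBackupInfo(&backups);
      if (backups.empty()) return Status::NotFound("no backups");
      return engine->VerifyBackup(backups.back().backup_id,
                                  /*verify_with_checksum=*/true);
    }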
   bool flush_before_backup = false;

-  // Callback for reporting progress.
+  // Callback for reporting progress, based on callback_trigger_interval_size.
   std::function<void()> progress_callback = []() {};

   // If false, background_thread_cpu_priority is ignored.
@@ -274,16 +378,23 @@ class BackupEngineReadOnly {
     return RestoreDBFromLatestBackup(options, db_dir, wal_dir);
   }

+  // If verify_with_checksum is true, this function
+  // inspects the current checksums and file sizes of backup files to see if
+  // they match our expectation.
+  //
+  // If verify_with_checksum is false, this function
   // checks that each file exists and that the size of the file matches our
-  // expectations. it does not check file checksum.
+  // expectation. It does not check file checksum.
   //
   // If this BackupEngine created the backup, it compares the files' current
-  // sizes against the number of bytes written to them during creation.
-  // Otherwise, it compares the files' current sizes against their sizes when
-  // the BackupEngine was opened.
+  // sizes (and current checksum) against the number of bytes written to
+  // them (and the checksum calculated) during creation.
+  // Otherwise, it compares the files' current sizes (and checksums) against
+  // their sizes (and checksums) when the BackupEngine was opened.
   //
   // Returns Status::OK() if all checks are good
-  virtual Status VerifyBackup(BackupID backup_id) = 0;
+  virtual Status VerifyBackup(BackupID backup_id,
+                              bool verify_with_checksum = false) = 0;
 };

 // A backup engine for creating new backups.
@@ -395,10 +506,17 @@ class BackupEngine {
     return RestoreDBFromLatestBackup(options, db_dir, wal_dir);
   }

+  // If verify_with_checksum is true, this function
+  // inspects the current checksums and file sizes of backup files to see if
+  // they match our expectation.
+  //
+  // If verify_with_checksum is false, this function
   // checks that each file exists and that the size of the file matches our
-  // expectations. it does not check file checksum.
+  // expectation. It does not check file checksum.
+  //
   // Returns Status::OK() if all checks are good
-  virtual Status VerifyBackup(BackupID backup_id) = 0;
+  virtual Status VerifyBackup(BackupID backup_id,
+                              bool verify_with_checksum = false) = 0;

   // Will delete any files left over from incomplete creation or deletion of
   // a backup.
This is not normally needed as those operations also clean up diff --git a/dist/darwin_amd64/include/rocksdb/utilities/stackable_db.h b/dist/darwin_amd64/include/rocksdb/utilities/stackable_db.h index 9888fa2..5a35ca7 100644 --- a/dist/darwin_amd64/include/rocksdb/utilities/stackable_db.h +++ b/dist/darwin_amd64/include/rocksdb/utilities/stackable_db.h @@ -347,6 +347,11 @@ class StackableDB : public DB { db_->GetLiveFilesMetaData(metadata); } + virtual Status GetLiveFilesChecksumInfo( + FileChecksumList* checksum_list) override { + return db_->GetLiveFilesChecksumInfo(checksum_list); + } + virtual void GetColumnFamilyMetaData(ColumnFamilyHandle* column_family, ColumnFamilyMetaData* cf_meta) override { db_->GetColumnFamilyMetaData(column_family, cf_meta); @@ -400,6 +405,10 @@ class StackableDB : public DB { return db_->GetDbIdentity(identity); } + virtual Status GetDbSessionId(std::string& session_id) const override { + return db_->GetDbSessionId(session_id); + } + using DB::SetOptions; virtual Status SetOptions(ColumnFamilyHandle* column_family_handle, const std::unordered_map& diff --git a/dist/darwin_amd64/include/rocksdb/version.h b/dist/darwin_amd64/include/rocksdb/version.h index 033a8ca..fb9f46b 100644 --- a/dist/darwin_amd64/include/rocksdb/version.h +++ b/dist/darwin_amd64/include/rocksdb/version.h @@ -5,8 +5,8 @@ #pragma once #define ROCKSDB_MAJOR 6 -#define ROCKSDB_MINOR 11 -#define ROCKSDB_PATCH 6 +#define ROCKSDB_MINOR 12 +#define ROCKSDB_PATCH 7 // Do not use these. We made the mistake of declaring macros starting with // double underscore. Now we have to live with our choice. We'll deprecate these diff --git a/dist/darwin_amd64/lib/liblz4.a b/dist/darwin_amd64/lib/liblz4.a index 5ec7067..dbea7ab 100644 Binary files a/dist/darwin_amd64/lib/liblz4.a and b/dist/darwin_amd64/lib/liblz4.a differ diff --git a/dist/darwin_amd64/lib/librocksdb.a b/dist/darwin_amd64/lib/librocksdb.a index fb12e0f..c39dd95 100644 Binary files a/dist/darwin_amd64/lib/librocksdb.a and b/dist/darwin_amd64/lib/librocksdb.a differ diff --git a/dist/darwin_amd64/lib/libsnappy.a b/dist/darwin_amd64/lib/libsnappy.a index dee570d..dbfc1a1 100644 Binary files a/dist/darwin_amd64/lib/libsnappy.a and b/dist/darwin_amd64/lib/libsnappy.a differ diff --git a/dist/darwin_amd64/lib/libz.a b/dist/darwin_amd64/lib/libz.a index d3974b6..6b00604 100644 Binary files a/dist/darwin_amd64/lib/libz.a and b/dist/darwin_amd64/lib/libz.a differ diff --git a/dist/darwin_amd64/lib/libzstd.a b/dist/darwin_amd64/lib/libzstd.a index 89f37f0..0317ff4 100644 Binary files a/dist/darwin_amd64/lib/libzstd.a and b/dist/darwin_amd64/lib/libzstd.a differ diff --git a/dist/linux_amd64/include/rocksdb/advanced_options.h b/dist/linux_amd64/include/rocksdb/advanced_options.h index 574e939..5c3a19a 100644 --- a/dist/linux_amd64/include/rocksdb/advanced_options.h +++ b/dist/linux_amd64/include/rocksdb/advanced_options.h @@ -643,6 +643,8 @@ struct AdvancedColumnFamilyOptions { bool optimize_filters_for_hits = false; // After writing every SST file, reopen it and read all the keys. 
+ // Checks the hash of all of the keys and values written versus the + // keys in the file and signals a corruption if they do not match // // Default: false // diff --git a/dist/linux_amd64/include/rocksdb/c.h b/dist/linux_amd64/include/rocksdb/c.h index 0d07e30..37da5f1 100644 --- a/dist/linux_amd64/include/rocksdb/c.h +++ b/dist/linux_amd64/include/rocksdb/c.h @@ -171,6 +171,11 @@ rocksdb_backup_engine_restore_db_from_latest_backup( rocksdb_backup_engine_t* be, const char* db_dir, const char* wal_dir, const rocksdb_restore_options_t* restore_options, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_restore_db_from_backup( + rocksdb_backup_engine_t* be, const char* db_dir, const char* wal_dir, + const rocksdb_restore_options_t* restore_options, const uint32_t backup_id, + char** errptr); + extern ROCKSDB_LIBRARY_API const rocksdb_backup_engine_info_t* rocksdb_backup_engine_get_backup_info(rocksdb_backup_engine_t* be); @@ -320,6 +325,21 @@ extern ROCKSDB_LIBRARY_API void rocksdb_multi_get_cf( const size_t* keys_list_sizes, char** values_list, size_t* values_list_sizes, char** errs); +// The value is only allocated (using malloc) and returned if it is found and +// value_found isn't NULL. In that case the user is responsible for freeing it. +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_key_may_exist( + rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key, + size_t key_len, char** value, size_t* val_len, const char* timestamp, + size_t timestamp_len, unsigned char* value_found); + +// The value is only allocated (using malloc) and returned if it is found and +// value_found isn't NULL. In that case the user is responsible for freeing it. +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_key_may_exist_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t key_len, char** value, size_t* val_len, const char* timestamp, + size_t timestamp_len, unsigned char* value_found); + extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* rocksdb_create_iterator( rocksdb_t* db, const rocksdb_readoptions_t* options); @@ -966,106 +986,192 @@ extern ROCKSDB_LIBRARY_API uint32_t rocksdb_options_get_max_subcompactions(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_jobs( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_background_jobs( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_compactions( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_background_compactions( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_base_background_compactions( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_base_background_compactions( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_flushes( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_background_flushes( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_log_file_size( rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_max_log_file_size(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_log_file_time_to_roll( rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_log_file_time_to_roll(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_keep_log_file_num( 
rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_keep_log_file_num(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_recycle_log_file_num( rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_recycle_log_file_num(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_soft_rate_limit( rocksdb_options_t*, double); +extern ROCKSDB_LIBRARY_API double rocksdb_options_get_soft_rate_limit( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hard_rate_limit( rocksdb_options_t*, double); +extern ROCKSDB_LIBRARY_API double rocksdb_options_get_hard_rate_limit( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_soft_pending_compaction_bytes_limit( rocksdb_options_t* opt, size_t v); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_soft_pending_compaction_bytes_limit(rocksdb_options_t* opt); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hard_pending_compaction_bytes_limit( rocksdb_options_t* opt, size_t v); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_hard_pending_compaction_bytes_limit(rocksdb_options_t* opt); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_rate_limit_delay_max_milliseconds(rocksdb_options_t*, unsigned int); +extern ROCKSDB_LIBRARY_API unsigned int +rocksdb_options_get_rate_limit_delay_max_milliseconds(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_manifest_file_size( rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_max_manifest_file_size(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_table_cache_numshardbits( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_table_cache_numshardbits( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_table_cache_remove_scan_count_limit(rocksdb_options_t*, int); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_arena_block_size( rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_arena_block_size(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_fsync( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_use_fsync( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_db_log_dir( rocksdb_options_t*, const char*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_dir(rocksdb_options_t*, const char*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_WAL_ttl_seconds( rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_WAL_ttl_seconds(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_WAL_size_limit_MB( rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_WAL_size_limit_MB(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_manifest_preallocation_size( rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_manifest_preallocation_size(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_purge_redundant_kvs_while_flush(rocksdb_options_t*, unsigned char); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_mmap_reads( rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_allow_mmap_reads( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_mmap_writes( rocksdb_options_t*, unsigned 
char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_allow_mmap_writes( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_direct_reads( rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_use_direct_reads( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_direct_io_for_flush_and_compaction(rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_use_direct_io_for_flush_and_compaction(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_is_fd_close_on_exec( rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_is_fd_close_on_exec(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_skip_log_error_on_recovery( rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_skip_log_error_on_recovery(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_stats_dump_period_sec( rocksdb_options_t*, unsigned int); +extern ROCKSDB_LIBRARY_API unsigned int +rocksdb_options_get_stats_dump_period_sec(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_stats_persist_period_sec( + rocksdb_options_t*, unsigned int); +extern ROCKSDB_LIBRARY_API unsigned int +rocksdb_options_get_stats_persist_period_sec(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_advise_random_on_open( rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_advise_random_on_open(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_access_hint_on_compaction_start(rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_options_get_access_hint_on_compaction_start(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_adaptive_mutex( rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_use_adaptive_mutex( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bytes_per_sync( rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_bytes_per_sync(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_bytes_per_sync( rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_wal_bytes_per_sync(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_writable_file_max_buffer_size(rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_writable_file_max_buffer_size(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_concurrent_memtable_write(rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_allow_concurrent_memtable_write(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_enable_write_thread_adaptive_yield(rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_enable_write_thread_adaptive_yield(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_sequential_skip_in_iterations(rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_max_sequential_skip_in_iterations(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_disable_auto_compactions( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API 
unsigned char +rocksdb_options_get_disable_auto_compactions(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_optimize_filters_for_hits( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_optimize_filters_for_hits(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_delete_obsolete_files_period_micros(rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_delete_obsolete_files_period_micros(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_prepare_for_bulk_load( rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_memtable_vector_rep( rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_memtable_prefix_bloom_size_ratio( rocksdb_options_t*, double); +extern ROCKSDB_LIBRARY_API double +rocksdb_options_get_memtable_prefix_bloom_size_ratio(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_compaction_bytes( rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_max_compaction_bytes(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hash_skip_list_rep( rocksdb_options_t*, size_t, int32_t, int32_t); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hash_link_list_rep( @@ -1078,17 +1184,29 @@ extern ROCKSDB_LIBRARY_API void rocksdb_options_set_min_level_to_compress( extern ROCKSDB_LIBRARY_API void rocksdb_options_set_memtable_huge_page_size( rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_memtable_huge_page_size(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_successive_merges( rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_max_successive_merges(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bloom_locality( rocksdb_options_t*, uint32_t); +extern ROCKSDB_LIBRARY_API uint32_t +rocksdb_options_get_bloom_locality(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_inplace_update_support( rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_inplace_update_support(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_inplace_update_num_locks( rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_inplace_update_num_locks(rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_report_bg_io_stats( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_report_bg_io_stats( + rocksdb_options_t*); enum { rocksdb_tolerate_corrupted_tail_records_recovery = 0, @@ -1098,6 +1216,8 @@ enum { }; extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_recovery_mode( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_wal_recovery_mode( + rocksdb_options_t*); enum { rocksdb_no_compression = 0, @@ -1111,8 +1231,12 @@ enum { }; extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compression( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_compression( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bottommost_compression( rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_bottommost_compression( + rocksdb_options_t*); enum { rocksdb_level_compaction = 0, @@ -1121,6 +1245,8 @@ enum { }; extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compaction_style( 
rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_compaction_style( + rocksdb_options_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_universal_compaction_options( rocksdb_options_t*, rocksdb_universal_compaction_options_t*); @@ -1130,6 +1256,8 @@ extern ROCKSDB_LIBRARY_API void rocksdb_options_set_ratelimiter( rocksdb_options_t* opt, rocksdb_ratelimiter_t* limiter); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_atomic_flush( rocksdb_options_t* opt, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_atomic_flush( + rocksdb_options_t* opt); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_row_cache( rocksdb_options_t* opt, rocksdb_cache_t* cache diff --git a/dist/linux_amd64/include/rocksdb/compaction_filter.h b/dist/linux_amd64/include/rocksdb/compaction_filter.h index 9765078..ed17889 100644 --- a/dist/linux_amd64/include/rocksdb/compaction_filter.h +++ b/dist/linux_amd64/include/rocksdb/compaction_filter.h @@ -45,6 +45,8 @@ class CompactionFilter { kRemove, kChangeValue, kRemoveAndSkipUntil, + kChangeBlobIndex, // used internally by BlobDB. + kIOError, // used internally by BlobDB. }; enum class BlobDecision { kKeep, kChangeValue, kCorruption, kIOError }; diff --git a/dist/linux_amd64/include/rocksdb/db.h b/dist/linux_amd64/include/rocksdb/db.h index 08609f3..5d69a2a 100644 --- a/dist/linux_amd64/include/rocksdb/db.h +++ b/dist/linux_amd64/include/rocksdb/db.h @@ -1354,6 +1354,9 @@ class DB { virtual void GetLiveFilesMetaData( std::vector* /*metadata*/) {} + // Return a list of all table checksum info + virtual Status GetLiveFilesChecksumInfo(FileChecksumList* checksum_list) = 0; + // Obtains the meta data of the specified column family of the DB. virtual void GetColumnFamilyMetaData(ColumnFamilyHandle* /*column_family*/, ColumnFamilyMetaData* /*metadata*/) {} @@ -1543,6 +1546,13 @@ class DB { // Returns Status::OK if identity could be set properly virtual Status GetDbIdentity(std::string& identity) const = 0; + // Return a unique identifier for each DB object that is opened + // This DB session ID should be unique among all open DB instances on all + // hosts, and should be unique among re-openings of the same or other DBs. + // (Two open DBs have the same identity from other function GetDbIdentity when + // one is physically copied from the other.) + virtual Status GetDbSessionId(std::string& session_id) const = 0; + // Returns default column family handle virtual ColumnFamilyHandle* DefaultColumnFamily() const = 0; diff --git a/dist/linux_amd64/include/rocksdb/env.h b/dist/linux_amd64/include/rocksdb/env.h index e1c54d7..4af2171 100644 --- a/dist/linux_amd64/include/rocksdb/env.h +++ b/dist/linux_amd64/include/rocksdb/env.h @@ -61,6 +61,13 @@ class FileSystem; const size_t kDefaultPageSize = 4 * 1024; +enum class CpuPriority { + kIdle = 0, + kLow = 1, + kNormal = 2, + kHigh = 3, +}; + // Options while opening a file to read/write struct EnvOptions { // Construct with default Options @@ -474,6 +481,13 @@ class Env { // Lower IO priority for threads from the specified pool. virtual void LowerThreadPoolIOPriority(Priority /*pool*/ = LOW) {} + // Lower CPU priority for threads from the specified pool. + virtual Status LowerThreadPoolCPUPriority(Priority /*pool*/, + CpuPriority /*pri*/) { + return Status::NotSupported( + "Env::LowerThreadPoolCPUPriority(Priority, CpuPriority) not supported"); + } + // Lower CPU priority for threads from the specified pool. 
virtual void LowerThreadPoolCPUPriority(Priority /*pool*/ = LOW) {} @@ -1355,14 +1369,18 @@ class EnvWrapper : public Env { return target_->IncBackgroundThreadsIfNeeded(num, pri); } - void LowerThreadPoolIOPriority(Priority pool = LOW) override { + void LowerThreadPoolIOPriority(Priority pool) override { target_->LowerThreadPoolIOPriority(pool); } - void LowerThreadPoolCPUPriority(Priority pool = LOW) override { + void LowerThreadPoolCPUPriority(Priority pool) override { target_->LowerThreadPoolCPUPriority(pool); } + Status LowerThreadPoolCPUPriority(Priority pool, CpuPriority pri) override { + return target_->LowerThreadPoolCPUPriority(pool, pri); + } + std::string TimeToString(uint64_t time) override { return target_->TimeToString(time); } diff --git a/dist/linux_amd64/include/rocksdb/env_encryption.h b/dist/linux_amd64/include/rocksdb/env_encryption.h index a4db10f..e7c7e35 100644 --- a/dist/linux_amd64/include/rocksdb/env_encryption.h +++ b/dist/linux_amd64/include/rocksdb/env_encryption.h @@ -169,8 +169,10 @@ class CTREncryptionProvider : public EncryptionProvider { virtual ~CTREncryptionProvider() {} // GetPrefixLength returns the length of the prefix that is added to every - // file and used for storing encryption options. For optimal performance, the - // prefix length should be a multiple of the page size. + // file + // and used for storing encryption options. + // For optimal performance, the prefix length should be a multiple of + // the page size. virtual size_t GetPrefixLength() override; // CreateNewPrefix initialized an allocated block of prefix memory @@ -194,13 +196,243 @@ class CTREncryptionProvider : public EncryptionProvider { size_t blockSize); // CreateCipherStreamFromPrefix creates a block access cipher stream for a - // file given given name and options. The given prefix is already decrypted. + // file given + // given name and options. The given prefix is already decrypted. virtual Status CreateCipherStreamFromPrefix( const std::string& fname, const EnvOptions& options, uint64_t initialCounter, const Slice& iv, const Slice& prefix, std::unique_ptr* result); }; +class EncryptedSequentialFile : public SequentialFile { + protected: + std::unique_ptr file_; + std::unique_ptr stream_; + uint64_t offset_; + size_t prefixLength_; + + public: + // Default ctor. Given underlying sequential file is supposed to be at + // offset == prefixLength. + EncryptedSequentialFile(std::unique_ptr&& f, + std::unique_ptr&& s, + size_t prefixLength) + : file_(std::move(f)), + stream_(std::move(s)), + offset_(prefixLength), + prefixLength_(prefixLength) {} + + // Read up to "n" bytes from the file. "scratch[0..n-1]" may be + // written by this routine. Sets "*result" to the data that was + // read (including if fewer than "n" bytes were successfully read). + // May set "*result" to point at data in "scratch[0..n-1]", so + // "scratch[0..n-1]" must be live when "*result" is used. + // If an error was encountered, returns a non-OK status. + // + // REQUIRES: External synchronization + virtual Status Read(size_t n, Slice* result, char* scratch) override; + + // Skip "n" bytes from the file. This is guaranteed to be no + // slower that reading the same data, but may be faster. + // + // If end of file is reached, skipping will stop at the end of the + // file, and Skip will return OK. + // + // REQUIRES: External synchronization + virtual Status Skip(uint64_t n) override; + + // Indicates the upper layers if the current SequentialFile implementation + // uses direct IO. 
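The env.h hunk above adds the CpuPriority enum and a Status-returning LowerThreadPoolCPUPriority overload while keeping the old void variant. A small sketch (not from this diff) of how a caller might prefer the new overload and fall back when an Env does not implement it:

```cpp
// Sketch: request idle CPU priority for the LOW (compaction) pool.
#include "rocksdb/env.h"

void LowerCompactionCpuPriority(rocksdb::Env* env) {
  rocksdb::Status s = env->LowerThreadPoolCPUPriority(
      rocksdb::Env::LOW, rocksdb::CpuPriority::kIdle);
  if (s.IsNotSupported()) {
    // The base Env returns NotSupported; fall back to the legacy overload,
    // which picks the lowest priority the platform supports.
    env->LowerThreadPoolCPUPriority(rocksdb::Env::LOW);
  }
}
```

A plain `LowerCompactionCpuPriority(rocksdb::Env::Default());` would exercise it; only Envs that override the two-argument form report a useful Status.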
+ virtual bool use_direct_io() const override; + + // Use the returned alignment value to allocate + // aligned buffer for Direct I/O + virtual size_t GetRequiredBufferAlignment() const override; + + // Remove any kind of caching of data from the offset to offset+length + // of this file. If the length is 0, then it refers to the end of file. + // If the system is not caching the file contents, then this is a noop. + virtual Status InvalidateCache(size_t offset, size_t length) override; + + // Positioned Read for direct I/O + // If Direct I/O enabled, offset, n, and scratch should be properly aligned + virtual Status PositionedRead(uint64_t offset, size_t n, Slice* result, + char* scratch) override; +}; + +// A file abstraction for randomly reading the contents of a file. +class EncryptedRandomAccessFile : public RandomAccessFile { + protected: + std::unique_ptr file_; + std::unique_ptr stream_; + size_t prefixLength_; + + public: + EncryptedRandomAccessFile(std::unique_ptr&& f, + std::unique_ptr&& s, + size_t prefixLength) + : file_(std::move(f)), + stream_(std::move(s)), + prefixLength_(prefixLength) {} + + // Read up to "n" bytes from the file starting at "offset". + // "scratch[0..n-1]" may be written by this routine. Sets "*result" + // to the data that was read (including if fewer than "n" bytes were + // successfully read). May set "*result" to point at data in + // "scratch[0..n-1]", so "scratch[0..n-1]" must be live when + // "*result" is used. If an error was encountered, returns a non-OK + // status. + // + // Safe for concurrent use by multiple threads. + // If Direct I/O enabled, offset, n, and scratch should be aligned properly. + virtual Status Read(uint64_t offset, size_t n, Slice* result, + char* scratch) const override; + + // Readahead the file starting from offset by n bytes for caching. + virtual Status Prefetch(uint64_t offset, size_t n) override; + + // Tries to get an unique ID for this file that will be the same each time + // the file is opened (and will stay the same while the file is open). + // Furthermore, it tries to make this ID at most "max_size" bytes. If such an + // ID can be created this function returns the length of the ID and places it + // in "id"; otherwise, this function returns 0, in which case "id" + // may not have been modified. + // + // This function guarantees, for IDs from a given environment, two unique ids + // cannot be made equal to each other by adding arbitrary bytes to one of + // them. That is, no unique ID is the prefix of another. + // + // This function guarantees that the returned ID will not be interpretable as + // a single varint. + // + // Note: these IDs are only valid for the duration of the process. + virtual size_t GetUniqueId(char* id, size_t max_size) const override; + + virtual void Hint(AccessPattern pattern) override; + + // Indicates the upper layers if the current RandomAccessFile implementation + // uses direct IO. + virtual bool use_direct_io() const override; + + // Use the returned alignment value to allocate + // aligned buffer for Direct I/O + virtual size_t GetRequiredBufferAlignment() const override; + + // Remove any kind of caching of data from the offset to offset+length + // of this file. If the length is 0, then it refers to the end of file. + // If the system is not caching the file contents, then this is a noop. + virtual Status InvalidateCache(size_t offset, size_t length) override; +}; + +// A file abstraction for sequential writing. 
The implementation +// must provide buffering since callers may append small fragments +// at a time to the file. +class EncryptedWritableFile : public WritableFileWrapper { + protected: + std::unique_ptr file_; + std::unique_ptr stream_; + size_t prefixLength_; + + public: + // Default ctor. Prefix is assumed to be written already. + EncryptedWritableFile(std::unique_ptr&& f, + std::unique_ptr&& s, + size_t prefixLength) + : WritableFileWrapper(f.get()), + file_(std::move(f)), + stream_(std::move(s)), + prefixLength_(prefixLength) {} + + Status Append(const Slice& data) override; + + Status PositionedAppend(const Slice& data, uint64_t offset) override; + + // Indicates the upper layers if the current WritableFile implementation + // uses direct IO. + virtual bool use_direct_io() const override; + + // Use the returned alignment value to allocate + // aligned buffer for Direct I/O + virtual size_t GetRequiredBufferAlignment() const override; + + /* + * Get the size of valid data in the file. + */ + virtual uint64_t GetFileSize() override; + + // Truncate is necessary to trim the file to the correct size + // before closing. It is not always possible to keep track of the file + // size due to whole pages writes. The behavior is undefined if called + // with other writes to follow. + virtual Status Truncate(uint64_t size) override; + + // Remove any kind of caching of data from the offset to offset+length + // of this file. If the length is 0, then it refers to the end of file. + // If the system is not caching the file contents, then this is a noop. + // This call has no effect on dirty pages in the cache. + virtual Status InvalidateCache(size_t offset, size_t length) override; + + // Sync a file range with disk. + // offset is the starting byte of the file range to be synchronized. + // nbytes specifies the length of the range to be synchronized. + // This asks the OS to initiate flushing the cached data to disk, + // without waiting for completion. + // Default implementation does nothing. + virtual Status RangeSync(uint64_t offset, uint64_t nbytes) override; + + // PrepareWrite performs any necessary preparation for a write + // before the write actually occurs. This allows for pre-allocation + // of space on devices where it can result in less file + // fragmentation and/or less waste from over-zealous filesystem + // pre-allocation. + virtual void PrepareWrite(size_t offset, size_t len) override; + + // Pre-allocates space for a file. + virtual Status Allocate(uint64_t offset, uint64_t len) override; +}; + +// A file abstraction for random reading and writing. +class EncryptedRandomRWFile : public RandomRWFile { + protected: + std::unique_ptr file_; + std::unique_ptr stream_; + size_t prefixLength_; + + public: + EncryptedRandomRWFile(std::unique_ptr&& f, + std::unique_ptr&& s, + size_t prefixLength) + : file_(std::move(f)), + stream_(std::move(s)), + prefixLength_(prefixLength) {} + + // Indicates if the class makes use of direct I/O + // If false you must pass aligned buffer to Write() + virtual bool use_direct_io() const override; + + // Use the returned alignment value to allocate + // aligned buffer for Direct I/O + virtual size_t GetRequiredBufferAlignment() const override; + + // Write bytes in `data` at offset `offset`, Returns Status::OK() on success. + // Pass aligned buffer when use_direct_io() returns true. 
+ virtual Status Write(uint64_t offset, const Slice& data) override; + + // Read up to `n` bytes starting from offset `offset` and store them in + // result, provided `scratch` size should be at least `n`. + // Returns Status::OK() on success. + virtual Status Read(uint64_t offset, size_t n, Slice* result, + char* scratch) const override; + + virtual Status Flush() override; + + virtual Status Sync() override; + + virtual Status Fsync() override; + + virtual Status Close() override; +}; + } // namespace ROCKSDB_NAMESPACE #endif // !defined(ROCKSDB_LITE) diff --git a/dist/linux_amd64/include/rocksdb/file_system.h b/dist/linux_amd64/include/rocksdb/file_system.h index 13a2602..8292f0e 100644 --- a/dist/linux_amd64/include/rocksdb/file_system.h +++ b/dist/linux_amd64/include/rocksdb/file_system.h @@ -869,7 +869,8 @@ class FSWritableFile { size_t num_spanned_blocks = new_last_preallocated_block - last_preallocated_block_; Allocate(block_size * last_preallocated_block_, - block_size * num_spanned_blocks, options, dbg); + block_size * num_spanned_blocks, options, dbg) + .PermitUncheckedError(); last_preallocated_block_ = new_last_preallocated_block; } } @@ -1212,8 +1213,9 @@ class FileSystemWrapper : public FileSystem { class FSSequentialFileWrapper : public FSSequentialFile { public: - explicit FSSequentialFileWrapper(FSSequentialFile* target) - : target_(target) {} + explicit FSSequentialFileWrapper(FSSequentialFile* t) : target_(t) {} + + FSSequentialFile* target() const { return target_; } IOStatus Read(size_t n, const IOOptions& options, Slice* result, char* scratch, IODebugContext* dbg) override { @@ -1239,8 +1241,9 @@ class FSSequentialFileWrapper : public FSSequentialFile { class FSRandomAccessFileWrapper : public FSRandomAccessFile { public: - explicit FSRandomAccessFileWrapper(FSRandomAccessFile* target) - : target_(target) {} + explicit FSRandomAccessFileWrapper(FSRandomAccessFile* t) : target_(t) {} + + FSRandomAccessFile* target() const { return target_; } IOStatus Read(uint64_t offset, size_t n, const IOOptions& options, Slice* result, char* scratch, @@ -1275,6 +1278,8 @@ class FSWritableFileWrapper : public FSWritableFile { public: explicit FSWritableFileWrapper(FSWritableFile* t) : target_(t) {} + FSWritableFile* target() const { return target_; } + IOStatus Append(const Slice& data, const IOOptions& options, IODebugContext* dbg) override { return target_->Append(data, options, dbg); @@ -1358,7 +1363,9 @@ class FSWritableFileWrapper : public FSWritableFile { class FSRandomRWFileWrapper : public FSRandomRWFile { public: - explicit FSRandomRWFileWrapper(FSRandomRWFile* target) : target_(target) {} + explicit FSRandomRWFileWrapper(FSRandomRWFile* t) : target_(t) {} + + FSRandomRWFile* target() const { return target_; } bool use_direct_io() const override { return target_->use_direct_io(); } size_t GetRequiredBufferAlignment() const override { @@ -1392,7 +1399,7 @@ class FSRandomRWFileWrapper : public FSRandomRWFile { class FSDirectoryWrapper : public FSDirectory { public: - explicit FSDirectoryWrapper(FSDirectory* target) : target_(target) {} + explicit FSDirectoryWrapper(FSDirectory* t) : target_(t) {} IOStatus Fsync(const IOOptions& options, IODebugContext* dbg) override { return target_->Fsync(options, dbg); diff --git a/dist/linux_amd64/include/rocksdb/listener.h b/dist/linux_amd64/include/rocksdb/listener.h index 9757071..0d3c76d 100644 --- a/dist/linux_amd64/include/rocksdb/listener.h +++ b/dist/linux_amd64/include/rocksdb/listener.h @@ -149,19 +149,49 @@ struct 
TableFileDeletionInfo {
  Status status;
};

-struct FileOperationInfo {
-  using TimePoint = std::chrono::time_point<std::chrono::system_clock,
-                                            std::chrono::nanoseconds>;
+enum class FileOperationType {
+  kRead,
+  kWrite,
+  kTruncate,
+  kClose,
+  kFlush,
+  kSync,
+  kFsync,
+  kRangeSync
+};

+struct FileOperationInfo {
+  using Duration = std::chrono::nanoseconds;
+  using SteadyTimePoint =
+      std::chrono::time_point<std::chrono::steady_clock,
+                              std::chrono::nanoseconds>;
+  using SystemTimePoint =
+      std::chrono::time_point<std::chrono::system_clock,
+                              std::chrono::nanoseconds>;
+  using StartTimePoint = std::pair<SystemTimePoint, SteadyTimePoint>;
+  using FinishTimePoint = SteadyTimePoint;
+
+  FileOperationType type;
  const std::string& path;
  uint64_t offset;
  size_t length;
-  const TimePoint& start_timestamp;
-  const TimePoint& finish_timestamp;
+  const Duration duration;
+  const SystemTimePoint& start_ts;
  Status status;
-  FileOperationInfo(const std::string& _path, const TimePoint& start,
-                    const TimePoint& finish)
-      : path(_path), start_timestamp(start), finish_timestamp(finish) {}
+  FileOperationInfo(const FileOperationType _type, const std::string& _path,
+                    const StartTimePoint& _start_ts,
+                    const FinishTimePoint& _finish_ts, const Status& _status)
+      : type(_type),
+        path(_path),
+        duration(std::chrono::duration_cast<std::chrono::nanoseconds>(
+            _finish_ts - _start_ts.second)),
+        start_ts(_start_ts.first),
+        status(_status) {}

+  static StartTimePoint StartNow() {
+    return std::make_pair(
+        std::chrono::system_clock::now(), std::chrono::steady_clock::now());
+  }

+  static FinishTimePoint FinishNow() {
+    return std::chrono::steady_clock::now();
+  }
};

struct FlushJobInfo {
@@ -460,7 +490,27 @@ class EventListener {
  // operation finishes.
  virtual void OnFileWriteFinish(const FileOperationInfo& /* info */) {}

-  // If true, the OnFileReadFinish and OnFileWriteFinish will be called. If
+  // A callback function for RocksDB which will be called whenever a file flush
+  // operation finishes.
+  virtual void OnFileFlushFinish(const FileOperationInfo& /* info */) {}
+
+  // A callback function for RocksDB which will be called whenever a file sync
+  // operation finishes.
+  virtual void OnFileSyncFinish(const FileOperationInfo& /* info */) {}
+
+  // A callback function for RocksDB which will be called whenever a file
+  // rangeSync operation finishes.
+  virtual void OnFileRangeSyncFinish(const FileOperationInfo& /* info */) {}
+
+  // A callback function for RocksDB which will be called whenever a file
+  // truncate operation finishes.
+  virtual void OnFileTruncateFinish(const FileOperationInfo& /* info */) {}
+
+  // A callback function for RocksDB which will be called whenever a file close
+  // operation finishes.
+  virtual void OnFileCloseFinish(const FileOperationInfo& /* info */) {}
+
+  // If true, the OnFile*Finish functions will be called. If
  // false, then they won't be called.
  virtual bool ShouldBeNotifiedOnFileIO() { return false; }
diff --git a/dist/linux_amd64/include/rocksdb/metadata.h b/dist/linux_amd64/include/rocksdb/metadata.h index f1a9ee6..9a64a7a 100644 --- a/dist/linux_amd64/include/rocksdb/metadata.h +++ b/dist/linux_amd64/include/rocksdb/metadata.h
@@ -62,7 +62,9 @@ struct SstFileMetaData {
        being_compacted(false),
        num_entries(0),
        num_deletions(0),
-        oldest_blob_file_number(0) {}
+        oldest_blob_file_number(0),
+        oldest_ancester_time(0),
+        file_creation_time(0) {}

  SstFileMetaData(const std::string& _file_name, uint64_t _file_number,
                  const std::string& _path, size_t _size,
@@ -117,6 +119,8 @@ struct SstFileMetaData {
  // oldest SST file that is the compaction ancester of this file.
  // The timestamp is provided Env::GetCurrentTime().
  // 0 if the information is not available.
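To make the listener.h rework above concrete, here is a sketch (not from this diff) of an EventListener that consumes the new FileOperationInfo::duration field and two of the new OnFile*Finish hooks; note the required opt-in via ShouldBeNotifiedOnFileIO.

```cpp
// Sketch: log flush durations and failed syncs via the new callbacks.
#include <iostream>
#include <memory>

#include "rocksdb/listener.h"
#include "rocksdb/options.h"

class FileIOLogger : public rocksdb::EventListener {
 public:
  // Must return true, otherwise none of the OnFile*Finish callbacks fire.
  bool ShouldBeNotifiedOnFileIO() override { return true; }

  void OnFileFlushFinish(const rocksdb::FileOperationInfo& info) override {
    // `duration` (nanoseconds) replaces the old start/finish timestamp pair.
    std::cout << "flush of " << info.path << " took "
              << info.duration.count() << " ns\n";
  }

  void OnFileSyncFinish(const rocksdb::FileOperationInfo& info) override {
    if (!info.status.ok()) {
      std::cerr << "sync failed: " << info.status.ToString() << "\n";
    }
  }
};

// Registration uses the pre-existing DBOptions::listeners vector.
void RegisterLogger(rocksdb::Options& options) {
  options.listeners.push_back(std::make_shared<FileIOLogger>());
}
```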
+ // + // Note: for TTL blob files, it contains the start of the expiration range. uint64_t oldest_ancester_time; // Timestamp when the SST file is created, provided by Env::GetCurrentTime(). // 0 if the information is not available. diff --git a/dist/linux_amd64/include/rocksdb/options.h b/dist/linux_amd64/include/rocksdb/options.h index 040892e..5c75bd0 100644 --- a/dist/linux_amd64/include/rocksdb/options.h +++ b/dist/linux_amd64/include/rocksdb/options.h @@ -10,6 +10,7 @@ #include #include + #include #include #include @@ -21,6 +22,7 @@ #include "rocksdb/env.h" #include "rocksdb/file_checksum.h" #include "rocksdb/listener.h" +#include "rocksdb/sst_partitioner.h" #include "rocksdb/universal_compaction.h" #include "rocksdb/version.h" #include "rocksdb/write_buffer_manager.h" @@ -51,13 +53,6 @@ class InternalKeyComparator; class WalFilter; class FileSystem; -enum class CpuPriority { - kIdle = 0, - kLow = 1, - kNormal = 2, - kHigh = 3, -}; - // DB contents are stored in a set of blocks, each of which holds a // sequence of key,value pairs. Each block may be compressed before // being stored in a file. The following enum describes which @@ -315,6 +310,15 @@ struct ColumnFamilyOptions : public AdvancedColumnFamilyOptions { // Default: nullptr std::shared_ptr compaction_thread_limiter = nullptr; + // If non-nullptr, use the specified factory for a function to determine the + // partitioning of sst files. This helps compaction to split the files + // on interesting boundaries (key prefixes) to make propagation of sst + // files less write amplifying (covering the whole key space). + // THE FEATURE IS STILL EXPERIMENTAL + // + // Default: nullptr + std::shared_ptr sst_partitioner_factory = nullptr; + // Create ColumnFamilyOptions with default values for all fields ColumnFamilyOptions(); // Create ColumnFamilyOptions from Options @@ -565,6 +569,8 @@ struct DBOptions { // concurrently perform a compaction job by breaking it into multiple, // smaller ones that are run simultaneously. // Default: 1 (i.e. no subcompactions) + // + // Dynamically changeable through SetDBOptions() API. uint32_t max_subcompactions = 1; // NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the @@ -1144,6 +1150,23 @@ struct DBOptions { // not be used for recovery if best_efforts_recovery is true. // Default: false bool best_efforts_recovery = false; + + // It defines how many times db resume is called by a separate thread when + // background retryable IO Error happens. When background retryable IO + // Error happens, SetBGError is called to deal with the error. If the error + // can be auto-recovered (e.g., retryable IO Error during Flush or WAL write), + // then db resume is called in background to recover from the error. If this + // value is 0 or negative, db resume will not be called. + // + // Default: INT_MAX + int max_bgerror_resume_count = INT_MAX; + + // If max_bgerror_resume_count is >= 2, db resume is called multiple times. + // This option decides how long to wait to retry the next resume if the + // previous resume fails and satisfy redo resume conditions. + // + // Default: 1000000 (microseconds). 
+  uint64_t bgerror_resume_retry_interval = 1000000;
};

// Options to control the behavior of a database (passed to DB::Open)
diff --git a/dist/linux_amd64/include/rocksdb/persistent_cache.h b/dist/linux_amd64/include/rocksdb/persistent_cache.h index 9651812..e2dcfca 100644 --- a/dist/linux_amd64/include/rocksdb/persistent_cache.h +++ b/dist/linux_amd64/include/rocksdb/persistent_cache.h
@@ -56,6 +56,12 @@ class PersistentCache {
  virtual StatsType Stats() = 0;

  virtual std::string GetPrintableOptions() const = 0;
+
+  // Return a new numeric id. May be used by multiple clients who are
+  // sharding the same persistent cache to partition the key space. Typically
+  // the client will allocate a new id at startup and prepend the id to its
+  // cache keys.
+  virtual uint64_t NewId() = 0;
};

// Factory method to create a new persistent cache
diff --git a/dist/linux_amd64/include/rocksdb/sst_partitioner.h b/dist/linux_amd64/include/rocksdb/sst_partitioner.h new file mode 100644 index 0000000..5d18195 --- /dev/null +++ b/dist/linux_amd64/include/rocksdb/sst_partitioner.h
@@ -0,0 +1,135 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+
+#pragma once
+
+#include <memory>
+#include <string>
+
+#include "rocksdb/rocksdb_namespace.h"
+#include "rocksdb/slice.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+class Slice;
+
+enum PartitionerResult : char {
+  // Partitioner does not require a new file to be created
+  kNotRequired = 0x0,
+  // Partitioner forcefully requires a new file to be created
+  kRequired = 0x1
+  // Additional constants can be added
+};
+
+struct PartitionerRequest {
+  PartitionerRequest(const Slice& prev_user_key_,
+                     const Slice& current_user_key_,
+                     uint64_t current_output_file_size_)
+      : prev_user_key(&prev_user_key_),
+        current_user_key(&current_user_key_),
+        current_output_file_size(current_output_file_size_) {}
+  const Slice* prev_user_key;
+  const Slice* current_user_key;
+  uint64_t current_output_file_size;
+};
+
+/*
+ * An SstPartitioner is a generic, pluggable way of defining the partitioning
+ * of SST files. The compaction job will split SST files on partition
+ * boundaries to lower write amplification when SST files are promoted to
+ * higher levels.
+ */
+class SstPartitioner {
+ public:
+  virtual ~SstPartitioner() {}
+
+  // Return the name of this partitioner.
+  virtual const char* Name() const = 0;
+
+  // Called for every key in a compaction. When the partitioner wants a new
+  // SST file, it returns kRequired: the compaction job will then finish the
+  // current SST file, whose last key is the "prev_user_key" parameter, and
+  // start a new SST file whose first key is "current_user_key". Returns the
+  // decision of whether a partition boundary was detected and the compaction
+  // should create a new file.
+  virtual PartitionerResult ShouldPartition(
+      const PartitionerRequest& request) = 0;
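Backing up for a moment to the DBOptions hunk above: a minimal sketch (not from this diff) of the two new auto-resume knobs; the particular values are arbitrary illustrations.

```cpp
// Sketch: bound background-error recovery retries and space them out.
#include "rocksdb/options.h"

rocksdb::Options MakeResumableOptions() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // Give up on background-error recovery after five attempts...
  options.max_bgerror_resume_count = 5;
  // ...and wait two seconds (the unit is microseconds) between attempts.
  options.bgerror_resume_retry_interval = 2 * 1000 * 1000;
  return options;
}
```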
+
+  // Called with the smallest and largest keys in an SST file when the
+  // compaction tries to do a trivial move. Returns true if the partitioner
+  // allows the trivial move.
+  virtual bool CanDoTrivialMove(const Slice& smallest_user_key,
+                                const Slice& largest_user_key) = 0;
+
+  // Context information of a compaction run
+  struct Context {
+    // Does this compaction run include all data files?
+    bool is_full_compaction;
+    // Is this compaction requested by the client (true),
+    // or is it occurring as an automatic compaction process (false)?
+    bool is_manual_compaction;
+    // Output level for this compaction
+    int output_level;
+    // Smallest key for compaction
+    Slice smallest_user_key;
+    // Largest key for compaction
+    Slice largest_user_key;
+  };
+};
+
+class SstPartitionerFactory {
+ public:
+  virtual ~SstPartitionerFactory() {}
+
+  virtual std::unique_ptr<SstPartitioner> CreatePartitioner(
+      const SstPartitioner::Context& context) const = 0;
+
+  // Returns a name that identifies this partitioner factory.
+  virtual const char* Name() const = 0;
+};
+
+/*
+ * Fixed key prefix partitioner. It splits the output SST files when the
+ * key prefix (of the configured length) changes.
+ */
+class SstPartitionerFixedPrefix : public SstPartitioner {
+ public:
+  explicit SstPartitionerFixedPrefix(size_t len) : len_(len) {}
+
+  virtual ~SstPartitionerFixedPrefix() override {}
+
+  const char* Name() const override { return "SstPartitionerFixedPrefix"; }
+
+  PartitionerResult ShouldPartition(const PartitionerRequest& request) override;
+
+  bool CanDoTrivialMove(const Slice& smallest_user_key,
+                        const Slice& largest_user_key) override;
+
+ private:
+  size_t len_;
+};
+
+/*
+ * Factory for the fixed prefix partitioner.
+ */
+class SstPartitionerFixedPrefixFactory : public SstPartitionerFactory {
+ public:
+  explicit SstPartitionerFixedPrefixFactory(size_t len) : len_(len) {}
+
+  virtual ~SstPartitionerFixedPrefixFactory() {}
+
+  const char* Name() const override {
+    return "SstPartitionerFixedPrefixFactory";
+  }
+
+  std::unique_ptr<SstPartitioner> CreatePartitioner(
+      const SstPartitioner::Context& /* context */) const override;
+
+ private:
+  size_t len_;
+};
+
+extern std::shared_ptr<SstPartitionerFactory>
+NewSstPartitionerFixedPrefixFactory(size_t prefix_len);
+
+}  // namespace ROCKSDB_NAMESPACE
diff --git a/dist/linux_amd64/include/rocksdb/table.h b/dist/linux_amd64/include/rocksdb/table.h index 47bf60e..95cdb8d 100644 --- a/dist/linux_amd64/include/rocksdb/table.h +++ b/dist/linux_amd64/include/rocksdb/table.h
@@ -200,6 +200,40 @@ struct BlockBasedTableOptions {
  // incompatible with block-based filters.
  bool partition_filters = false;

+  // EXPERIMENTAL Option to generate Bloom filters that minimize memory
+  // internal fragmentation.
+  //
+  // When false, malloc_usable_size is not available, or format_version < 5,
+  // filters are generated without regard to internal fragmentation when
+  // loaded into memory (historical behavior). When true (and
+  // malloc_usable_size is available and format_version >= 5), then Bloom
+  // filters are generated to "round up" and "round down" their sizes to
+  // minimize internal fragmentation when loaded into memory, assuming the
+  // reading DB has the same memory allocation characteristics as the
+  // generating DB. This option does not break forward or backward
+  // compatibility.
+  //
+  // While individual filters will vary in bits/key and false positive rate
+  // when setting is true, the implementation attempts to maintain a weighted
+  // average FP rate for filters consistent with this option set to false.
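Stepping out of the table.h comment for a moment to ground the new sst_partitioner.h header that just ended: a sketch (not from this diff) wiring the built-in fixed-prefix factory into Options; the 4-byte prefix length is an arbitrary choice for illustration.

```cpp
// Sketch: split compaction outputs whenever the 4-byte key prefix changes.
#include <string>

#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/sst_partitioner.h"

rocksdb::Status OpenWithPrefixPartitioning(const std::string& path,
                                           rocksdb::DB** db) {
  rocksdb::Options options;
  options.create_if_missing = true;
  // Compaction closes the current output file whenever the first four
  // bytes of the user key change. The feature is still experimental.
  options.sst_partitioner_factory =
      rocksdb::NewSstPartitionerFixedPrefixFactory(4);
  return rocksdb::DB::Open(options, path, db);
}
```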
+ // + // With Jemalloc for example, this setting is expected to save about 10% of + // the memory footprint and block cache charge of filters, while increasing + // disk usage of filters by about 1-2% due to encoding efficiency losses + // with variance in bits/key. + // + // NOTE: Because some memory counted by block cache might be unmapped pages + // within internal fragmentation, this option can increase observed RSS + // memory usage. With cache_index_and_filter_blocks=true, this option makes + // the block cache better at using space it is allowed. + // + // NOTE: Do not set to true if you do not trust malloc_usable_size. With + // this option, RocksDB might access an allocated memory object beyond its + // original size if malloc_usable_size says it is safe to do so. While this + // can be considered bad practice, it should not produce undefined behavior + // unless malloc_usable_size is buggy or broken. + bool optimize_filters_for_memory = false; + // Use delta encoding to compress keys in blocks. // ReadOptions::pin_data requires this option to be disabled. // @@ -522,7 +556,19 @@ class TableFactory { const TableReaderOptions& table_reader_options, std::unique_ptr&& file, uint64_t file_size, std::unique_ptr* table_reader, - bool prefetch_index_and_filter_in_cache = true) const = 0; + bool prefetch_index_and_filter_in_cache = true) const { + ReadOptions ro; + return NewTableReader(ro, table_reader_options, std::move(file), file_size, + table_reader, prefetch_index_and_filter_in_cache); + } + + // Overload of the above function that allows the caller to pass in a + // ReadOptions + virtual Status NewTableReader( + const ReadOptions& ro, const TableReaderOptions& table_reader_options, + std::unique_ptr&& file, uint64_t file_size, + std::unique_ptr* table_reader, + bool prefetch_index_and_filter_in_cache) const = 0; // Return a table builder to write to a file for this table type. // diff --git a/dist/linux_amd64/include/rocksdb/table_properties.h b/dist/linux_amd64/include/rocksdb/table_properties.h index 213896f..ba3eca7 100644 --- a/dist/linux_amd64/include/rocksdb/table_properties.h +++ b/dist/linux_amd64/include/rocksdb/table_properties.h @@ -30,6 +30,8 @@ typedef std::map UserCollectedProperties; // table properties' human-readable names in the property block. struct TablePropertiesNames { + static const std::string kDbId; + static const std::string kDbSessionId; static const std::string kDataSize; static const std::string kIndexSize; static const std::string kIndexPartitions; @@ -193,6 +195,17 @@ struct TableProperties { // Actual SST file creation time. 0 means unknown. uint64_t file_creation_time = 0; + // DB identity + // db_id is an identifier generated the first time the DB is created + // If DB identity is unset or unassigned, `db_id` will be an empty string. + std::string db_id; + + // DB session identity + // db_session_id is an identifier that gets reset every time the DB is opened + // If DB session identity is unset or unassigned, `db_session_id` will be an + // empty string. + std::string db_session_id; + // Name of the column family with which this SST file is associated. // If column family is unknown, `column_family_name` will be an empty string. 
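A sketch (not from this diff) of opting in to the optimize_filters_for_memory flag documented above; as the comment says, it only has an effect with format_version >= 5 and an allocator that reports usable sizes. NewBloomFilterPolicy and NewBlockBasedTableFactory are pre-existing API.

```cpp
// Sketch: Bloom filters sized to minimize allocator fragmentation.
#include "rocksdb/filter_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

rocksdb::Options MakeFilterTunedOptions() {
  rocksdb::BlockBasedTableOptions table_options;
  table_options.format_version = 5;
  table_options.filter_policy.reset(
      rocksdb::NewBloomFilterPolicy(10 /* bits_per_key */));
  // New in this release; see the long comment in the hunk above.
  table_options.optimize_filters_for_memory = true;

  rocksdb::Options options;
  options.table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(table_options));
  return options;
}
```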
std::string column_family_name;
diff --git a/dist/linux_amd64/include/rocksdb/utilities/backupable_db.h b/dist/linux_amd64/include/rocksdb/utilities/backupable_db.h index 3f7ec99..5bea203 100644 --- a/dist/linux_amd64/include/rocksdb/utilities/backupable_db.h +++ b/dist/linux_amd64/include/rocksdb/utilities/backupable_db.h
@@ -24,6 +24,11 @@
namespace ROCKSDB_NAMESPACE {

+// The default DB file checksum function name.
+constexpr char kDbFileChecksumFuncName[] = "FileChecksumCrc32c";
+// The default BackupEngine file checksum function name.
+constexpr char kBackupFileChecksumFuncName[] = "crc32c";
+
struct BackupableDBOptions {
  // Where to keep the backup files. Has to be different than dbname_
  // Best to set this to dbname_ + "/backups"
@@ -87,12 +92,13 @@ struct BackupableDBOptions {
  // Default: nullptr
  std::shared_ptr<RateLimiter> restore_rate_limiter{nullptr};

-  // Only used if share_table_files is set to true. If true, will consider that
-  // backups can come from different databases, hence a sst is not uniquely
-  // identifed by its name, but by the triple (file name, crc32c, file length)
+  // Only used if share_table_files is set to true. If true, will consider
+  // that backups can come from different databases, even differently mutated
+  // databases with the same DB ID. See share_files_with_checksum_naming and
+  // ShareFilesNaming for details on how table file names are made
+  // unique between databases.
+  //
  // Default: false
-  // Note: this is an experimental option, and you'll need to set it manually
-  // *turn it on only if you know what you're doing*
  bool share_files_with_checksum;

  // Up to this many background threads will copy files for CreateNewBackup()
@@ -116,6 +122,80 @@ struct BackupableDBOptions {
  // Default: INT_MAX
  int max_valid_backups_to_open;

+  // ShareFilesNaming describes possible naming schemes for backup
+  // table file names when the table files are stored in the shared_checksum
+  // directory (i.e., both share_table_files and share_files_with_checksum
+  // are true).
+  enum ShareFilesNaming : int {
+    // Backup SST filenames are <file_number>_<crc32c>_<file_size>.sst
+    // where <file_number> is an unsigned decimal integer. This is the
+    // original/legacy naming scheme for share_files_with_checksum,
+    // with two problems:
+    // * At massive scale, collisions on this triple with different file
+    //   contents are plausible.
+    // * Determining the name to use requires computing the checksum,
+    //   so generally requires reading the whole file even if the file
+    //   is already backed up.
+    // ** ONLY RECOMMENDED FOR PRESERVING OLD BEHAVIOR **
+    kLegacyCrc32cAndFileSize = 1,
+
+    // Backup SST filenames are <file_number>_s<db_session_id>.sst. This
+    // pair of values should be very strongly unique for a given SST file
+    // and easily determined before computing a checksum. The 's' indicates
+    // the value is a DB session id, not a checksum.
+    //
+    // Exceptions:
+    // * For old SST files without a DB session id, kLegacyCrc32cAndFileSize
+    //   will be used instead, matching the names assigned by RocksDB versions
+    //   not supporting the newer naming scheme.
+    // * See also flags below.
+    kUseDbSessionId = 2,
+
+    kMaskNoNamingFlags = 0xffff,
+
+    // If not already part of the naming scheme, insert
+    //   _<file_size>
+    // before .sst in the name. In case of user code actually parsing the
+    // last _<whatever> before the .sst as the file size, this preserves that
+    // feature of kLegacyCrc32cAndFileSize. In other words, this option makes
+    // official that unofficial feature of the backup metadata.
+    //
+    // We do not consider SST file sizes to have sufficient entropy to
+    // contribute significantly to naming uniqueness.
+    kFlagIncludeFileSize = 1 << 31,
+
+    // When encountering an SST file from a Facebook-internal early
+    // release of 6.12, use the default naming scheme in effect for
+    // when the SST file was generated (assuming full file checksum
+    // was not set to GetFileChecksumGenCrc32cFactory()). That naming is
+    //   <file_number>_<db_session_id>.sst
+    // and ignores the kFlagIncludeFileSize setting.
+    // NOTE: This flag is intended to be temporary and should be removed
+    // in a later release.
+    kFlagMatchInterimNaming = 1 << 30,
+
+    kMaskNamingFlags = ~kMaskNoNamingFlags,
+  };
+
+  // Naming option for share_files_with_checksum table files. See
+  // ShareFilesNaming for details.
+  //
+  // Modifying this option cannot introduce a downgrade compatibility issue
+  // because RocksDB can read, restore, and delete backups using different file
+  // names, and it's OK for a backup directory to use a mixture of table file
+  // naming schemes.
+  //
+  // However, modifying this option and saving more backups to the same
+  // directory can lead to the same file getting saved again to that
+  // directory, under the new shared name in addition to the old shared
+  // name.
+  //
+  // Default: kUseDbSessionId | kFlagIncludeFileSize | kFlagMatchInterimNaming
+  //
+  // Note: This option comes into effect only if both share_files_with_checksum
+  // and share_table_files are true.
+  ShareFilesNaming share_files_with_checksum_naming;
+
  void Dump(Logger* logger) const;

  explicit BackupableDBOptions(
@@ -125,7 +205,10 @@
      bool _backup_log_files = true, uint64_t _backup_rate_limit = 0,
      uint64_t _restore_rate_limit = 0, int _max_background_operations = 1,
      uint64_t _callback_trigger_interval_size = 4 * 1024 * 1024,
-      int _max_valid_backups_to_open = INT_MAX)
+      int _max_valid_backups_to_open = INT_MAX,
+      ShareFilesNaming _share_files_with_checksum_naming =
+          static_cast<ShareFilesNaming>(kUseDbSessionId | kFlagIncludeFileSize |
+                                        kFlagMatchInterimNaming))
      : backup_dir(_backup_dir),
        backup_env(_backup_env),
        share_table_files(_share_table_files),
@@ -138,18 +221,39 @@ struct BackupableDBOptions {
        share_files_with_checksum(false),
        max_background_operations(_max_background_operations),
        callback_trigger_interval_size(_callback_trigger_interval_size),
-        max_valid_backups_to_open(_max_valid_backups_to_open) {
+        max_valid_backups_to_open(_max_valid_backups_to_open),
+        share_files_with_checksum_naming(_share_files_with_checksum_naming) {
    assert(share_table_files || !share_files_with_checksum);
+    assert((share_files_with_checksum_naming & kMaskNoNamingFlags) != 0);
  }
};

+inline BackupableDBOptions::ShareFilesNaming operator&(
+    BackupableDBOptions::ShareFilesNaming lhs,
+    BackupableDBOptions::ShareFilesNaming rhs) {
+  int l = static_cast<int>(lhs);
+  int r = static_cast<int>(rhs);
+  assert(r == BackupableDBOptions::kMaskNoNamingFlags ||
+         (r & BackupableDBOptions::kMaskNoNamingFlags) == 0);
+  return static_cast<BackupableDBOptions::ShareFilesNaming>(l & r);
+}
+
+inline BackupableDBOptions::ShareFilesNaming operator|(
+    BackupableDBOptions::ShareFilesNaming lhs,
+    BackupableDBOptions::ShareFilesNaming rhs) {
+  int l = static_cast<int>(lhs);
+  int r = static_cast<int>(rhs);
+  assert((r & BackupableDBOptions::kMaskNoNamingFlags) == 0);
+  return static_cast<BackupableDBOptions::ShareFilesNaming>(l | r);
+}
+
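Putting the ShareFilesNaming machinery above together with the VerifyBackup change in the hunks that follow: a sketch (not from this diff) that opens a BackupEngine with explicit naming flags, takes a backup, and verifies it with checksums. BackupEngine::Open, CreateNewBackup, and backup ids starting at 1 are pre-existing RocksDB behavior.

```cpp
// Sketch: session-id naming plus checksum verification of the first backup.
#include <string>

#include "rocksdb/db.h"
#include "rocksdb/utilities/backupable_db.h"

rocksdb::Status BackupAndVerify(rocksdb::Env* env, rocksdb::DB* db,
                                const std::string& backup_dir) {
  rocksdb::BackupableDBOptions backup_options(backup_dir);
  backup_options.share_table_files = true;
  backup_options.share_files_with_checksum = true;
  // Flags compose with the operator| defined above; here we skip the
  // temporary kFlagMatchInterimNaming compatibility flag.
  backup_options.share_files_with_checksum_naming =
      rocksdb::BackupableDBOptions::kUseDbSessionId |
      rocksdb::BackupableDBOptions::kFlagIncludeFileSize;

  rocksdb::BackupEngine* engine = nullptr;
  rocksdb::Status s =
      rocksdb::BackupEngine::Open(env, backup_options, &engine);
  if (!s.ok()) return s;

  s = engine->CreateNewBackup(db);
  if (s.ok()) {
    // New in this release: verify_with_checksum = true re-reads and
    // checksums the backup files instead of only checking their sizes.
    s = engine->VerifyBackup(1 /* first backup id */,
                             /*verify_with_checksum=*/true);
  }
  delete engine;
  return s;
}
```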
struct CreateBackupOptions {
  // Flush will always trigger if 2PC is enabled.
  // If write-ahead logs are disabled, set flush_before_backup=true to
  // avoid losing unflushed key/value pairs from the memtable.
  bool flush_before_backup = false;

-  // Callback for reporting progress.
+  // Callback for reporting progress, based on callback_trigger_interval_size.
  std::function<void()> progress_callback = []() {};

  // If false, background_thread_cpu_priority is ignored.
@@ -274,16 +378,23 @@ class BackupEngineReadOnly {
    return RestoreDBFromLatestBackup(options, db_dir, wal_dir);
  }

+  // If verify_with_checksum is true, this function
+  // inspects the current checksums and file sizes of backup files to see if
+  // they match our expectation.
+  //
+  // If verify_with_checksum is false, this function
  // checks that each file exists and that the size of the file matches our
-  // expectations. it does not check file checksum.
+  // expectation. It does not check file checksum.
  //
  // If this BackupEngine created the backup, it compares the files' current
-  // sizes against the number of bytes written to them during creation.
-  // Otherwise, it compares the files' current sizes against their sizes when
-  // the BackupEngine was opened.
+  // sizes (and current checksum) against the number of bytes written to
+  // them (and the checksum calculated) during creation.
+  // Otherwise, it compares the files' current sizes (and checksums) against
+  // their sizes (and checksums) when the BackupEngine was opened.
  //
  // Returns Status::OK() if all checks are good
-  virtual Status VerifyBackup(BackupID backup_id) = 0;
+  virtual Status VerifyBackup(BackupID backup_id,
+                              bool verify_with_checksum = false) = 0;
};

// A backup engine for creating new backups.
@@ -395,10 +506,17 @@ class BackupEngine {
    return RestoreDBFromLatestBackup(options, db_dir, wal_dir);
  }

+  // If verify_with_checksum is true, this function
+  // inspects the current checksums and file sizes of backup files to see if
+  // they match our expectation.
+  //
+  // If verify_with_checksum is false, this function
  // checks that each file exists and that the size of the file matches our
-  // expectations. it does not check file checksum.
+  // expectation. It does not check file checksum.
+  //
  // Returns Status::OK() if all checks are good
-  virtual Status VerifyBackup(BackupID backup_id) = 0;
+  virtual Status VerifyBackup(BackupID backup_id,
+                              bool verify_with_checksum = false) = 0;

  // Will delete any files left over from incomplete creation or deletion of
  // a backup.
This is not normally needed as those operations also clean up diff --git a/dist/linux_amd64/include/rocksdb/utilities/stackable_db.h b/dist/linux_amd64/include/rocksdb/utilities/stackable_db.h index 9888fa2..5a35ca7 100644 --- a/dist/linux_amd64/include/rocksdb/utilities/stackable_db.h +++ b/dist/linux_amd64/include/rocksdb/utilities/stackable_db.h @@ -347,6 +347,11 @@ class StackableDB : public DB { db_->GetLiveFilesMetaData(metadata); } + virtual Status GetLiveFilesChecksumInfo( + FileChecksumList* checksum_list) override { + return db_->GetLiveFilesChecksumInfo(checksum_list); + } + virtual void GetColumnFamilyMetaData(ColumnFamilyHandle* column_family, ColumnFamilyMetaData* cf_meta) override { db_->GetColumnFamilyMetaData(column_family, cf_meta); @@ -400,6 +405,10 @@ class StackableDB : public DB { return db_->GetDbIdentity(identity); } + virtual Status GetDbSessionId(std::string& session_id) const override { + return db_->GetDbSessionId(session_id); + } + using DB::SetOptions; virtual Status SetOptions(ColumnFamilyHandle* column_family_handle, const std::unordered_map& diff --git a/dist/linux_amd64/include/rocksdb/version.h b/dist/linux_amd64/include/rocksdb/version.h index 033a8ca..fb9f46b 100644 --- a/dist/linux_amd64/include/rocksdb/version.h +++ b/dist/linux_amd64/include/rocksdb/version.h @@ -5,8 +5,8 @@ #pragma once #define ROCKSDB_MAJOR 6 -#define ROCKSDB_MINOR 11 -#define ROCKSDB_PATCH 6 +#define ROCKSDB_MINOR 12 +#define ROCKSDB_PATCH 7 // Do not use these. We made the mistake of declaring macros starting with // double underscore. Now we have to live with our choice. We'll deprecate these diff --git a/dist/linux_amd64/lib/liblz4.a b/dist/linux_amd64/lib/liblz4.a index 5252daf..a76a436 100644 Binary files a/dist/linux_amd64/lib/liblz4.a and b/dist/linux_amd64/lib/liblz4.a differ diff --git a/dist/linux_amd64/lib/librocksdb.a b/dist/linux_amd64/lib/librocksdb.a index df95cd3..712bc6c 100644 Binary files a/dist/linux_amd64/lib/librocksdb.a and b/dist/linux_amd64/lib/librocksdb.a differ diff --git a/dist/linux_amd64/lib/libsnappy.a b/dist/linux_amd64/lib/libsnappy.a index 7d1feb3..1c81b20 100644 Binary files a/dist/linux_amd64/lib/libsnappy.a and b/dist/linux_amd64/lib/libsnappy.a differ diff --git a/dist/linux_amd64/lib/libz.a b/dist/linux_amd64/lib/libz.a index c1556d5..26f9b9a 100644 Binary files a/dist/linux_amd64/lib/libz.a and b/dist/linux_amd64/lib/libz.a differ diff --git a/dist/linux_amd64/lib/libzstd.a b/dist/linux_amd64/lib/libzstd.a index e754394..083165d 100644 Binary files a/dist/linux_amd64/lib/libzstd.a and b/dist/linux_amd64/lib/libzstd.a differ diff --git a/filter_policy_test.go b/filter_policy_test.go index 9e2509a..2fb79dc 100644 --- a/filter_policy_test.go +++ b/filter_policy_test.go @@ -3,7 +3,7 @@ package grocksdb import ( "testing" - "github.com/facebookgo/ensure" + "github.com/stretchr/testify/require" ) // fatalAsError is used as a wrapper to make it possible to use ensure @@ -26,13 +26,13 @@ func TestFilterPolicy(t *testing.T) { policy := &mockFilterPolicy{ createFilter: func(keys [][]byte) []byte { createFilterCalled = true - ensure.DeepEqual(&fatalAsError{t}, keys, givenKeys) + require.EqualValues(t, keys, givenKeys) return givenFilter }, keyMayMatch: func(key, filter []byte) bool { keyMayMatchCalled = true - ensure.DeepEqual(&fatalAsError{t}, key, givenKeys[0]) - ensure.DeepEqual(&fatalAsError{t}, filter, givenFilter) + require.EqualValues(t, key, givenKeys[0]) + require.EqualValues(t, filter, givenFilter) return true }, } @@ -47,19 +47,19 @@ func 
TestFilterPolicy(t *testing.T) { // insert keys wo := NewDefaultWriteOptions() for _, k := range givenKeys { - ensure.Nil(t, db.Put(wo, k, []byte("val"))) + require.Nil(t, db.Put(wo, k, []byte("val"))) } // flush to trigger the filter creation - ensure.Nil(t, db.Flush(NewDefaultFlushOptions())) - ensure.True(t, createFilterCalled) + require.Nil(t, db.Flush(NewDefaultFlushOptions())) + require.True(t, createFilterCalled) // test key may match call ro := NewDefaultReadOptions() v1, err := db.Get(ro, givenKeys[0]) defer v1.Free() - ensure.Nil(t, err) - ensure.True(t, keyMayMatchCalled) + require.Nil(t, err) + require.True(t, keyMayMatchCalled) } type mockFilterPolicy struct { diff --git a/go.mod b/go.mod index 6f4a65e..94d21c9 100644 --- a/go.mod +++ b/go.mod @@ -1,10 +1,5 @@ module github.com/linxGnu/grocksdb -require ( - github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c - github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect - github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect - github.com/stretchr/testify v1.5.1 -) +require github.com/stretchr/testify v1.5.1 go 1.13 diff --git a/go.sum b/go.sum index e85ad31..331fa69 100644 --- a/go.sum +++ b/go.sum @@ -1,14 +1,7 @@ github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= -github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= -github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= -github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= -github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= -github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= diff --git a/iterator_test.go b/iterator_test.go index a5b5ea3..6fb0279 100644 --- a/iterator_test.go +++ b/iterator_test.go @@ -3,7 +3,7 @@ package grocksdb import ( "testing" - "github.com/facebookgo/ensure" + "github.com/stretchr/testify/require" ) func TestIterator(t *testing.T) { @@ -14,7 +14,7 @@ func TestIterator(t *testing.T) { givenKeys := [][]byte{[]byte("key1"), []byte("key2"), []byte("key3")} wo := NewDefaultWriteOptions() for _, k := range givenKeys { - ensure.Nil(t, db.Put(wo, k, []byte("val"))) + require.Nil(t, db.Put(wo, k, []byte("val"))) } ro := NewDefaultReadOptions() @@ -26,8 +26,8 @@ func TestIterator(t *testing.T) { copy(key, iter.Key().Data()) actualKeys = append(actualKeys, key) } - ensure.Nil(t, iter.Err()) - ensure.DeepEqual(t, actualKeys, givenKeys) + require.Nil(t, iter.Err()) + require.EqualValues(t, actualKeys, givenKeys) } func TestIteratorCF(t *testing.T) { @@ -39,7 +39,7 @@ 
func TestIteratorCF(t *testing.T) { wo := NewDefaultWriteOptions() for _, k := range givenKeys { for i := range cfs { - ensure.Nil(t, db.PutCF(wo, cfs[i], k, []byte("val"))) + require.Nil(t, db.PutCF(wo, cfs[i], k, []byte("val"))) } } @@ -53,15 +53,15 @@ func TestIteratorCF(t *testing.T) { copy(key, iter.Key().Data()) actualKeys = append(actualKeys, key) } - ensure.Nil(t, iter.Err()) - ensure.DeepEqual(t, actualKeys, givenKeys) + require.Nil(t, iter.Err()) + require.EqualValues(t, actualKeys, givenKeys) } { ro := NewDefaultReadOptions() iters, err := db.NewIterators(ro, cfs) - ensure.Nil(t, err) - ensure.DeepEqual(t, len(iters), 4) + require.Nil(t, err) + require.EqualValues(t, len(iters), 4) defer func() { for i := range iters { iters[i].Close() @@ -75,8 +75,8 @@ func TestIteratorCF(t *testing.T) { copy(key, iter.Key().Data()) actualKeys = append(actualKeys, key) } - ensure.Nil(t, iter.Err()) - ensure.DeepEqual(t, actualKeys, givenKeys) + require.Nil(t, iter.Err()) + require.EqualValues(t, actualKeys, givenKeys) } } } diff --git a/memory_usage_test.go b/memory_usage_test.go index 0f6d517..b5bff7a 100644 --- a/memory_usage_test.go +++ b/memory_usage_test.go @@ -4,7 +4,6 @@ import ( "math/rand" "testing" - "github.com/facebookgo/ensure" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -31,7 +30,7 @@ func TestMemoryUsage(t *testing.T) { // take first memory usage snapshot mu1, err := GetApproximateMemoryUsageByType([]*DB{db}, []*Cache{cache}) - ensure.Nil(t, err) + require.Nil(t, err) // perform IO operations that will affect in-memory tables (and maybe cache as well) wo := NewDefaultWriteOptions() @@ -42,16 +41,16 @@ func TestMemoryUsage(t *testing.T) { key := []byte("key") value := make([]byte, 1024) _, err = rand.Read(value) - ensure.Nil(t, err) + require.Nil(t, err) err = db.Put(wo, key, value) - ensure.Nil(t, err) + require.Nil(t, err) _, err = db.Get(ro, key) - ensure.Nil(t, err) + require.Nil(t, err) // take second memory usage snapshot mu2, err := GetApproximateMemoryUsageByType([]*DB{db}, []*Cache{cache}) - ensure.Nil(t, err) + require.Nil(t, err) // the amount of memory used by memtables should increase after write/read; // cache memory usage is not likely to be changed, perhaps because requested key is kept by memtable diff --git a/merge_operator_test.go b/merge_operator_test.go index d4010b5..595586c 100644 --- a/merge_operator_test.go +++ b/merge_operator_test.go @@ -3,7 +3,7 @@ package grocksdb import ( "testing" - "github.com/facebookgo/ensure" + "github.com/stretchr/testify/require" ) func TestMergeOperator(t *testing.T) { @@ -15,9 +15,9 @@ func TestMergeOperator(t *testing.T) { ) merger := &mockMergeOperator{ fullMerge: func(key, existingValue []byte, operands [][]byte) ([]byte, bool) { - ensure.DeepEqual(&fatalAsError{t}, key, givenKey) - ensure.DeepEqual(&fatalAsError{t}, existingValue, givenVal1) - ensure.DeepEqual(&fatalAsError{t}, operands, [][]byte{givenVal2}) + require.EqualValues(t, key, givenKey) + require.EqualValues(t, existingValue, givenVal1) + require.EqualValues(t, operands, [][]byte{givenVal2}) return givenMerged, true }, } @@ -27,16 +27,16 @@ func TestMergeOperator(t *testing.T) { defer db.Close() wo := NewDefaultWriteOptions() - ensure.Nil(t, db.Put(wo, givenKey, givenVal1)) - ensure.Nil(t, db.Merge(wo, givenKey, givenVal2)) + require.Nil(t, db.Put(wo, givenKey, givenVal1)) + require.Nil(t, db.Merge(wo, givenKey, givenVal2)) // trigger a compaction to ensure that a merge is performed db.CompactRange(Range{nil, nil}) ro := 
NewDefaultReadOptions() v1, err := db.Get(ro, givenKey) - ensure.Nil(t, err) - ensure.DeepEqual(t, v1.Data(), givenMerged) + require.Nil(t, err) + require.EqualValues(t, v1.Data(), givenMerged) v1.Free() } @@ -52,15 +52,15 @@ func TestPartialMergeOperator(t *testing.T) { merger := &mockMergePartialOperator{ fullMerge: func(key, existingValue []byte, operands [][]byte) ([]byte, bool) { - ensure.DeepEqual(&fatalAsError{t}, key, givenKey) - ensure.DeepEqual(&fatalAsError{t}, existingValue, startingVal) - ensure.DeepEqual(&fatalAsError{t}, operands[0], pMergeResult) + require.EqualValues(t, key, givenKey) + require.EqualValues(t, existingValue, startingVal) + require.EqualValues(t, operands[0], pMergeResult) return fMergeResult, true }, partialMerge: func(key, leftOperand, rightOperand []byte) ([]byte, bool) { - ensure.DeepEqual(&fatalAsError{t}, key, givenKey) - ensure.DeepEqual(&fatalAsError{t}, leftOperand, mergeVal1) - ensure.DeepEqual(&fatalAsError{t}, rightOperand, mergeVal2) + require.EqualValues(t, key, givenKey) + require.EqualValues(t, leftOperand, mergeVal1) + require.EqualValues(t, rightOperand, mergeVal2) return pMergeResult, true }, } @@ -73,14 +73,14 @@ func TestPartialMergeOperator(t *testing.T) { defer wo.Destroy() // insert a starting value and compact to trigger merges - ensure.Nil(t, db.Put(wo, givenKey, startingVal)) + require.Nil(t, db.Put(wo, givenKey, startingVal)) // trigger a compaction to ensure that a merge is performed db.CompactRange(Range{nil, nil}) // we expect these two operands to be passed to merge partial - ensure.Nil(t, db.Merge(wo, givenKey, mergeVal1)) - ensure.Nil(t, db.Merge(wo, givenKey, mergeVal2)) + require.Nil(t, db.Merge(wo, givenKey, mergeVal1)) + require.Nil(t, db.Merge(wo, givenKey, mergeVal2)) // trigger a compaction to ensure that a // partial and full merge are performed @@ -89,8 +89,8 @@ func TestPartialMergeOperator(t *testing.T) { ro := NewDefaultReadOptions() v1, err := db.Get(ro, givenKey) defer v1.Free() - ensure.Nil(t, err) - ensure.DeepEqual(t, v1.Data(), fMergeResult) + require.Nil(t, err) + require.EqualValues(t, v1.Data(), fMergeResult) } @@ -106,15 +106,15 @@ func TestMergeMultiOperator(t *testing.T) { merger := &mockMergeMultiOperator{ fullMerge: func(key, existingValue []byte, operands [][]byte) ([]byte, bool) { - ensure.DeepEqual(&fatalAsError{t}, key, givenKey) - ensure.DeepEqual(&fatalAsError{t}, existingValue, startingVal) - ensure.DeepEqual(&fatalAsError{t}, operands[0], pMergeResult) + require.EqualValues(t, key, givenKey) + require.EqualValues(t, existingValue, startingVal) + require.EqualValues(t, operands[0], pMergeResult) return fMergeResult, true }, partialMergeMulti: func(key []byte, operands [][]byte) ([]byte, bool) { - ensure.DeepEqual(&fatalAsError{t}, key, givenKey) - ensure.DeepEqual(&fatalAsError{t}, operands[0], mergeVal1) - ensure.DeepEqual(&fatalAsError{t}, operands[1], mergeVal2) + require.EqualValues(t, key, givenKey) + require.EqualValues(t, operands[0], mergeVal1) + require.EqualValues(t, operands[1], mergeVal2) return pMergeResult, true }, } @@ -127,14 +127,14 @@ func TestMergeMultiOperator(t *testing.T) { defer wo.Destroy() // insert a starting value and compact to trigger merges - ensure.Nil(t, db.Put(wo, givenKey, startingVal)) + require.Nil(t, db.Put(wo, givenKey, startingVal)) // trigger a compaction to ensure that a merge is performed db.CompactRange(Range{nil, nil}) // we expect these two operands to be passed to merge multi - ensure.Nil(t, db.Merge(wo, givenKey, mergeVal1)) - ensure.Nil(t, 
db.Merge(wo, givenKey, mergeVal2)) + require.Nil(t, db.Merge(wo, givenKey, mergeVal1)) + require.Nil(t, db.Merge(wo, givenKey, mergeVal2)) // trigger a compaction to ensure that a // partial and full merge are performed @@ -143,8 +143,8 @@ func TestMergeMultiOperator(t *testing.T) { ro := NewDefaultReadOptions() v1, err := db.Get(ro, givenKey) defer v1.Free() - ensure.Nil(t, err) - ensure.DeepEqual(t, v1.Data(), fMergeResult) + require.Nil(t, err) + require.EqualValues(t, v1.Data(), fMergeResult) } // Mock Objects diff --git a/options.go b/options.go index 0914b56..750028c 100644 --- a/options.go +++ b/options.go @@ -376,13 +376,13 @@ func (opts *Options) SetAllowConcurrentMemtableWrites(allow bool) { C.rocksdb_options_set_allow_concurrent_memtable_write(opts.c, boolToChar(allow)) } -// // AllowConcurrentMemtableWrites whether to allow concurrent memtable writes. Conccurent writes are -// // not supported by all memtable factories (currently only SkipList memtables). -// // As of rocksdb 5.0.2 you must call `SetAllowConcurrentMemtableWrites(false)` -// // if you use `OptimizeForPointLookup`. -// func (opts *Options) AllowConcurrentMemtableWrites() bool { -// return charToBool(C.rocksdb_options_get_allow_concurrent_memtable_write(opts.c)) -// } +// AllowConcurrentMemtableWrites whether to allow concurrent memtable writes. Conccurent writes are +// not supported by all memtable factories (currently only SkipList memtables). +// As of rocksdb 5.0.2 you must call `SetAllowConcurrentMemtableWrites(false)` +// if you use `OptimizeForPointLookup`. +func (opts *Options) AllowConcurrentMemtableWrites() bool { + return charToBool(C.rocksdb_options_get_allow_concurrent_memtable_write(opts.c)) +} // SetWriteBufferSize sets the amount of data to build up in memory // (backed by an unsorted log on disk) before converting to a sorted on-disk file. @@ -494,10 +494,10 @@ func (opts *Options) SetCompression(value CompressionType) { C.rocksdb_options_set_compression(opts.c, C.int(value)) } -// // GetCompression returns the compression algorithm. -// func (opts *Options) GetCompression() CompressionType { -// return CompressionType(C.rocksdb_options_get_compression(opts.c)) -// } +// GetCompression returns the compression algorithm. +func (opts *Options) GetCompression() CompressionType { + return CompressionType(C.rocksdb_options_get_compression(opts.c)) +} // SetCompressionOptions sets different options for compression algorithms. func (opts *Options) SetCompressionOptions(value CompressionOptions) { @@ -510,11 +510,11 @@ func (opts *Options) SetBottommostCompression(value CompressionType) { C.rocksdb_options_set_bottommost_compression(opts.c, C.int(value)) } -// // GetBottommostCompression returns the compression algorithm for -// // bottommost level. -// func (opts *Options) GetBottommostCompression() CompressionType { -// return CompressionType(C.rocksdb_options_get_bottommost_compression(opts.c)) -// } +// GetBottommostCompression returns the compression algorithm for +// bottommost level. +func (opts *Options) GetBottommostCompression() CompressionType { + return CompressionType(C.rocksdb_options_get_bottommost_compression(opts.c)) +} // SetBottommostCompressionOptions sets different options for compression algorithms, for bottommost. // @@ -806,12 +806,12 @@ func (opts *Options) SetMaxCompactionBytes(value uint64) { C.rocksdb_options_set_max_compaction_bytes(opts.c, C.uint64_t(value)) } -// // GetMaxCompactionBytes returns the maximum number of bytes in all compacted files. 
 // SetBottommostCompressionOptions sets different options for compression algorithms, for bottommost.
 //
@@ -806,12 +806,12 @@ func (opts *Options) SetMaxCompactionBytes(value uint64) {
 C.rocksdb_options_set_max_compaction_bytes(opts.c, C.uint64_t(value))
 }

-// // GetMaxCompactionBytes returns the maximum number of bytes in all compacted files.
-// // We try to limit number of bytes in one compaction to be lower than this
-// // threshold. But it's not guaranteed.
-// func (opts *Options) GetMaxCompactionBytes() uint64 {
-// return uint64(C.rocksdb_options_get_max_compaction_bytes(opts.c))
-// }
+// GetMaxCompactionBytes returns the maximum number of bytes in all compacted files.
+// We try to limit the number of bytes in one compaction to be lower than this
+// threshold, but it's not guaranteed.
+func (opts *Options) GetMaxCompactionBytes() uint64 {
+ return uint64(C.rocksdb_options_get_max_compaction_bytes(opts.c))
+}

 // SetSoftPendingCompactionBytesLimit sets the threshold at which
 // all writes will be slowed down to at least delayed_write_rate if estimated
@@ -822,12 +822,12 @@ func (opts *Options) SetSoftPendingCompactionBytesLimit(value uint64) {
 C.rocksdb_options_set_soft_pending_compaction_bytes_limit(opts.c, C.size_t(value))
 }

-// // GetSoftPendingCompactionBytesLimit returns the threshold at which
-// // all writes will be slowed down to at least delayed_write_rate if estimated
-// // bytes needed to be compaction exceed this threshold.
-// func (opts *Options) GetSoftPendingCompactionBytesLimit() uint64 {
-// return uint64(C.rocksdb_options_get_soft_pending_compaction_bytes_limit(opts.c))
-// }
+// GetSoftPendingCompactionBytesLimit returns the threshold at which
+// all writes will be slowed down to at least delayed_write_rate if the estimated
+// bytes needed to be compacted exceed this threshold.
+func (opts *Options) GetSoftPendingCompactionBytesLimit() uint64 {
+ return uint64(C.rocksdb_options_get_soft_pending_compaction_bytes_limit(opts.c))
+}

 // SetHardPendingCompactionBytesLimit sets the bytes threshold at which
 // all writes are stopped if estimated bytes needed to be compaction exceed
@@ -838,12 +838,12 @@ func (opts *Options) SetHardPendingCompactionBytesLimit(value uint64) {
 C.rocksdb_options_set_hard_pending_compaction_bytes_limit(opts.c, C.size_t(value))
 }

-// // GetHardPendingCompactionBytesLimit returns the threshold at which
-// // all writes will be slowed down to at least delayed_write_rate if estimated
-// // bytes needed to be compaction exceed this threshold.
-// func (opts *Options) GetHardPendingCompactionBytesLimit() uint64 {
-// return uint64(C.rocksdb_options_get_hard_pending_compaction_bytes_limit(opts.c))
-// }
+// GetHardPendingCompactionBytesLimit returns the bytes threshold at which
+// all writes are stopped if the estimated bytes needed to be compacted exceed
+// this threshold.
+func (opts *Options) GetHardPendingCompactionBytesLimit() uint64 {
+ return uint64(C.rocksdb_options_get_hard_pending_compaction_bytes_limit(opts.c))
+}

 // SetMaxBytesForLevelMultiplierAdditional sets different max-size multipliers
 // for different levels.
@@ -873,10 +873,10 @@ func (opts *Options) SetUseFsync(value bool) {
 C.rocksdb_options_set_use_fsync(opts.c, C.int(boolToChar(value)))
 }

-// // UseFsync returns fsync setting.
-// func (opts *Options) UseFsync() bool {
-// return C.rocksdb_options_get_use_fsync(opts.c) != 0
-// }
+// UseFsync returns fsync setting.
+func (opts *Options) UseFsync() bool {
+ return C.rocksdb_options_get_use_fsync(opts.c) != 0
+}
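The soft/hard pair above bounds write-stall behaviour: past the soft limit writes are slowed to at least delayed_write_rate, past the hard limit they stop. A hypothetical tuning sketch (tunePendingCompaction and its values are illustrative, not part of this patch):

func tunePendingCompaction(opts *Options) bool {
	opts.SetSoftPendingCompactionBytesLimit(64 << 30)  // slow writes past 64 GiB of compaction debt
	opts.SetHardPendingCompactionBytesLimit(256 << 30) // stop writes past 256 GiB
	// The restored getters make the soft <= hard invariant checkable.
	return opts.GetSoftPendingCompactionBytesLimit() <= opts.GetHardPendingCompactionBytesLimit()
}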
 // SetDbLogDir specifies the absolute info LOG dir.
 //
@@ -914,11 +914,11 @@ func (opts *Options) SetDeleteObsoleteFilesPeriodMicros(value uint64) {
 C.rocksdb_options_set_delete_obsolete_files_period_micros(opts.c, C.uint64_t(value))
 }

-// // GetDeleteObsoleteFilesPeriodMicros returns the periodicity
-// // when obsolete files get deleted.
-// func (opts *Options) GetDeleteObsoleteFilesPeriodMicros() uint64 {
-// return uint64(C.rocksdb_options_get_delete_obsolete_files_period_micros(opts.c))
-// }
+// GetDeleteObsoleteFilesPeriodMicros returns the periodicity
+// when obsolete files get deleted.
+func (opts *Options) GetDeleteObsoleteFilesPeriodMicros() uint64 {
+ return uint64(C.rocksdb_options_get_delete_obsolete_files_period_micros(opts.c))
+}

 // SetMaxBackgroundCompactions sets the maximum number of
 // concurrent background compaction jobs, submitted to
@@ -934,10 +934,10 @@ func (opts *Options) SetMaxBackgroundCompactions(value int) {
 C.rocksdb_options_set_max_background_compactions(opts.c, C.int(value))
 }

-// // GetMaxBackgroundCompactions returns maximum number of concurrent background compaction jobs setting.
-// func (opts *Options) GetMaxBackgroundCompactions() int {
-// return int(C.rocksdb_options_get_max_background_compactions(opts.c))
-// }
+// GetMaxBackgroundCompactions returns maximum number of concurrent background compaction jobs setting.
+func (opts *Options) GetMaxBackgroundCompactions() int {
+ return int(C.rocksdb_options_get_max_background_compactions(opts.c))
+}

 // SetMaxBackgroundFlushes sets the maximum number of
 // concurrent background memtable flush jobs, submitted to
@@ -961,11 +961,11 @@ func (opts *Options) SetMaxBackgroundFlushes(value int) {
 C.rocksdb_options_set_max_background_flushes(opts.c, C.int(value))
 }

-// // GetMaxBackgroundFlushes returns the maximum number of concurrent background
-// // memtable flush jobs setting.
-// func (opts *Options) GetMaxBackgroundFlushes() int {
-// return int(C.rocksdb_options_get_max_background_flushes(opts.c))
-// }
+// GetMaxBackgroundFlushes returns the maximum number of concurrent background
+// memtable flush jobs setting.
+func (opts *Options) GetMaxBackgroundFlushes() int {
+ return int(C.rocksdb_options_get_max_background_flushes(opts.c))
+}

 // SetMaxLogFileSize sets the maximum size of the info log file.
 //
@@ -977,10 +977,10 @@ func (opts *Options) SetMaxLogFileSize(value uint64) {
 C.rocksdb_options_set_max_log_file_size(opts.c, C.size_t(value))
 }

-// // GetMaxLogFileSize returns setting for maximum size of the info log file.
-// func (opts *Options) GetMaxLogFileSize() uint64 {
-// return uint64(C.rocksdb_options_get_max_log_file_size(opts.c))
-// }
+// GetMaxLogFileSize returns setting for maximum size of the info log file.
+func (opts *Options) GetMaxLogFileSize() uint64 {
+ return uint64(C.rocksdb_options_get_max_log_file_size(opts.c))
+}

 // SetLogFileTimeToRoll sets the time for the info log file to roll (in seconds).
 //
@@ -991,10 +991,10 @@ func (opts *Options) SetLogFileTimeToRoll(value uint64) {
 C.rocksdb_options_set_log_file_time_to_roll(opts.c, C.size_t(value))
 }

-// // GetLogFileTimeToRoll returns the time for info log file to roll (in seconds).
-// func (opts *Options) GetLogFileTimeToRoll() uint64 {
-// return uint64(C.rocksdb_options_get_log_file_time_to_roll(opts.c))
-// }
+// GetLogFileTimeToRoll returns the time for info log file to roll (in seconds).
+func (opts *Options) GetLogFileTimeToRoll() uint64 {
+ return uint64(C.rocksdb_options_get_log_file_time_to_roll(opts.c))
+}
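Info-log rotation combines the size and time thresholds restored above; a sketch with illustrative values (rotateInfoLog is a hypothetical helper):

func rotateInfoLog(opts *Options) {
	opts.SetMaxLogFileSize(64 << 20) // roll the info log after 64 MiB...
	opts.SetLogFileTimeToRoll(86400) // ...or after one day, whichever comes first
	_ = opts.GetMaxLogFileSize()     // now readable back, e.g. for diagnostics
}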
 // SetKeepLogFileNum sets the maximum info log files to be kept.
 // Default: 1000
@@ -1002,10 +1002,10 @@ func (opts *Options) SetKeepLogFileNum(value uint) {
 C.rocksdb_options_set_keep_log_file_num(opts.c, C.size_t(value))
 }

-// // GetKeepLogFileNum return setting for maximum info log files to be kept.
-// func (opts *Options) GetKeepLogFileNum() uint {
-// return uint(C.rocksdb_options_get_keep_log_file_num(opts.c))
-// }
+// GetKeepLogFileNum returns setting for maximum info log files to be kept.
+func (opts *Options) GetKeepLogFileNum() uint {
+ return uint(C.rocksdb_options_get_keep_log_file_num(opts.c))
+}

 // SetSoftRateLimit sets the soft rate limit.
 //
@@ -1018,10 +1018,10 @@ func (opts *Options) SetSoftRateLimit(value float64) {
 C.rocksdb_options_set_soft_rate_limit(opts.c, C.double(value))
 }

-// // GetSoftRateLimit returns setting for soft rate limit.
-// func (opts *Options) GetSoftRateLimit() float64 {
-// return float64(C.rocksdb_options_get_soft_rate_limit(opts.c))
-// }
+// GetSoftRateLimit returns setting for soft rate limit.
+func (opts *Options) GetSoftRateLimit() float64 {
+ return float64(C.rocksdb_options_get_soft_rate_limit(opts.c))
+}

 // SetHardRateLimit sets the hard rate limit.
 //
@@ -1032,10 +1032,10 @@ func (opts *Options) SetHardRateLimit(value float64) {
 C.rocksdb_options_set_hard_rate_limit(opts.c, C.double(value))
 }

-// // GetHardRateLimit returns setting for hard rate limit.
-// func (opts *Options) GetHardRateLimit() float64 {
-// return float64(C.rocksdb_options_get_hard_rate_limit(opts.c))
-// }
+// GetHardRateLimit returns setting for hard rate limit.
+func (opts *Options) GetHardRateLimit() float64 {
+ return float64(C.rocksdb_options_get_hard_rate_limit(opts.c))
+}

 // SetRateLimitDelayMaxMilliseconds sets the max time
 // a put will be stalled when hard_rate_limit is enforced.
@@ -1045,11 +1045,11 @@ func (opts *Options) SetRateLimitDelayMaxMilliseconds(value uint) {
 C.rocksdb_options_set_rate_limit_delay_max_milliseconds(opts.c, C.uint(value))
 }

-// // GetRateLimitDelayMaxMilliseconds sets the max time
-// // a put will be stalled when hard_rate_limit is enforced.
-// func (opts *Options) GetRateLimitDelayMaxMilliseconds() uint {
-// return uint(C.rocksdb_options_get_rate_limit_delay_max_milliseconds(opts.c))
-// }
+// GetRateLimitDelayMaxMilliseconds returns the max time
+// a put will be stalled when hard_rate_limit is enforced.
+func (opts *Options) GetRateLimitDelayMaxMilliseconds() uint {
+ return uint(C.rocksdb_options_get_rate_limit_delay_max_milliseconds(opts.c))
+}

 // SetMaxManifestFileSize sets the maximum manifest file size until is rolled over.
 // The older manifest file be deleted.
@@ -1058,11 +1058,11 @@ func (opts *Options) SetMaxManifestFileSize(value uint64) {
 C.rocksdb_options_set_max_manifest_file_size(opts.c, C.size_t(value))
 }

-// // GetMaxManifestFileSize returns the maximum manifest file size until is rolled over.
-// // The older manifest file be deleted.
-// func (opts *Options) GetMaxManifestFileSize() uint64 {
-// return uint64(C.rocksdb_options_get_max_manifest_file_size(opts.c))
-// }
+// GetMaxManifestFileSize returns the maximum manifest file size until it is rolled over.
+// The older manifest file will be deleted.
+func (opts *Options) GetMaxManifestFileSize() uint64 {
+ return uint64(C.rocksdb_options_get_max_manifest_file_size(opts.c))
+}

 // SetTableCacheNumshardbits sets the number of shards used for table cache.
 // Default: 4
@@ -1070,10 +1070,10 @@ func (opts *Options) SetTableCacheNumshardbits(value int) {
 C.rocksdb_options_set_table_cache_numshardbits(opts.c, C.int(value))
 }

-// // GetTableCacheNumshardbits returns the number of shards used for table cache.
-// func (opts *Options) GetTableCacheNumshardbits() int {
-// return int(C.rocksdb_options_get_table_cache_numshardbits(opts.c))
-// }
+// GetTableCacheNumshardbits returns the number of shards used for table cache.
+func (opts *Options) GetTableCacheNumshardbits() int {
+ return int(C.rocksdb_options_get_table_cache_numshardbits(opts.c))
+}

 // SetTableCacheRemoveScanCountLimit sets the count limit during a scan.
 //
@@ -1101,10 +1101,10 @@ func (opts *Options) SetArenaBlockSize(value uint64) {
 C.rocksdb_options_set_arena_block_size(opts.c, C.size_t(value))
 }

-// // SetArenaBlockSize returns the size of one block in arena memory allocation.
-// func (opts *Options) GetArenaBlockSize() uint64 {
-// return uint64(C.rocksdb_options_get_arena_block_size(opts.c))
-// }
+// GetArenaBlockSize returns the size of one block in arena memory allocation.
+func (opts *Options) GetArenaBlockSize() uint64 {
+ return uint64(C.rocksdb_options_get_arena_block_size(opts.c))
+}

 // SetDisableAutoCompactions enable/disable automatic compactions.
 //
@@ -1115,10 +1115,10 @@ func (opts *Options) SetDisableAutoCompactions(value bool) {
 C.rocksdb_options_set_disable_auto_compactions(opts.c, C.int(boolToChar(value)))
 }

-// // DisabledAutoCompactions returns if automatic compactions is disabled.
-// func (opts *Options) DisabledAutoCompactions() bool {
-// return charToBool(C.rocksdb_options_get_disable_auto_compactions(opts.c))
-// }
+// DisabledAutoCompactions returns if automatic compactions are disabled.
+func (opts *Options) DisabledAutoCompactions() bool {
+ return charToBool(C.rocksdb_options_get_disable_auto_compactions(opts.c))
+}

 // SetWALRecoveryMode sets the recovery mode.
 // Recovery mode to control the consistency while replaying WAL.
@@ -1128,10 +1128,10 @@ func (opts *Options) SetWALRecoveryMode(mode WALRecoveryMode) {
 C.rocksdb_options_set_wal_recovery_mode(opts.c, C.int(mode))
 }

-// // GetWALRecoveryMode returns the recovery mode.
-// func (opts *Options) GetWALRecoveryMode() WALRecoveryMode {
-// return WALRecoveryMode(C.rocksdb_options_get_wal_recovery_mode(opts.c))
-// }
+// GetWALRecoveryMode returns the recovery mode.
+func (opts *Options) GetWALRecoveryMode() WALRecoveryMode {
+ return WALRecoveryMode(C.rocksdb_options_get_wal_recovery_mode(opts.c))
+}

 // SetWALTtlSeconds sets the WAL ttl in seconds.
 //
@@ -1153,10 +1153,10 @@ func (opts *Options) SetWALTtlSeconds(value uint64) {
 C.rocksdb_options_set_WAL_ttl_seconds(opts.c, C.uint64_t(value))
 }

-// // GetWALTtlSeconds returns WAL ttl in seconds.
-// func (opts *Options) GetWALTtlSeconds() uint64 {
-// return uint64(C.rocksdb_options_get_WAL_ttl_seconds(opts.c))
-// }
+// GetWALTtlSeconds returns WAL ttl in seconds.
+func (opts *Options) GetWALTtlSeconds() uint64 {
+ return uint64(C.rocksdb_options_get_WAL_ttl_seconds(opts.c))
+}
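GetWALRecoveryMode pairs with the existing setter, so recovery strictness can be relaxed temporarily and restored afterwards. A sketch using names from this diff (the helper itself is hypothetical):

func relaxForBulkLoad(opts *Options) WALRecoveryMode {
	prev := opts.GetWALRecoveryMode()
	opts.SetWALRecoveryMode(SkipAnyCorruptedRecordsRecovery)
	opts.SetDisableAutoCompactions(true) // readable back via DisabledAutoCompactions()
	return prev                          // caller restores the previous mode when done
}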
 // SetWalSizeLimitMb sets the WAL size limit in MB.
 //
@@ -1168,10 +1168,10 @@ func (opts *Options) SetWalSizeLimitMb(value uint64) {
 C.rocksdb_options_set_WAL_size_limit_MB(opts.c, C.uint64_t(value))
 }

-// // GetWalSizeLimitMb returns the WAL size limit in MB.
-// func (opts *Options) GetWalSizeLimitMb() uint64 {
-// return uint64(C.rocksdb_options_get_WAL_size_limit_MB(opts.c))
-// }
+// GetWalSizeLimitMb returns the WAL size limit in MB.
+func (opts *Options) GetWalSizeLimitMb() uint64 {
+ return uint64(C.rocksdb_options_get_WAL_size_limit_MB(opts.c))
+}

 // SetEnablePipelinedWrite enables pipelined write.
 //
@@ -1207,11 +1207,11 @@ func (opts *Options) SetManifestPreallocationSize(value uint64) {
 C.rocksdb_options_set_manifest_preallocation_size(opts.c, C.size_t(value))
 }

-// // GetManifestPreallocationSize returns the number of bytes
-// // to preallocate (via fallocate) the manifest files.
-// func (opts *Options) GetManifestPreallocationSize() uint64 {
-// return uint64(C.rocksdb_options_get_manifest_preallocation_size(opts.c))
-// }
+// GetManifestPreallocationSize returns the number of bytes
+// to preallocate (via fallocate) the manifest files.
+func (opts *Options) GetManifestPreallocationSize() uint64 {
+ return uint64(C.rocksdb_options_get_manifest_preallocation_size(opts.c))
+}

 // SetPurgeRedundantKvsWhileFlush enable/disable purging of
 // duplicate/deleted keys when a memtable is flushed to storage.
@@ -1227,10 +1227,10 @@ func (opts *Options) SetAllowMmapReads(value bool) {
 C.rocksdb_options_set_allow_mmap_reads(opts.c, boolToChar(value))
 }

-// // AllowMmapReads returns setting for enable/disable mmap reads for sst tables.
-// func (opts *Options) AllowMmapReads() bool {
-// return charToBool(C.rocksdb_options_get_allow_mmap_reads(opts.c))
-// }
+// AllowMmapReads returns setting for enable/disable mmap reads for sst tables.
+func (opts *Options) AllowMmapReads() bool {
+ return charToBool(C.rocksdb_options_get_allow_mmap_reads(opts.c))
+}

 // SetAllowMmapWrites enable/disable mmap writes for writing sst tables.
 // Default: false
@@ -1238,10 +1238,10 @@ func (opts *Options) SetAllowMmapWrites(value bool) {
 C.rocksdb_options_set_allow_mmap_writes(opts.c, boolToChar(value))
 }

-// // AllowMmapWrites returns setting for enable/disable mmap writes for sst tables.
-// func (opts *Options) AllowMmapWrites() bool {
-// return charToBool(C.rocksdb_options_get_allow_mmap_writes(opts.c))
-// }
+// AllowMmapWrites returns setting for enable/disable mmap writes for sst tables.
+func (opts *Options) AllowMmapWrites() bool {
+ return charToBool(C.rocksdb_options_get_allow_mmap_writes(opts.c))
+}

 // SetUseDirectReads enable/disable direct I/O mode (O_DIRECT) for reads
 // Default: false
@@ -1249,10 +1249,10 @@ func (opts *Options) SetUseDirectReads(value bool) {
 C.rocksdb_options_set_use_direct_reads(opts.c, boolToChar(value))
 }

-// // UseDirectReads returns setting for enable/disable direct I/O mode (O_DIRECT) for reads
-// func (opts *Options) UseDirectReads() bool {
-// return charToBool(C.rocksdb_options_get_use_direct_reads(opts.c))
-// }
+// UseDirectReads returns setting for enable/disable direct I/O mode (O_DIRECT) for reads
+func (opts *Options) UseDirectReads() bool {
+ return charToBool(C.rocksdb_options_get_use_direct_reads(opts.c))
+}
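The I/O getters above make awkward combinations detectable early; mmap reads and O_DIRECT reads are unlikely to be wanted together. A hypothetical guard (saneIOConfig is not part of this patch):

func saneIOConfig(opts *Options) bool {
	// Page-cache-backed mmap reads and direct I/O bypass the cache differently;
	// flag a configuration that asks for both.
	return !(opts.AllowMmapReads() && opts.UseDirectReads())
}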
 // SetUseDirectIOForFlushAndCompaction enable/disable direct I/O mode (O_DIRECT) for both reads and writes in background flush and compactions
 // When true, new_table_reader_for_compaction_inputs is forced to true.
@@ -1261,11 +1261,11 @@ func (opts *Options) SetUseDirectIOForFlushAndCompaction(value bool) {
 C.rocksdb_options_set_use_direct_io_for_flush_and_compaction(opts.c, boolToChar(value))
 }

-// // UseDirectIOForFlushAndCompaction returns setting for enable/disable direct I/O mode (O_DIRECT)
-// // for both reads and writes in background flush and compactions
-// func (opts *Options) UseDirectIOForFlushAndCompaction() bool {
-// return charToBool(C.rocksdb_options_get_use_direct_io_for_flush_and_compaction(opts.c))
-// }
+// UseDirectIOForFlushAndCompaction returns setting for enable/disable direct I/O mode (O_DIRECT)
+// for both reads and writes in background flush and compactions
+func (opts *Options) UseDirectIOForFlushAndCompaction() bool {
+ return charToBool(C.rocksdb_options_get_use_direct_io_for_flush_and_compaction(opts.c))
+}

 // SetIsFdCloseOnExec enable/dsiable child process inherit open files.
 // Default: true
@@ -1273,10 +1273,10 @@ func (opts *Options) SetIsFdCloseOnExec(value bool) {
 C.rocksdb_options_set_is_fd_close_on_exec(opts.c, boolToChar(value))
 }

-// // IsFdCloseOnExec returns setting for enable/dsiable child process inherit open files.
-// func (opts *Options) IsFdCloseOnExec() bool {
-// return charToBool(C.rocksdb_options_get_is_fd_close_on_exec(opts.c))
-// }
+// IsFdCloseOnExec returns setting for enable/disable child process inherit open files.
+func (opts *Options) IsFdCloseOnExec() bool {
+ return charToBool(C.rocksdb_options_get_is_fd_close_on_exec(opts.c))
+}

 // SetSkipLogErrorOnRecovery enable/disable skipping of
 // log corruption error on recovery (If client is ok with
@@ -1288,12 +1288,12 @@ func (opts *Options) SetSkipLogErrorOnRecovery(value bool) {
 C.rocksdb_options_set_skip_log_error_on_recovery(opts.c, boolToChar(value))
 }

-// // SkipLogErrorOnRecovery returns setting for enable/disable skipping of
-// // log corruption error on recovery (If client is ok with
-// // losing most recent changes).
-// func (opts *Options) SkipLogErrorOnRecovery() bool {
-// return charToBool(C.rocksdb_options_get_skip_log_error_on_recovery(opts.c))
-// }
+// SkipLogErrorOnRecovery returns setting for enable/disable skipping of
+// log corruption error on recovery (If client is ok with
+// losing most recent changes).
+func (opts *Options) SkipLogErrorOnRecovery() bool {
+ return charToBool(C.rocksdb_options_get_skip_log_error_on_recovery(opts.c))
+}

 // SetStatsDumpPeriodSec sets the stats dump period in seconds.
 //
@@ -1303,22 +1303,22 @@ func (opts *Options) SetStatsDumpPeriodSec(value uint) {
 C.rocksdb_options_set_stats_dump_period_sec(opts.c, C.uint(value))
 }

-// // GetStatsDumpPeriodSec returns the stats dump period in seconds.
-// func (opts *Options) GetStatsDumpPeriodSec() uint {
-// return uint(C.rocksdb_options_get_stats_dump_period_sec(opts.c))
-// }
+// GetStatsDumpPeriodSec returns the stats dump period in seconds.
+func (opts *Options) GetStatsDumpPeriodSec() uint {
+ return uint(C.rocksdb_options_get_stats_dump_period_sec(opts.c))
+}

-// // SetStatsPersistPeriodSec if not zero, dump rocksdb.stats to RocksDB every stats_persist_period_sec
-// //
-// // Default: 600
-// func (opts *Options) SetStatsPersistPeriodSec(value uint) {
-// C.rocksdb_options_set_stats_persist_period_sec(opts.c, C.uint(value))
-// }
+// SetStatsPersistPeriodSec if not zero, dumps rocksdb.stats to RocksDB every stats_persist_period_sec seconds.
+//
+// Default: 600
+func (opts *Options) SetStatsPersistPeriodSec(value uint) {
+ C.rocksdb_options_set_stats_persist_period_sec(opts.c, C.uint(value))
+}

-// // GetStatsPersistPeriodSec returns number of sec that RocksDB periodically dump stats.
-// func (opts *Options) GetStatsPersistPeriodSec() uint {
-// return uint(C.rocksdb_options_get_stats_persist_period_sec(opts.c))
-// }
+// GetStatsPersistPeriodSec returns the period in seconds at which RocksDB periodically dumps stats.
+func (opts *Options) GetStatsPersistPeriodSec() uint {
+ return uint(C.rocksdb_options_get_stats_persist_period_sec(opts.c))
+}

 // SetAdviseRandomOnOpen specifies whether we will hint the underlying
 // file system that the file access pattern is random, when a sst file is opened.
@@ -1327,11 +1327,11 @@ func (opts *Options) SetAdviseRandomOnOpen(value bool) {
 C.rocksdb_options_set_advise_random_on_open(opts.c, boolToChar(value))
 }

-// // AdviseRandomOnOpen returns whether we will hint the underlying
-// // file system that the file access pattern is random, when a sst file is opened.
-// func (opts *Options) AdviseRandomOnOpen() bool {
-// return charToBool(C.rocksdb_options_get_advise_random_on_open(opts.c))
-// }
+// AdviseRandomOnOpen returns whether we will hint the underlying
+// file system that the file access pattern is random, when a sst file is opened.
+func (opts *Options) AdviseRandomOnOpen() bool {
+ return charToBool(C.rocksdb_options_get_advise_random_on_open(opts.c))
+}

 // SetDbWriteBufferSize sets the amount of data to build up
 // in memtables across all column families before writing to disk.
@@ -1361,11 +1361,11 @@ func (opts *Options) SetAccessHintOnCompactionStart(value CompactionAccessPatter
 C.rocksdb_options_set_access_hint_on_compaction_start(opts.c, C.int(value))
 }

-// // GetAccessHintOnCompactionStart returns the file access pattern
-// // once a compaction is started.
-// func (opts *Options) GetAccessHintOnCompactionStart() CompactionAccessPattern {
-// return CompactionAccessPattern(C.rocksdb_options_get_access_hint_on_compaction_start(opts.c))
-// }
+// GetAccessHintOnCompactionStart returns the file access pattern
+// once a compaction is started.
+func (opts *Options) GetAccessHintOnCompactionStart() CompactionAccessPattern {
+ return CompactionAccessPattern(C.rocksdb_options_get_access_hint_on_compaction_start(opts.c))
+}
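SetStatsPersistPeriodSec is newly exposed alongside its getter; a sketch wiring both stats cadences together (enableStats is a hypothetical helper, values illustrative):

func enableStats(opts *Options) {
	opts.EnableStatistics()
	opts.SetStatsDumpPeriodSec(600)    // dump stats to the info log every 10 minutes
	opts.SetStatsPersistPeriodSec(600) // persist rocksdb.stats at the same cadence
	_ = opts.GetStatsPersistPeriodSec()
}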
 // SetUseAdaptiveMutex enable/disable adaptive mutex, which spins
 // in the user space before resorting to kernel.
@@ -1378,11 +1378,11 @@ func (opts *Options) SetUseAdaptiveMutex(value bool) {
 C.rocksdb_options_set_use_adaptive_mutex(opts.c, boolToChar(value))
 }

-// // UseAdaptiveMutex returns setting for enable/disable adaptive mutex, which spins
-// // in the user space before resorting to kernel.
-// func (opts *Options) UseAdaptiveMutex() bool {
-// return charToBool(C.rocksdb_options_get_use_adaptive_mutex(opts.c))
-// }
+// UseAdaptiveMutex returns setting for enable/disable adaptive mutex, which spins
+// in the user space before resorting to kernel.
+func (opts *Options) UseAdaptiveMutex() bool {
+ return charToBool(C.rocksdb_options_get_use_adaptive_mutex(opts.c))
+}

 // SetBytesPerSync sets the bytes per sync.
 //
@@ -1394,10 +1394,10 @@ func (opts *Options) SetBytesPerSync(value uint64) {
 C.rocksdb_options_set_bytes_per_sync(opts.c, C.uint64_t(value))
 }

-// // GetBytesPerSync return setting for bytes (size) per sync.
-// func (opts *Options) GetBytesPerSync() uint64 {
-// return uint64(C.rocksdb_options_get_bytes_per_sync(opts.c))
-// }
+// GetBytesPerSync returns setting for bytes (size) per sync.
+func (opts *Options) GetBytesPerSync() uint64 {
+ return uint64(C.rocksdb_options_get_bytes_per_sync(opts.c))
+}

 // SetCompactionStyle sets compaction style.
 //
@@ -1406,10 +1406,10 @@ func (opts *Options) SetCompactionStyle(value CompactionStyle) {
 C.rocksdb_options_set_compaction_style(opts.c, C.int(value))
 }

-// // GetCompactionStyle returns compaction style.
-// func (opts *Options) GetCompactionStyle() CompactionStyle {
-// return CompactionStyle(C.rocksdb_options_get_compaction_style(opts.c))
-// }
+// GetCompactionStyle returns compaction style.
+func (opts *Options) GetCompactionStyle() CompactionStyle {
+ return CompactionStyle(C.rocksdb_options_get_compaction_style(opts.c))
+}

 // SetUniversalCompactionOptions sets the options needed
 // to support Universal Style compactions.
@@ -1472,22 +1472,22 @@ func (opts *Options) SetAtomicFlush(value bool) {
 C.rocksdb_options_set_atomic_flush(opts.c, boolToChar(value))
 }

-// // IsAtomicFlush returns setting for atomic flushing.
-// // If true, RocksDB supports flushing multiple column families and committing
-// // their results atomically to MANIFEST. Note that it is not
-// // necessary to set atomic_flush to true if WAL is always enabled since WAL
-// // allows the database to be restored to the last persistent state in WAL.
-// // This option is useful when there are column families with writes NOT
-// // protected by WAL.
-// // For manual flush, application has to specify which column families to
-// // flush atomically in DB::Flush.
-// // For auto-triggered flush, RocksDB atomically flushes ALL column families.
-// //
-// // Currently, any WAL-enabled writes after atomic flush may be replayed
-// // independently if the process crashes later and tries to recover.
-// func (opts *Options) IsAtomicFlush() bool {
-// return charToBool(C.rocksdb_options_get_atomic_flush(opts.c))
-// }
+// IsAtomicFlush returns setting for atomic flushing.
+// If true, RocksDB supports flushing multiple column families and committing
+// their results atomically to MANIFEST. Note that it is not
+// necessary to set atomic_flush to true if WAL is always enabled since WAL
+// allows the database to be restored to the last persistent state in WAL.
+// This option is useful when there are column families with writes NOT
+// protected by WAL.
+// For manual flush, application has to specify which column families to
+// flush atomically in DB::Flush.
+// For auto-triggered flush, RocksDB atomically flushes ALL column families.
+//
+// Currently, any WAL-enabled writes after atomic flush may be replayed
+// independently if the process crashes later and tries to recover.
+func (opts *Options) IsAtomicFlush() bool {
+ return charToBool(C.rocksdb_options_get_atomic_flush(opts.c))
+}
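Per the IsAtomicFlush comment above, atomic flush matters when some column families write without WAL protection. A short sketch (requireAtomicFlush is a hypothetical helper):

func requireAtomicFlush(opts *Options) bool {
	opts.SetAtomicFlush(true) // flush all CFs and commit to MANIFEST atomically
	return opts.IsAtomicFlush()
}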
 // SetRowCache set global cache for table-level rows.
 //
@@ -1508,11 +1508,11 @@ func (opts *Options) SetMaxSequentialSkipInIterations(value uint64) {
 C.rocksdb_options_set_max_sequential_skip_in_iterations(opts.c, C.uint64_t(value))
 }

-// // GetMaxSequentialSkipInIterations returns the number of keys (with the same userkey)
-// // that will be sequentially skipped before a reseek is issued.
-// func (opts *Options) GetMaxSequentialSkipInIterations() uint64 {
-// return uint64(C.rocksdb_options_get_max_sequential_skip_in_iterations(opts.c))
-// }
+// GetMaxSequentialSkipInIterations returns the number of keys (with the same userkey)
+// that will be sequentially skipped before a reseek is issued.
+func (opts *Options) GetMaxSequentialSkipInIterations() uint64 {
+ return uint64(C.rocksdb_options_get_max_sequential_skip_in_iterations(opts.c))
+}

 // SetInplaceUpdateSupport enable/disable thread-safe inplace updates.
 //
@@ -1526,11 +1526,11 @@ func (opts *Options) SetInplaceUpdateSupport(value bool) {
 C.rocksdb_options_set_inplace_update_support(opts.c, boolToChar(value))
 }

-// // InplaceUpdateSupport returns setting for enable/disable
-// // thread-safe inplace updates.
-// func (opts *Options) InplaceUpdateSupport() bool {
-// return charToBool(C.rocksdb_options_get_inplace_update_support(opts.c))
-// }
+// InplaceUpdateSupport returns setting for enable/disable
+// thread-safe inplace updates.
+func (opts *Options) InplaceUpdateSupport() bool {
+ return charToBool(C.rocksdb_options_get_inplace_update_support(opts.c))
+}

 // SetInplaceUpdateNumLocks sets the number of locks used for inplace update.
 //
@@ -1539,10 +1539,10 @@ func (opts *Options) SetInplaceUpdateNumLocks(value uint) {
 C.rocksdb_options_set_inplace_update_num_locks(opts.c, C.size_t(value))
 }

-// // GetInplaceUpdateNumLocks returns number of locks used for inplace upddate.
-// func (opts *Options) GetInplaceUpdateNumLocks() uint {
-// return uint(C.rocksdb_options_get_inplace_update_num_locks(opts.c))
-// }
+// GetInplaceUpdateNumLocks returns number of locks used for inplace update.
+func (opts *Options) GetInplaceUpdateNumLocks() uint {
+ return uint(C.rocksdb_options_get_inplace_update_num_locks(opts.c))
+}

 // SetMemtableHugePageSize sets the page size for huge page for
 // arena used by the memtable.
@@ -1559,11 +1559,11 @@ func (opts *Options) SetMemtableHugePageSize(value uint64) {
 C.rocksdb_options_set_memtable_huge_page_size(opts.c, C.size_t(value))
 }

-// // GetMemtableHugePageSize returns the page size for huge page for
-// // arena used by the memtable.
-// func (opts *Options) GetMemtableHugePageSize() uint64 {
-// return uint64(C.rocksdb_options_get_memtable_huge_page_size(opts.c))
-// }
+// GetMemtableHugePageSize returns the page size for huge page for
+// arena used by the memtable.
+func (opts *Options) GetMemtableHugePageSize() uint64 {
+ return uint64(C.rocksdb_options_get_memtable_huge_page_size(opts.c))
+}

 // SetBloomLocality sets the bloom locality.
 //
@@ -1580,17 +1580,17 @@ func (opts *Options) SetBloomLocality(value uint32) {
 C.rocksdb_options_set_bloom_locality(opts.c, C.uint32_t(value))
 }

-// // GetBloomLocality returns control locality of bloom filter probes to improve cache miss rate.
-// // This option only applies to memtable prefix bloom and plaintable
-// // prefix bloom. It essentially limits the max number of cache lines each
-// // bloom filter check can touch.
-// // This optimization is turned off when set to 0. The number should never
-// // be greater than number of probes. This option can boost performance
-// // for in-memory workload but should use with care since it can cause
-// // higher false positive rate.
-// func (opts *Options) GetBloomLocality() uint32 {
-// return uint32(C.rocksdb_options_get_bloom_locality(opts.c))
-// }
+// GetBloomLocality returns the setting that controls locality of bloom filter probes to improve cache miss rate.
+// This option only applies to memtable prefix bloom and plaintable
+// prefix bloom. It essentially limits the max number of cache lines each
+// bloom filter check can touch.
+// This optimization is turned off when set to 0. The number should never
+// be greater than number of probes. This option can boost performance
+// for in-memory workload but should be used with care since it can cause
+// higher false positive rate.
+func (opts *Options) GetBloomLocality() uint32 {
+ return uint32(C.rocksdb_options_get_bloom_locality(opts.c))
+}

 // SetMaxSuccessiveMerges sets the maximum number of
 // successive merge operations on a key in the memtable.
@@ -1605,17 +1605,17 @@ func (opts *Options) SetMaxSuccessiveMerges(value uint) {
 C.rocksdb_options_set_max_successive_merges(opts.c, C.size_t(value))
 }

-// // GetMaxSuccessiveMerges returns the maximum number of
-// // successive merge operations on a key in the memtable.
-// //
-// // When a merge operation is added to the memtable and the maximum number of
-// // successive merges is reached, the value of the key will be calculated and
-// // inserted into the memtable instead of the merge operation. This will
-// // ensure that there are never more than max_successive_merges merge
-// // operations in the memtable.
-// func (opts *Options) GetMaxSuccessiveMerges() uint {
-// return uint(C.rocksdb_options_get_max_successive_merges(opts.c))
-// }
+// GetMaxSuccessiveMerges returns the maximum number of
+// successive merge operations on a key in the memtable.
+//
+// When a merge operation is added to the memtable and the maximum number of
+// successive merges is reached, the value of the key will be calculated and
+// inserted into the memtable instead of the merge operation. This will
+// ensure that there are never more than max_successive_merges merge
+// operations in the memtable.
+func (opts *Options) GetMaxSuccessiveMerges() uint {
+ return uint(C.rocksdb_options_get_max_successive_merges(opts.c))
+}

 // EnableStatistics enable statistics.
 func (opts *Options) EnableStatistics() {
@@ -1729,10 +1729,10 @@ func (opts *Options) SetMemTablePrefixBloomSizeRatio(value float64) {
 C.rocksdb_options_set_memtable_prefix_bloom_size_ratio(opts.c, C.double(value))
 }

-// // GetMemTablePrefixBloomSizeRatio returns memtable_prefix_bloom_size_ratio.
-// func (opts *Options) GetMemTablePrefixBloomSizeRatio() float64 {
-// return float64(C.rocksdb_options_get_memtable_prefix_bloom_size_ratio(opts.c))
-// }
+// GetMemTablePrefixBloomSizeRatio returns memtable_prefix_bloom_size_ratio.
+func (opts *Options) GetMemTablePrefixBloomSizeRatio() float64 {
+ return float64(C.rocksdb_options_get_memtable_prefix_bloom_size_ratio(opts.c))
+}
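The GetMaxSuccessiveMerges comment explains the cap: once the limit is hit, the merge chain is folded into a plain value in the memtable, bounding read-side work. A sketch (capMergeChains is hypothetical, the value illustrative):

func capMergeChains(opts *Options) {
	opts.SetMaxSuccessiveMerges(64) // fold merge chains longer than 64 operands
	_ = opts.GetMaxSuccessiveMerges()
}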
 // SetOptimizeFiltersForHits sets optimize_filters_for_hits
 // This flag specifies that the implementation should optimize the filters
@@ -1753,10 +1753,10 @@ func (opts *Options) SetOptimizeFiltersForHits(value bool) {
 C.rocksdb_options_set_optimize_filters_for_hits(opts.c, C.int(boolToChar(value)))
 }

-// // OptimizeFiltersForHits gets setting for optimize_filters_for_hits.
-// func (opts *Options) OptimizeFiltersForHits() bool {
-// return charToBool(C.rocksdb_options_get_optimize_filters_for_hits(opts.c))
-// }
+// OptimizeFiltersForHits gets setting for optimize_filters_for_hits.
+func (opts *Options) OptimizeFiltersForHits() bool {
+ return charToBool(C.rocksdb_options_get_optimize_filters_for_hits(opts.c))
+}

 // CompactionReadaheadSize if non-zero, we perform bigger reads when doing
 // compaction. If you're running RocksDB on spinning disks, you should set
@@ -1926,10 +1926,10 @@ func (opts *Options) SetMaxBackgroundJobs(value int) {
 C.rocksdb_options_set_max_background_jobs(opts.c, C.int(value))
 }

-// // GetMaxBackgroundJobs returns maximum number of concurrent background jobs setting.
-// func (opts *Options) GetMaxBackgroundJobs() int {
-// return int(C.rocksdb_options_get_max_background_jobs(opts.c))
-// }
+// GetMaxBackgroundJobs returns maximum number of concurrent background jobs setting.
+func (opts *Options) GetMaxBackgroundJobs() int {
+ return int(C.rocksdb_options_get_max_background_jobs(opts.c))
+}

 // SetRecycleLogFileNum if non-zero, we will reuse previously written
 // log files for new logs, overwriting the old data. The value
@@ -1942,10 +1942,10 @@ func (opts *Options) SetRecycleLogFileNum(value uint) {
 C.rocksdb_options_set_recycle_log_file_num(opts.c, C.size_t(value))
 }

-// // GetRecycleLogFileNum returns setting for number of recycling log files.
-// func (opts *Options) GetRecycleLogFileNum() uint {
-// return uint(C.rocksdb_options_get_recycle_log_file_num(opts.c))
-// }
+// GetRecycleLogFileNum returns setting for number of recycling log files.
+func (opts *Options) GetRecycleLogFileNum() uint {
+ return uint(C.rocksdb_options_get_recycle_log_file_num(opts.c))
+}

 // SetWALBytesPerSync same as bytes_per_sync, but applies to WAL files.
 //
@@ -1956,10 +1956,10 @@ func (opts *Options) SetWALBytesPerSync(value uint64) {
 C.rocksdb_options_set_wal_bytes_per_sync(opts.c, C.uint64_t(value))
 }

-// // GetWALBytesPerSync same as bytes_per_sync, but applies to WAL files.
-// func (opts *Options) GetWALBytesPerSync() uint64 {
-// return uint64(C.rocksdb_options_get_wal_bytes_per_sync(opts.c))
-// }
+// GetWALBytesPerSync same as bytes_per_sync, but applies to WAL files.
+func (opts *Options) GetWALBytesPerSync() uint64 {
+ return uint64(C.rocksdb_options_get_wal_bytes_per_sync(opts.c))
+}

 // SetWritableFileMaxBufferSize is the maximum buffer size that is
 // used by WritableFileWriter.
@@ -1975,15 +1975,15 @@ func (opts *Options) SetWritableFileMaxBufferSize(value uint64) {
 C.rocksdb_options_set_writable_file_max_buffer_size(opts.c, C.uint64_t(value))
 }

-// // GetWritableFileMaxBufferSize returns the maximum buffer size that is
-// // used by WritableFileWriter.
-// // On Windows, we need to maintain an aligned buffer for writes.
-// // We allow the buffer to grow until it's size hits the limit in buffered
-// // IO and fix the buffer size when using direct IO to ensure alignment of
-// // write requests if the logical sector size is unusual
-// func (opts *Options) GetWritableFileMaxBufferSize() uint64 {
-// return uint64(C.rocksdb_options_get_writable_file_max_buffer_size(opts.c))
-// }
+// GetWritableFileMaxBufferSize returns the maximum buffer size that is
+// used by WritableFileWriter.
+// On Windows, we need to maintain an aligned buffer for writes.
+// We allow the buffer to grow until its size hits the limit in buffered
+// IO and fix the buffer size when using direct IO to ensure alignment of
+// write requests if the logical sector size is unusual.
+func (opts *Options) GetWritableFileMaxBufferSize() uint64 {
+ return uint64(C.rocksdb_options_get_writable_file_max_buffer_size(opts.c))
+}

 // SetEnableWriteThreadAdaptiveYield if true, threads synchronizing with
 // the write batch group leader will wait for up to write_thread_max_yield_usec
@@ -1996,14 +1996,14 @@ func (opts *Options) SetEnableWriteThreadAdaptiveYield(value bool) {
 C.rocksdb_options_set_enable_write_thread_adaptive_yield(opts.c, boolToChar(value))
 }

-// // EnabledWriteThreadAdaptiveYield if true, threads synchronizing with
-// // the write batch group leader will wait for up to write_thread_max_yield_usec
-// // before blocking on a mutex. This can substantially improve throughput
-// // for concurrent workloads, regardless of whether allow_concurrent_memtable_write
-// // is enabled.
-// func (opts *Options) EnabledWriteThreadAdaptiveYield() bool {
-// return charToBool(C.rocksdb_options_get_enable_write_thread_adaptive_yield(opts.c))
-// }
+// EnabledWriteThreadAdaptiveYield if true, threads synchronizing with
+// the write batch group leader will wait for up to write_thread_max_yield_usec
+// before blocking on a mutex. This can substantially improve throughput
+// for concurrent workloads, regardless of whether allow_concurrent_memtable_write
+// is enabled.
+func (opts *Options) EnabledWriteThreadAdaptiveYield() bool {
+ return charToBool(C.rocksdb_options_get_enable_write_thread_adaptive_yield(opts.c))
+}

 // SetReportBackgroundIOStats measures IO stats in compactions and
 // flushes, if true.
@@ -2015,11 +2015,11 @@ func (opts *Options) SetReportBackgroundIOStats(value bool) {
 C.rocksdb_options_set_report_bg_io_stats(opts.c, C.int(boolToChar(value)))
 }

-// // ReportBackgroundIOStats returns if measureing IO stats in compactions and
-// // flushes is turned on.
-// func (opts *Options) ReportBackgroundIOStats() bool {
-// return charToBool(C.rocksdb_options_get_report_bg_io_stats(opts.c))
-// }
+// ReportBackgroundIOStats returns if measuring IO stats in compactions and
+// flushes is turned on.
+func (opts *Options) ReportBackgroundIOStats() bool {
+ return charToBool(C.rocksdb_options_get_report_bg_io_stats(opts.c))
+}

 // SetUnorderedWrite sets unordered_write to true trades higher write throughput with
 // relaxing the immutability guarantee of snapshots. This violates the
@@ -2062,10 +2062,10 @@ func (opts *Options) SetBaseBackgroundCompactions(value int) {
 C.rocksdb_options_set_base_background_compactions(opts.c, C.int(value))
 }

-// // GetBaseBackgroundCompactions gets base background compactions setting.
-// func (opts *Options) GetBaseBackgroundCompactions() int {
-// return int(C.rocksdb_options_get_base_background_compactions(opts.c))
-// }
+// GetBaseBackgroundCompactions gets base background compactions setting.
+func (opts *Options) GetBaseBackgroundCompactions() int {
+ return int(C.rocksdb_options_get_base_background_compactions(opts.c))
+}
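A final tuning sketch combining getters restored in the hunks above (tuneWritePath is a hypothetical helper):

func tuneWritePath(opts *Options) bool {
	opts.SetEnableWriteThreadAdaptiveYield(true) // spin briefly before blocking on the mutex
	opts.SetReportBackgroundIOStats(true)        // account IO in compactions and flushes
	return opts.EnabledWriteThreadAdaptiveYield() && opts.ReportBackgroundIOStats()
}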
 // SetCuckooTableFactory sets to use cuckoo table factory.
 //
diff --git a/options_test.go b/options_test.go
index 69b9c6d..15403ab 100644
--- a/options_test.go
+++ b/options_test.go
@@ -1,382 +1,382 @@
 package grocksdb

-// import (
-// "testing"
+import (
+ "testing"

-// "github.com/stretchr/testify/require"
-// )
+ "github.com/stretchr/testify/require"
+)

-// func TestOptions(t *testing.T) {
-// opts := NewDefaultOptions()
-// defer opts.Destroy()
+func TestOptions(t *testing.T) {
+ opts := NewDefaultOptions()
+ defer opts.Destroy()

-// cto := NewCuckooTableOptions()
-// opts.SetCuckooTableFactory(cto)
+ cto := NewCuckooTableOptions()
+ opts.SetCuckooTableFactory(cto)

-// require.EqualValues(t, PointInTimeRecovery, opts.GetWALRecoveryMode())
-// opts.SetWALRecoveryMode(SkipAnyCorruptedRecordsRecovery)
-// require.EqualValues(t, SkipAnyCorruptedRecordsRecovery, opts.GetWALRecoveryMode())
+ require.EqualValues(t, PointInTimeRecovery, opts.GetWALRecoveryMode())
+ opts.SetWALRecoveryMode(SkipAnyCorruptedRecordsRecovery)
+ require.EqualValues(t, SkipAnyCorruptedRecordsRecovery, opts.GetWALRecoveryMode())

-// require.EqualValues(t, 2, opts.GetMaxBackgroundJobs())
-// opts.SetMaxBackgroundJobs(10)
-// require.EqualValues(t, 10, opts.GetMaxBackgroundJobs())
+ require.EqualValues(t, 2, opts.GetMaxBackgroundJobs())
+ opts.SetMaxBackgroundJobs(10)
+ require.EqualValues(t, 10, opts.GetMaxBackgroundJobs())

-// opts.SetMaxBackgroundCompactions(9)
-// require.EqualValues(t, 9, opts.GetMaxBackgroundCompactions())
+ opts.SetMaxBackgroundCompactions(9)
+ require.EqualValues(t, 9, opts.GetMaxBackgroundCompactions())

-// opts.SetBaseBackgroundCompactions(4)
-// require.EqualValues(t, 4, opts.GetBaseBackgroundCompactions())
+ opts.SetBaseBackgroundCompactions(4)
+ require.EqualValues(t, 4, opts.GetBaseBackgroundCompactions())

-// opts.SetMaxBackgroundFlushes(8)
-// require.EqualValues(t, 8, opts.GetMaxBackgroundFlushes())
+ opts.SetMaxBackgroundFlushes(8)
+ require.EqualValues(t, 8, opts.GetMaxBackgroundFlushes())

-// opts.SetMaxLogFileSize(1 << 30)
-// require.EqualValues(t, 1<<30, opts.GetMaxLogFileSize())
+ opts.SetMaxLogFileSize(1 << 30)
+ require.EqualValues(t, 1<<30, opts.GetMaxLogFileSize())

-// opts.SetLogFileTimeToRoll(924)
-// require.EqualValues(t, 924, opts.GetLogFileTimeToRoll())
+ opts.SetLogFileTimeToRoll(924)
+ require.EqualValues(t, 924, opts.GetLogFileTimeToRoll())

-// opts.SetKeepLogFileNum(19)
-// require.EqualValues(t, 19, opts.GetKeepLogFileNum())
+ opts.SetKeepLogFileNum(19)
+ require.EqualValues(t, 19, opts.GetKeepLogFileNum())

-// opts.SetRecycleLogFileNum(81)
-// require.EqualValues(t, 81, opts.GetRecycleLogFileNum())
+ opts.SetRecycleLogFileNum(81)
+ require.EqualValues(t, 81, opts.GetRecycleLogFileNum())

-// opts.SetSoftRateLimit(0.8)
-// require.EqualValues(t, 0.8, opts.GetSoftRateLimit())
+ opts.SetSoftRateLimit(0.8)
+ require.EqualValues(t, 0.8, opts.GetSoftRateLimit())

-// opts.SetHardRateLimit(0.5)
-// require.EqualValues(t, 0.5, opts.GetHardRateLimit())
+ opts.SetHardRateLimit(0.5)
+ require.EqualValues(t, 0.5, opts.GetHardRateLimit())

-// opts.SetSoftPendingCompactionBytesLimit(50 << 18)
-// require.EqualValues(t, 50<<18, opts.GetSoftPendingCompactionBytesLimit())
+ opts.SetSoftPendingCompactionBytesLimit(50 << 18)
+ require.EqualValues(t, 50<<18, opts.GetSoftPendingCompactionBytesLimit())

-// opts.SetHardPendingCompactionBytesLimit(50 << 19)
-// require.EqualValues(t, 50<<19, opts.GetHardPendingCompactionBytesLimit())
+ opts.SetHardPendingCompactionBytesLimit(50 << 19)
+ require.EqualValues(t, 50<<19, opts.GetHardPendingCompactionBytesLimit())
-// opts.SetRateLimitDelayMaxMilliseconds(5000)
-// require.EqualValues(t, 5000, opts.GetRateLimitDelayMaxMilliseconds())
+ opts.SetRateLimitDelayMaxMilliseconds(5000)
+ require.EqualValues(t, 5000, opts.GetRateLimitDelayMaxMilliseconds())

-// require.EqualValues(t, uint64(0x40000000), opts.GetMaxManifestFileSize())
-// opts.SetMaxManifestFileSize(23 << 10)
-// require.EqualValues(t, 23<<10, opts.GetMaxManifestFileSize())
+ require.EqualValues(t, uint64(0x40000000), opts.GetMaxManifestFileSize())
+ opts.SetMaxManifestFileSize(23 << 10)
+ require.EqualValues(t, 23<<10, opts.GetMaxManifestFileSize())

-// opts.SetTableCacheNumshardbits(5)
-// require.EqualValues(t, 5, opts.GetTableCacheNumshardbits())
+ opts.SetTableCacheNumshardbits(5)
+ require.EqualValues(t, 5, opts.GetTableCacheNumshardbits())

-// opts.SetArenaBlockSize(9 << 20)
-// require.EqualValues(t, 9<<20, opts.GetArenaBlockSize())
+ opts.SetArenaBlockSize(9 << 20)
+ require.EqualValues(t, 9<<20, opts.GetArenaBlockSize())

-// opts.SetUseFsync(true)
-// require.EqualValues(t, true, opts.UseFsync())
+ opts.SetUseFsync(true)
+ require.EqualValues(t, true, opts.UseFsync())

-// opts.SetLevelCompactionDynamicLevelBytes(true)
-// require.EqualValues(t, true, opts.GetLevelCompactionDynamicLevelBytes())
+ opts.SetLevelCompactionDynamicLevelBytes(true)
+ require.EqualValues(t, true, opts.GetLevelCompactionDynamicLevelBytes())

-// opts.SetWALTtlSeconds(52)
-// require.EqualValues(t, 52, opts.GetWALTtlSeconds())
+ opts.SetWALTtlSeconds(52)
+ require.EqualValues(t, 52, opts.GetWALTtlSeconds())

-// opts.SetWalSizeLimitMb(540)
-// require.EqualValues(t, 540, opts.GetWalSizeLimitMb())
+ opts.SetWalSizeLimitMb(540)
+ require.EqualValues(t, 540, opts.GetWalSizeLimitMb())

-// require.EqualValues(t, 4<<20, opts.GetManifestPreallocationSize())
-// opts.SetManifestPreallocationSize(5 << 10)
-// require.EqualValues(t, 5<<10, opts.GetManifestPreallocationSize())
+ require.EqualValues(t, 4<<20, opts.GetManifestPreallocationSize())
+ opts.SetManifestPreallocationSize(5 << 10)
+ require.EqualValues(t, 5<<10, opts.GetManifestPreallocationSize())

-// opts.SetAllowMmapReads(true)
-// require.EqualValues(t, true, opts.AllowMmapReads())
+ opts.SetAllowMmapReads(true)
+ require.EqualValues(t, true, opts.AllowMmapReads())

-// require.EqualValues(t, false, opts.AllowMmapWrites())
-// opts.SetAllowMmapWrites(true)
-// require.EqualValues(t, true, opts.AllowMmapWrites())
+ require.EqualValues(t, false, opts.AllowMmapWrites())
+ opts.SetAllowMmapWrites(true)
+ require.EqualValues(t, true, opts.AllowMmapWrites())

-// opts.SetUseDirectReads(true)
-// require.EqualValues(t, true, opts.UseDirectReads())
+ opts.SetUseDirectReads(true)
+ require.EqualValues(t, true, opts.UseDirectReads())

-// opts.SetUseDirectIOForFlushAndCompaction(true)
-// require.EqualValues(t, true, opts.UseDirectIOForFlushAndCompaction())
+ opts.SetUseDirectIOForFlushAndCompaction(true)
+ require.EqualValues(t, true, opts.UseDirectIOForFlushAndCompaction())

-// opts.SetIsFdCloseOnExec(true)
-// require.EqualValues(t, true, opts.IsFdCloseOnExec())
+ opts.SetIsFdCloseOnExec(true)
+ require.EqualValues(t, true, opts.IsFdCloseOnExec())

-// opts.SetSkipLogErrorOnRecovery(true)
-// require.EqualValues(t, true, opts.SkipLogErrorOnRecovery())
+ opts.SetSkipLogErrorOnRecovery(true)
+ require.EqualValues(t, true, opts.SkipLogErrorOnRecovery())
-// opts.SetStatsDumpPeriodSec(79)
-// require.EqualValues(t, 79, opts.GetStatsDumpPeriodSec())
+ opts.SetStatsDumpPeriodSec(79)
+ require.EqualValues(t, 79, opts.GetStatsDumpPeriodSec())

-// opts.SetStatsPersistPeriodSec(97)
-// require.EqualValues(t, 97, opts.GetStatsPersistPeriodSec())
+ opts.SetStatsPersistPeriodSec(97)
+ require.EqualValues(t, 97, opts.GetStatsPersistPeriodSec())

-// opts.SetAdviseRandomOnOpen(true)
-// require.EqualValues(t, true, opts.AdviseRandomOnOpen())
+ opts.SetAdviseRandomOnOpen(true)
+ require.EqualValues(t, true, opts.AdviseRandomOnOpen())

-// opts.SetAccessHintOnCompactionStart(SequentialCompactionAccessPattern)
-// require.EqualValues(t, SequentialCompactionAccessPattern, opts.GetAccessHintOnCompactionStart())
+ opts.SetAccessHintOnCompactionStart(SequentialCompactionAccessPattern)
+ require.EqualValues(t, SequentialCompactionAccessPattern, opts.GetAccessHintOnCompactionStart())

-// opts.SetDbWriteBufferSize(1 << 30)
-// require.EqualValues(t, 1<<30, opts.GetDbWriteBufferSize())
+ opts.SetDbWriteBufferSize(1 << 30)
+ require.EqualValues(t, 1<<30, opts.GetDbWriteBufferSize())

-// opts.SetUseAdaptiveMutex(true)
-// require.EqualValues(t, true, opts.UseAdaptiveMutex())
+ opts.SetUseAdaptiveMutex(true)
+ require.EqualValues(t, true, opts.UseAdaptiveMutex())

-// opts.SetBytesPerSync(68 << 10)
-// require.EqualValues(t, 68<<10, opts.GetBytesPerSync())
+ opts.SetBytesPerSync(68 << 10)
+ require.EqualValues(t, 68<<10, opts.GetBytesPerSync())

-// opts.SetWALBytesPerSync(69 << 10)
-// require.EqualValues(t, 69<<10, opts.GetWALBytesPerSync())
+ opts.SetWALBytesPerSync(69 << 10)
+ require.EqualValues(t, 69<<10, opts.GetWALBytesPerSync())

-// opts.SetWritableFileMaxBufferSize(9 << 20)
-// require.EqualValues(t, 9<<20, opts.GetWritableFileMaxBufferSize())
+ opts.SetWritableFileMaxBufferSize(9 << 20)
+ require.EqualValues(t, 9<<20, opts.GetWritableFileMaxBufferSize())

-// opts.SetAllowConcurrentMemtableWrites(true)
-// require.EqualValues(t, true, opts.AllowConcurrentMemtableWrites())
+ opts.SetAllowConcurrentMemtableWrites(true)
+ require.EqualValues(t, true, opts.AllowConcurrentMemtableWrites())

-// opts.SetEnableWriteThreadAdaptiveYield(true)
-// require.EqualValues(t, true, opts.EnabledWriteThreadAdaptiveYield())
+ opts.SetEnableWriteThreadAdaptiveYield(true)
+ require.EqualValues(t, true, opts.EnabledWriteThreadAdaptiveYield())

-// opts.SetMaxSequentialSkipInIterations(199)
-// require.EqualValues(t, 199, opts.GetMaxSequentialSkipInIterations())
+ opts.SetMaxSequentialSkipInIterations(199)
+ require.EqualValues(t, 199, opts.GetMaxSequentialSkipInIterations())

-// opts.SetDisableAutoCompactions(true)
-// require.EqualValues(t, true, opts.DisabledAutoCompactions())
+ opts.SetDisableAutoCompactions(true)
+ require.EqualValues(t, true, opts.DisabledAutoCompactions())

-// opts.SetOptimizeFiltersForHits(true)
-// require.EqualValues(t, true, opts.OptimizeFiltersForHits())
+ opts.SetOptimizeFiltersForHits(true)
+ require.EqualValues(t, true, opts.OptimizeFiltersForHits())

-// opts.SetDeleteObsoleteFilesPeriodMicros(1234)
-// require.EqualValues(t, 1234, opts.GetDeleteObsoleteFilesPeriodMicros())
+ opts.SetDeleteObsoleteFilesPeriodMicros(1234)
+ require.EqualValues(t, 1234, opts.GetDeleteObsoleteFilesPeriodMicros())

-// opts.SetMemTablePrefixBloomSizeRatio(0.3)
-// require.EqualValues(t, 0.3, opts.GetMemTablePrefixBloomSizeRatio())
+ opts.SetMemTablePrefixBloomSizeRatio(0.3)
+ require.EqualValues(t, 0.3, opts.GetMemTablePrefixBloomSizeRatio())
-// opts.SetMaxCompactionBytes(111222)
-// require.EqualValues(t, 111222, opts.GetMaxCompactionBytes())
+ opts.SetMaxCompactionBytes(111222)
+ require.EqualValues(t, 111222, opts.GetMaxCompactionBytes())

-// opts.SetMemtableHugePageSize(223344)
-// require.EqualValues(t, 223344, opts.GetMemtableHugePageSize())
+ opts.SetMemtableHugePageSize(223344)
+ require.EqualValues(t, 223344, opts.GetMemtableHugePageSize())

-// opts.SetMaxSuccessiveMerges(99)
-// require.EqualValues(t, 99, opts.GetMaxSuccessiveMerges())
+ opts.SetMaxSuccessiveMerges(99)
+ require.EqualValues(t, 99, opts.GetMaxSuccessiveMerges())

-// opts.SetBloomLocality(5)
-// require.EqualValues(t, 5, opts.GetBloomLocality())
+ opts.SetBloomLocality(5)
+ require.EqualValues(t, 5, opts.GetBloomLocality())

-// require.EqualValues(t, false, opts.InplaceUpdateSupport())
-// opts.SetInplaceUpdateSupport(true)
-// require.EqualValues(t, true, opts.InplaceUpdateSupport())
+ require.EqualValues(t, false, opts.InplaceUpdateSupport())
+ opts.SetInplaceUpdateSupport(true)
+ require.EqualValues(t, true, opts.InplaceUpdateSupport())

-// require.EqualValues(t, 10000, opts.GetInplaceUpdateNumLocks())
-// opts.SetInplaceUpdateNumLocks(8)
-// require.EqualValues(t, 8, opts.GetInplaceUpdateNumLocks())
+ require.EqualValues(t, 10000, opts.GetInplaceUpdateNumLocks())
+ opts.SetInplaceUpdateNumLocks(8)
+ require.EqualValues(t, 8, opts.GetInplaceUpdateNumLocks())

-// opts.SetReportBackgroundIOStats(true)
-// require.EqualValues(t, true, opts.ReportBackgroundIOStats())
+ opts.SetReportBackgroundIOStats(true)
+ require.EqualValues(t, true, opts.ReportBackgroundIOStats())

-// opts.SetMaxTotalWalSize(10 << 30)
-// require.EqualValues(t, 10<<30, opts.GetMaxTotalWalSize())
+ opts.SetMaxTotalWalSize(10 << 30)
+ require.EqualValues(t, 10<<30, opts.GetMaxTotalWalSize())

-// opts.SetBottommostCompression(ZLibCompression)
-// require.EqualValues(t, ZLibCompression, opts.GetBottommostCompression())
+ opts.SetBottommostCompression(ZLibCompression)
+ require.EqualValues(t, ZLibCompression, opts.GetBottommostCompression())

-// require.EqualValues(t, SnappyCompression, opts.GetCompression())
-// opts.SetCompression(LZ4Compression)
-// require.EqualValues(t, LZ4Compression, opts.GetCompression())
+ require.EqualValues(t, SnappyCompression, opts.GetCompression())
+ opts.SetCompression(LZ4Compression)
+ require.EqualValues(t, LZ4Compression, opts.GetCompression())

-// require.EqualValues(t, LevelCompactionStyle, opts.GetCompactionStyle())
-// opts.SetCompactionStyle(UniversalCompactionStyle)
-// require.EqualValues(t, UniversalCompactionStyle, opts.GetCompactionStyle())
+ require.EqualValues(t, LevelCompactionStyle, opts.GetCompactionStyle())
+ opts.SetCompactionStyle(UniversalCompactionStyle)
+ require.EqualValues(t, UniversalCompactionStyle, opts.GetCompactionStyle())

-// require.EqualValues(t, false, opts.IsAtomicFlush())
-// opts.SetAtomicFlush(true)
-// require.EqualValues(t, true, opts.IsAtomicFlush())
+ require.EqualValues(t, false, opts.IsAtomicFlush())
+ opts.SetAtomicFlush(true)
+ require.EqualValues(t, true, opts.IsAtomicFlush())

-// require.EqualValues(t, false, opts.CreateIfMissing())
-// opts.SetCreateIfMissing(true)
-// require.EqualValues(t, true, opts.CreateIfMissing())
+ require.EqualValues(t, false, opts.CreateIfMissing())
+ opts.SetCreateIfMissing(true)
+ require.EqualValues(t, true, opts.CreateIfMissing())

-// require.EqualValues(t, false, opts.CreateIfMissingColumnFamilies())
+ require.EqualValues(t, false, opts.CreateIfMissingColumnFamilies())
-// opts.SetCreateIfMissingColumnFamilies(true)
-// require.EqualValues(t, true, opts.CreateIfMissingColumnFamilies())
+ opts.SetCreateIfMissingColumnFamilies(true)
+ require.EqualValues(t, true, opts.CreateIfMissingColumnFamilies())

-// opts.SetErrorIfExists(true)
-// require.EqualValues(t, true, opts.ErrorIfExists())
+ opts.SetErrorIfExists(true)
+ require.EqualValues(t, true, opts.ErrorIfExists())

-// opts.SetParanoidChecks(true)
-// require.EqualValues(t, true, opts.ParanoidChecks())
+ opts.SetParanoidChecks(true)
+ require.EqualValues(t, true, opts.ParanoidChecks())

-// require.EqualValues(t, InfoInfoLogLevel, opts.GetInfoLogLevel())
-// opts.SetInfoLogLevel(WarnInfoLogLevel)
-// require.EqualValues(t, WarnInfoLogLevel, opts.GetInfoLogLevel())
+ require.EqualValues(t, InfoInfoLogLevel, opts.GetInfoLogLevel())
+ opts.SetInfoLogLevel(WarnInfoLogLevel)
+ require.EqualValues(t, WarnInfoLogLevel, opts.GetInfoLogLevel())

-// require.EqualValues(t, 64<<20, opts.GetWriteBufferSize())
-// opts.SetWriteBufferSize(1 << 19)
-// require.EqualValues(t, 1<<19, opts.GetWriteBufferSize())
+ require.EqualValues(t, 64<<20, opts.GetWriteBufferSize())
+ opts.SetWriteBufferSize(1 << 19)
+ require.EqualValues(t, 1<<19, opts.GetWriteBufferSize())

-// require.EqualValues(t, 2, opts.GetMaxWriteBufferNumber())
-// opts.SetMaxWriteBufferNumber(15)
-// require.EqualValues(t, 15, opts.GetMaxWriteBufferNumber())
+ require.EqualValues(t, 2, opts.GetMaxWriteBufferNumber())
+ opts.SetMaxWriteBufferNumber(15)
+ require.EqualValues(t, 15, opts.GetMaxWriteBufferNumber())

-// require.EqualValues(t, 1, opts.GetMinWriteBufferNumberToMerge())
-// opts.SetMinWriteBufferNumberToMerge(2)
-// require.EqualValues(t, 2, opts.GetMinWriteBufferNumberToMerge())
+ require.EqualValues(t, 1, opts.GetMinWriteBufferNumberToMerge())
+ opts.SetMinWriteBufferNumberToMerge(2)
+ require.EqualValues(t, 2, opts.GetMinWriteBufferNumberToMerge())

-// require.EqualValues(t, -1, opts.GetMaxOpenFiles())
-// opts.SetMaxOpenFiles(999)
-// require.EqualValues(t, 999, opts.GetMaxOpenFiles())
+ require.EqualValues(t, -1, opts.GetMaxOpenFiles())
+ opts.SetMaxOpenFiles(999)
+ require.EqualValues(t, 999, opts.GetMaxOpenFiles())

-// require.EqualValues(t, 16, opts.GetMaxFileOpeningThreads())
-// opts.SetMaxFileOpeningThreads(21)
-// require.EqualValues(t, 21, opts.GetMaxFileOpeningThreads())
+ require.EqualValues(t, 16, opts.GetMaxFileOpeningThreads())
+ opts.SetMaxFileOpeningThreads(21)
+ require.EqualValues(t, 21, opts.GetMaxFileOpeningThreads())

-// opts.SetCompressionPerLevel([]CompressionType{ZLibCompression, SnappyCompression})
+ opts.SetCompressionPerLevel([]CompressionType{ZLibCompression, SnappyCompression})

-// opts.SetEnv(NewMemEnv())
-// opts.SetEnv(NewDefaultEnv())
+ opts.SetEnv(NewMemEnv())
+ opts.SetEnv(NewDefaultEnv())

-// opts.IncreaseParallelism(8)
+ opts.IncreaseParallelism(8)

-// opts.OptimizeForPointLookup(19 << 20)
+ opts.OptimizeForPointLookup(19 << 20)

-// opts.OptimizeLevelStyleCompaction(10 << 20)
+ opts.OptimizeLevelStyleCompaction(10 << 20)

-// opts.OptimizeUniversalStyleCompaction(20 << 20)
+ opts.OptimizeUniversalStyleCompaction(20 << 20)

-// require.EqualValues(t, true, opts.AllowConcurrentMemtableWrites())
-// opts.SetAllowConcurrentMemtableWrites(false)
-// require.EqualValues(t, false, opts.AllowConcurrentMemtableWrites())
+ require.EqualValues(t, true, opts.AllowConcurrentMemtableWrites())
+ opts.SetAllowConcurrentMemtableWrites(false)
+ require.EqualValues(t, false, opts.AllowConcurrentMemtableWrites())
opts.SetCompressionOptionsZstdMaxTrainBytes(123 << 20) + opts.SetBottommostCompressionOptionsZstdMaxTrainBytes(234<<20, true) -// opts.SetBottommostCompressionOptions(NewDefaultCompressionOptions(), true) -// opts.SetCompressionOptions(NewDefaultCompressionOptions()) -// opts.SetMinLevelToCompress(2) + opts.SetBottommostCompressionOptions(NewDefaultCompressionOptions(), true) + opts.SetCompressionOptions(NewDefaultCompressionOptions()) + opts.SetMinLevelToCompress(2) -// require.EqualValues(t, 7, opts.GetNumLevels()) -// opts.SetNumLevels(8) -// require.EqualValues(t, 8, opts.GetNumLevels()) + require.EqualValues(t, 7, opts.GetNumLevels()) + opts.SetNumLevels(8) + require.EqualValues(t, 8, opts.GetNumLevels()) -// require.EqualValues(t, 2, opts.GetLevel0FileNumCompactionTrigger()) -// opts.SetLevel0FileNumCompactionTrigger(14) -// require.EqualValues(t, 14, opts.GetLevel0FileNumCompactionTrigger()) + require.EqualValues(t, 2, opts.GetLevel0FileNumCompactionTrigger()) + opts.SetLevel0FileNumCompactionTrigger(14) + require.EqualValues(t, 14, opts.GetLevel0FileNumCompactionTrigger()) -// require.EqualValues(t, 20, opts.GetLevel0SlowdownWritesTrigger()) -// opts.SetLevel0SlowdownWritesTrigger(17) -// require.EqualValues(t, 17, opts.GetLevel0SlowdownWritesTrigger()) + require.EqualValues(t, 20, opts.GetLevel0SlowdownWritesTrigger()) + opts.SetLevel0SlowdownWritesTrigger(17) + require.EqualValues(t, 17, opts.GetLevel0SlowdownWritesTrigger()) -// require.EqualValues(t, 36, opts.GetLevel0StopWritesTrigger()) -// opts.SetLevel0StopWritesTrigger(47) -// require.EqualValues(t, 47, opts.GetLevel0StopWritesTrigger()) + require.EqualValues(t, 36, opts.GetLevel0StopWritesTrigger()) + opts.SetLevel0StopWritesTrigger(47) + require.EqualValues(t, 47, opts.GetLevel0StopWritesTrigger()) -// opts.SetMaxMemCompactionLevel(4) + opts.SetMaxMemCompactionLevel(4) -// require.EqualValues(t, uint64(0x140000), opts.GetTargetFileSizeBase()) -// opts.SetTargetFileSizeBase(41 << 20) -// require.EqualValues(t, 41<<20, opts.GetTargetFileSizeBase()) + require.EqualValues(t, uint64(0x140000), opts.GetTargetFileSizeBase()) + opts.SetTargetFileSizeBase(41 << 20) + require.EqualValues(t, 41<<20, opts.GetTargetFileSizeBase()) -// require.EqualValues(t, 1, opts.GetTargetFileSizeMultiplier()) -// opts.SetTargetFileSizeMultiplier(3) -// require.EqualValues(t, 3, opts.GetTargetFileSizeMultiplier()) + require.EqualValues(t, 1, opts.GetTargetFileSizeMultiplier()) + opts.SetTargetFileSizeMultiplier(3) + require.EqualValues(t, 3, opts.GetTargetFileSizeMultiplier()) -// require.EqualValues(t, 10<<20, opts.GetMaxBytesForLevelBase()) -// opts.SetMaxBytesForLevelBase(1 << 30) -// require.EqualValues(t, 1<<30, opts.GetMaxBytesForLevelBase()) + require.EqualValues(t, 10<<20, opts.GetMaxBytesForLevelBase()) + opts.SetMaxBytesForLevelBase(1 << 30) + require.EqualValues(t, 1<<30, opts.GetMaxBytesForLevelBase()) -// require.EqualValues(t, 10, opts.GetMaxBytesForLevelMultiplier()) -// opts.SetMaxBytesForLevelMultiplier(12) -// require.EqualValues(t, 12, opts.GetMaxBytesForLevelMultiplier()) + require.EqualValues(t, 10, opts.GetMaxBytesForLevelMultiplier()) + opts.SetMaxBytesForLevelMultiplier(12) + require.EqualValues(t, 12, opts.GetMaxBytesForLevelMultiplier()) -// require.EqualValues(t, 1, opts.GetMaxSubcompactions()) -// opts.SetMaxSubcompactions(3) -// require.EqualValues(t, 3, opts.GetMaxSubcompactions()) + require.EqualValues(t, 1, opts.GetMaxSubcompactions()) + opts.SetMaxSubcompactions(3) + require.EqualValues(t, 3, 
opts.GetMaxSubcompactions()) -// opts.SetMaxBytesForLevelMultiplierAdditional([]int{2 << 20}) + opts.SetMaxBytesForLevelMultiplierAdditional([]int{2 << 20}) -// opts.SetPurgeRedundantKvsWhileFlush(true) + opts.SetPurgeRedundantKvsWhileFlush(true) -// opts.SetDbLogDir("./abc") -// opts.SetWalDir("../asdf") + opts.SetDbLogDir("./abc") + opts.SetWalDir("../asdf") -// require.EqualValues(t, false, opts.EnabledPipelinedWrite()) -// opts.SetEnablePipelinedWrite(true) -// require.EqualValues(t, true, opts.EnabledPipelinedWrite()) + require.EqualValues(t, false, opts.EnabledPipelinedWrite()) + opts.SetEnablePipelinedWrite(true) + require.EqualValues(t, true, opts.EnabledPipelinedWrite()) -// require.EqualValues(t, false, opts.UnorderedWrite()) -// opts.SetUnorderedWrite(true) -// require.EqualValues(t, true, opts.UnorderedWrite()) + require.EqualValues(t, false, opts.UnorderedWrite()) + opts.SetUnorderedWrite(true) + require.EqualValues(t, true, opts.UnorderedWrite()) -// opts.EnableStatistics() -// opts.PrepareForBulkLoad() -// opts.SetMemtableVectorRep() -// opts.SetHashLinkListRep(12) -// opts.SetHashSkipListRep(1, 2, 3) -// opts.SetPlainTableFactory(1, 2, 3.1, 12) -// opts.SetUint64AddMergeOperator() -// opts.SetDumpMallocStats(true) -// opts.SetMemtableWholeKeyFiltering(true) + opts.EnableStatistics() + opts.PrepareForBulkLoad() + opts.SetMemtableVectorRep() + opts.SetHashLinkListRep(12) + opts.SetHashSkipListRep(1, 2, 3) + opts.SetPlainTableFactory(1, 2, 3.1, 12) + opts.SetUint64AddMergeOperator() + opts.SetDumpMallocStats(true) + opts.SetMemtableWholeKeyFiltering(true) -// require.EqualValues(t, false, opts.AllowIngestBehind()) -// opts.SetAllowIngestBehind(true) -// require.EqualValues(t, true, opts.AllowIngestBehind()) + require.EqualValues(t, false, opts.AllowIngestBehind()) + opts.SetAllowIngestBehind(true) + require.EqualValues(t, true, opts.AllowIngestBehind()) -// require.EqualValues(t, false, opts.SkipStatsUpdateOnDBOpen()) -// opts.SetSkipStatsUpdateOnDBOpen(true) -// require.EqualValues(t, true, opts.SkipStatsUpdateOnDBOpen()) + require.EqualValues(t, false, opts.SkipStatsUpdateOnDBOpen()) + opts.SetSkipStatsUpdateOnDBOpen(true) + require.EqualValues(t, true, opts.SkipStatsUpdateOnDBOpen()) -// require.EqualValues(t, false, opts.SkipCheckingSSTFileSizesOnDBOpen()) -// opts.SetSkipCheckingSSTFileSizesOnDBOpen(true) -// require.EqualValues(t, true, opts.SkipCheckingSSTFileSizesOnDBOpen()) + require.EqualValues(t, false, opts.SkipCheckingSSTFileSizesOnDBOpen()) + opts.SetSkipCheckingSSTFileSizesOnDBOpen(true) + require.EqualValues(t, true, opts.SkipCheckingSSTFileSizesOnDBOpen()) -// opts.CompactionReadaheadSize(88 << 20) -// require.EqualValues(t, 88<<20, opts.GetCompactionReadaheadSize()) + opts.CompactionReadaheadSize(88 << 20) + require.EqualValues(t, 88<<20, opts.GetCompactionReadaheadSize()) -// opts.SetMaxWriteBufferSizeToMaintain(99 << 19) -// require.EqualValues(t, 99<<19, opts.GetMaxWriteBufferSizeToMaintain()) + opts.SetMaxWriteBufferSizeToMaintain(99 << 19) + require.EqualValues(t, 99<<19, opts.GetMaxWriteBufferSizeToMaintain()) -// // set compaction filter -// opts.SetCompactionFilter(NewNativeCompactionFilter(nil)) + // set compaction filter + opts.SetCompactionFilter(NewNativeCompactionFilter(nil)) -// // set comparator -// opts.SetComparator(NewNativeComparator(nil)) + // set comparator + opts.SetComparator(NewNativeComparator(nil)) -// // set merge operator -// opts.SetMergeOperator(NewNativeMergeOperator(nil)) + // set merge operator + 
opts.SetMergeOperator(NewNativeMergeOperator(nil))

-// // get option from string
-// _, err := GetOptionsFromString(nil, "abc")
-// require.Error(t, err)
+	// get option from string
+	_, err := GetOptionsFromString(nil, "abc")
+	require.Error(t, err)

-// // deprecation soon
-// opts.SetTableCacheRemoveScanCountLimit(112)
+	// to be deprecated soon
+	opts.SetTableCacheRemoveScanCountLimit(112)

-// opts.SetMaxWriteBufferNumberToMaintain(45)
-// require.EqualValues(t, 45, opts.GetMaxWriteBufferNumberToMaintain())
+	opts.SetMaxWriteBufferNumberToMaintain(45)
+	require.EqualValues(t, 45, opts.GetMaxWriteBufferNumberToMaintain())

-// // cloning
-// cl := opts.Clone()
-// require.EqualValues(t, 5, cl.GetTableCacheNumshardbits())
-// }
+	// cloning
+	cl := opts.Clone()
+	require.EqualValues(t, 5, cl.GetTableCacheNumshardbits())
+}

-// func TestOptions2(t *testing.T) {
-// 	t.Run("SetUniversalCompactionOpts", func(t *testing.T) {
-// 		opts := NewDefaultOptions()
-// 		defer opts.Destroy()
+func TestOptions2(t *testing.T) {
+	t.Run("SetUniversalCompactionOpts", func(t *testing.T) {
+		opts := NewDefaultOptions()
+		defer opts.Destroy()

-// 		opts.SetUniversalCompactionOptions(NewDefaultUniversalCompactionOptions())
-// 	})
+		opts.SetUniversalCompactionOptions(NewDefaultUniversalCompactionOptions())
+	})

-// 	t.Run("SetFifoCompactionOpts", func(t *testing.T) {
-// 		opts := NewDefaultOptions()
-// 		defer opts.Destroy()
+	t.Run("SetFifoCompactionOpts", func(t *testing.T) {
+		opts := NewDefaultOptions()
+		defer opts.Destroy()

-// 		opts.SetFIFOCompactionOptions(NewDefaultFIFOCompactionOptions())
-// 	})
+		opts.SetFIFOCompactionOptions(NewDefaultFIFOCompactionOptions())
+	})

-// 	t.Run("StatisticString", func(t *testing.T) {
-// 		opts := NewDefaultOptions()
-// 		defer opts.Destroy()
+	t.Run("StatisticString", func(t *testing.T) {
+		opts := NewDefaultOptions()
+		defer opts.Destroy()

-// 		require.Empty(t, opts.GetStatisticsString())
-// 	})
-// }
+		require.Empty(t, opts.GetStatisticsString())
+	})
+}
diff --git a/slice_transform_test.go b/slice_transform_test.go
index 73ce930..14db64d 100644
--- a/slice_transform_test.go
+++ b/slice_transform_test.go
@@ -3,7 +3,7 @@ package grocksdb
 import (
 	"testing"

-	"github.com/facebookgo/ensure"
+	"github.com/stretchr/testify/require"
 )

 func TestSliceTransform(t *testing.T) {
@@ -13,9 +13,9 @@ func TestSliceTransform(t *testing.T) {
 	defer db.Close()

 	wo := NewDefaultWriteOptions()
-	ensure.Nil(t, db.Put(wo, []byte("foo1"), []byte("foo")))
-	ensure.Nil(t, db.Put(wo, []byte("foo2"), []byte("foo")))
-	ensure.Nil(t, db.Put(wo, []byte("bar1"), []byte("bar")))
+	require.Nil(t, db.Put(wo, []byte("foo1"), []byte("foo")))
+	require.Nil(t, db.Put(wo, []byte("foo2"), []byte("foo")))
+	require.Nil(t, db.Put(wo, []byte("bar1"), []byte("bar")))

 	iter := db.NewIterator(NewDefaultReadOptions())
 	defer iter.Close()
@@ -24,8 +24,8 @@ func TestSliceTransform(t *testing.T) {
 	for iter.Seek(prefix); iter.ValidForPrefix(prefix); iter.Next() {
 		numFound++
 	}
-	ensure.Nil(t, iter.Err())
-	ensure.DeepEqual(t, numFound, 2)
+	require.Nil(t, iter.Err())
+	require.EqualValues(t, 2, numFound)
 }

 func TestFixedPrefixTransformOpen(t *testing.T) {
diff --git a/transactiondb_test.go b/transactiondb_test.go
index cee53c0..1df7e28 100644
--- a/transactiondb_test.go
+++ b/transactiondb_test.go
@@ -4,7 +4,7 @@ import (
 	"io/ioutil"
 	"testing"

-	"github.com/facebookgo/ensure"
+	"github.com/stretchr/testify/require"
 )

 func TestOpenTransactionDb(t *testing.T) {
@@ -29,67 +29,67 @@ func TestTransactionDBCRUD(t *testing.T) {
 	)

 	// create
-	ensure.Nil(t, db.Put(wo, givenKey, givenVal1))
+	require.Nil(t, db.Put(wo, givenKey, givenVal1))

 	// retrieve
 	v1, err := db.Get(ro, givenKey)
 	defer v1.Free()
-	ensure.Nil(t, err)
-	ensure.DeepEqual(t, v1.Data(), givenVal1)
+	require.Nil(t, err)
+	require.EqualValues(t, givenVal1, v1.Data())

 	// update
-	ensure.Nil(t, db.Put(wo, givenKey, givenVal2))
+	require.Nil(t, db.Put(wo, givenKey, givenVal2))
 	v2, err := db.Get(ro, givenKey)
 	defer v2.Free()
-	ensure.Nil(t, err)
-	ensure.DeepEqual(t, v2.Data(), givenVal2)
+	require.Nil(t, err)
+	require.EqualValues(t, givenVal2, v2.Data())

 	// delete
-	ensure.Nil(t, db.Delete(wo, givenKey))
+	require.Nil(t, db.Delete(wo, givenKey))
 	v3, err := db.Get(ro, givenKey)
 	defer v3.Free()
-	ensure.Nil(t, err)
-	ensure.True(t, v3.Data() == nil)
+	require.Nil(t, err)
+	require.True(t, v3.Data() == nil)

 	// transaction
 	txn := db.TransactionBegin(wo, to, nil)
 	defer txn.Destroy()
 	// create
-	ensure.Nil(t, txn.Put(givenTxnKey, givenTxnVal1))
+	require.Nil(t, txn.Put(givenTxnKey, givenTxnVal1))
 	v4, err := txn.Get(ro, givenTxnKey)
 	defer v4.Free()
-	ensure.Nil(t, err)
-	ensure.DeepEqual(t, v4.Data(), givenTxnVal1)
+	require.Nil(t, err)
+	require.EqualValues(t, givenTxnVal1, v4.Data())

-	ensure.Nil(t, txn.Commit())
+	require.Nil(t, txn.Commit())
 	v5, err := db.Get(ro, givenTxnKey)
 	defer v5.Free()
-	ensure.Nil(t, err)
-	ensure.DeepEqual(t, v5.Data(), givenTxnVal1)
+	require.Nil(t, err)
+	require.EqualValues(t, givenTxnVal1, v5.Data())

 	// transaction
 	txn2 := db.TransactionBegin(wo, to, nil)
 	defer txn2.Destroy()
 	// create
-	ensure.Nil(t, txn2.Put(givenTxnKey2, givenTxnVal1))
+	require.Nil(t, txn2.Put(givenTxnKey2, givenTxnVal1))
 	// rollback
-	ensure.Nil(t, txn2.Rollback())
+	require.Nil(t, txn2.Rollback())
 	v6, err := txn2.Get(ro, givenTxnKey2)
 	defer v6.Free()
-	ensure.Nil(t, err)
-	ensure.True(t, v6.Data() == nil)
+	require.Nil(t, err)
+	require.True(t, v6.Data() == nil)

 	// transaction
 	txn3 := db.TransactionBegin(wo, to, nil)
 	defer txn3.Destroy()
 	// delete
-	ensure.Nil(t, txn3.Delete(givenTxnKey))
-	ensure.Nil(t, txn3.Commit())
+	require.Nil(t, txn3.Delete(givenTxnKey))
+	require.Nil(t, txn3.Commit())

 	v7, err := db.Get(ro, givenTxnKey)
 	defer v7.Free()
-	ensure.Nil(t, err)
-	ensure.True(t, v7.Data() == nil)
+	require.Nil(t, err)
+	require.True(t, v7.Data() == nil)
 }

@@ -114,7 +114,7 @@ func TestTransactionDBGetForUpdate(t *testing.T) {

 	v, err := txn.GetForUpdate(ro, givenKey)
 	defer v.Free()
-	ensure.Nil(t, err)
+	require.Nil(t, err)

 	// expect lock timeout error to be thrown
 	if err := db.Put(wo, givenKey, givenVal); err == nil {
@@ -124,7 +124,7 @@
 func newTestTransactionDB(t *testing.T, name string, applyOpts func(opts *Options, transactionDBOpts *TransactionDBOptions)) *TransactionDB {
 	dir, err := ioutil.TempDir("", "gorockstransactiondb-"+name)
-	ensure.Nil(t, err)
+	require.Nil(t, err)

 	opts := NewDefaultOptions()
 	opts.SetCreateIfMissing(true)
@@ -133,7 +133,7 @@ func newTestTransactionDB(t *testing.T, name string, applyOpts func(opts *Option
 	applyOpts(opts, transactionDBOpts)
 	}
 	db, err := OpenTransactionDb(opts, transactionDBOpts, dir)
-	ensure.Nil(t, err)
+	require.Nil(t, err)

 	return db
 }
diff --git a/write_batch_test.go b/write_batch_test.go
index c9b0f1f..33d3a93 100644
--- a/write_batch_test.go
+++ b/write_batch_test.go
@@ -3,7 +3,7 @@ package grocksdb
 import (
 	"testing"

-	"github.com/facebookgo/ensure"
+	"github.com/stretchr/testify/require"
 )

 func TestWriteBatch(t *testing.T) {
@@ -16,41 +16,41 @@ func TestWriteBatch(t *testing.T) {
 	givenKey2 = []byte("key2")
 	)
 	wo := NewDefaultWriteOptions()
-	ensure.Nil(t, db.Put(wo, givenKey2, []byte("foo")))
+	require.Nil(t, db.Put(wo, givenKey2, []byte("foo")))

 	// create and fill the write batch
 	wb := NewWriteBatch()
 	defer wb.Destroy()
 	wb.Put(givenKey1, givenVal1)
 	wb.Delete(givenKey2)
-	ensure.DeepEqual(t, wb.Count(), 2)
+	require.EqualValues(t, 2, wb.Count())

 	// perform the batch
-	ensure.Nil(t, db.Write(wo, wb))
+	require.Nil(t, db.Write(wo, wb))

 	// check changes
 	ro := NewDefaultReadOptions()
 	v1, err := db.Get(ro, givenKey1)
 	defer v1.Free()
-	ensure.Nil(t, err)
-	ensure.DeepEqual(t, v1.Data(), givenVal1)
+	require.Nil(t, err)
+	require.EqualValues(t, givenVal1, v1.Data())

 	v2, err := db.Get(ro, givenKey2)
 	defer v2.Free()
-	ensure.Nil(t, err)
-	ensure.True(t, v2.Data() == nil)
+	require.Nil(t, err)
+	require.True(t, v2.Data() == nil)

 	// DeleteRange test
 	wb.Clear()
 	wb.DeleteRange(givenKey1, givenKey2)

 	// perform the batch
-	ensure.Nil(t, db.Write(wo, wb))
+	require.Nil(t, db.Write(wo, wb))

 	v1, err = db.Get(ro, givenKey1)
 	defer v1.Free()
-	ensure.Nil(t, err)
-	ensure.True(t, v1.Data() == nil)
+	require.Nil(t, err)
+	require.True(t, v1.Data() == nil)
 }

 func TestWriteBatchIterator(t *testing.T) {
@@ -67,21 +67,21 @@ func TestWriteBatchIterator(t *testing.T) {
 	defer wb.Destroy()
 	wb.Put(givenKey1, givenVal1)
 	wb.Delete(givenKey2)
-	ensure.DeepEqual(t, wb.Count(), 2)
+	require.EqualValues(t, 2, wb.Count())

 	// iterate over the batch
 	iter := wb.NewIterator()
-	ensure.True(t, iter.Next())
+	require.True(t, iter.Next())
 	record := iter.Record()
-	ensure.DeepEqual(t, record.Type, WriteBatchValueRecord)
-	ensure.DeepEqual(t, record.Key, givenKey1)
-	ensure.DeepEqual(t, record.Value, givenVal1)
+	require.EqualValues(t, WriteBatchValueRecord, record.Type)
+	require.EqualValues(t, givenKey1, record.Key)
+	require.EqualValues(t, givenVal1, record.Value)

-	ensure.True(t, iter.Next())
+	require.True(t, iter.Next())
 	record = iter.Record()
-	ensure.DeepEqual(t, record.Type, WriteBatchDeletionRecord)
-	ensure.DeepEqual(t, record.Key, givenKey2)
+	require.EqualValues(t, WriteBatchDeletionRecord, record.Type)
+	require.EqualValues(t, givenKey2, record.Key)

 	// there shouldn't be any left
-	ensure.False(t, iter.Next())
+	require.False(t, iter.Next())
 }