From c41f9eb4024dcb3108afa58dce4c4df0f6967362 Mon Sep 17 00:00:00 2001 From: Senthil Nathan N Date: Wed, 15 Apr 2020 23:10:08 +0530 Subject: [PATCH] [FAB-17667] move couchdb util package to statecouchdb package (#926) We have a CouchDB util package that exposes API to read from and write to CouchDB. This package is used only by the statecouchdb and the tests package (i.e., ledger level integration test). In general, a util package is justified only when an API needs to be used by many other packages. For example, if we have 10 packages and each of them needs a common API, it is not good to duplicate the API code; instead, we should create a separate util package. Hence, we move all files in the CouchDB util package to the statecouchdb package itself. Note that there are many opportunities to refactor the couchdb package, but we will do that in a separate PR. Given that we have moved the couchdb util to the statecouchdb package, we can have only one TestMain. It would be hacky to reuse the existing testVDBEnv for testing couchDB too. Hence, we introduce another testEnv called testCouchDBEnv to test couchDB utils. 
Signed-off-by: senthil --- .../kvledger/benchmark/chainmgmt/chains.go | 3 +- core/ledger/kvledger/tests/util.go | 21 +- core/ledger/kvledger/tests/v1x_test.go | 14 +- .../privacyenabledstate/test_exports.go | 15 +- .../statedb/statecouchdb/commit_handling.go | 21 +- .../statecouchdb/commit_handling_test.go | 39 +- .../txmgmt/statedb/statecouchdb}/couchdb.go | 647 ++++++------ .../statedb/statecouchdb}/couchdb_test.go | 945 +++++++++--------- .../statecouchdb/couchdb_test_export.go | 46 + .../statedb/statecouchdb}/couchdbutil.go | 47 +- .../statedb/statecouchdb}/couchdbutil_test.go | 56 +- .../statedb/statecouchdb/couchdoc_conv.go | 37 +- .../statecouchdb/metadata_retrieval.go | 18 +- .../txmgmt/statedb/statecouchdb}/metrics.go | 2 +- .../statedb/statecouchdb}/metrics_test.go | 9 +- .../statedb/statecouchdb/redolog_test.go | 18 +- .../statedb/statecouchdb/statecouchdb.go | 94 +- .../statedb/statecouchdb/statecouchdb_test.go | 383 ++++--- .../statecouchdb/statecouchdb_test_export.go | 118 --- core/ledger/ledger_interface.go | 42 +- core/ledger/util/couchdbtest/couchdb.go | 34 - docs/source/metrics_reference.rst | 373 ++++--- internal/peer/node/config.go | 5 +- internal/peer/node/config_test.go | 7 +- 24 files changed, 1461 insertions(+), 1533 deletions(-) rename core/ledger/{util/couchdb => kvledger/txmgmt/statedb/statecouchdb}/couchdb.go (73%) rename core/ledger/{util/couchdb => kvledger/txmgmt/statedb/statecouchdb}/couchdb_test.go (66%) create mode 100644 core/ledger/kvledger/txmgmt/statedb/statecouchdb/couchdb_test_export.go rename core/ledger/{util/couchdb => kvledger/txmgmt/statedb/statecouchdb}/couchdbutil.go (85%) rename core/ledger/{util/couchdb => kvledger/txmgmt/statedb/statecouchdb}/couchdbutil_test.go (84%) rename core/ledger/{util/couchdb => kvledger/txmgmt/statedb/statecouchdb}/metrics.go (97%) rename core/ledger/{util/couchdb => kvledger/txmgmt/statedb/statecouchdb}/metrics_test.go (81%) delete mode 100644 
core/ledger/kvledger/txmgmt/statedb/statecouchdb/statecouchdb_test_export.go delete mode 100644 core/ledger/util/couchdbtest/couchdb.go diff --git a/core/ledger/kvledger/benchmark/chainmgmt/chains.go b/core/ledger/kvledger/benchmark/chainmgmt/chains.go index 725c9664548..100e7fb69df 100644 --- a/core/ledger/kvledger/benchmark/chainmgmt/chains.go +++ b/core/ledger/kvledger/benchmark/chainmgmt/chains.go @@ -17,7 +17,6 @@ import ( "github.com/hyperledger/fabric/core/ledger" "github.com/hyperledger/fabric/core/ledger/ledgermgmt" "github.com/hyperledger/fabric/core/ledger/ledgermgmt/ledgermgmttest" - "github.com/hyperledger/fabric/core/ledger/util/couchdb" ) // ChainID is a type used for the ids for the chains for experiments @@ -50,7 +49,7 @@ func newChainsMgr(mgrConf *ChainMgrConf, batchConf *BatchConf, initOp chainInitO panic("environment variable 'useCouchDB' is set to true but 'COUCHDB_ADDR' is not set") } ledgermgmtInitializer.Config.StateDBConfig.StateDatabase = "CouchDB" - ledgermgmtInitializer.Config.StateDBConfig.CouchDB = &couchdb.Config{ + ledgermgmtInitializer.Config.StateDBConfig.CouchDB = &ledger.CouchDBConfig{ Address: couchdbAddr, RedoLogPath: filepath.Join(dataDir, "couchdbRedologs"), UserCacheSizeMBs: 500, diff --git a/core/ledger/kvledger/tests/util.go b/core/ledger/kvledger/tests/util.go index bb90fbda1a0..53e0609852a 100644 --- a/core/ledger/kvledger/tests/util.go +++ b/core/ledger/kvledger/tests/util.go @@ -17,13 +17,12 @@ import ( configtxtest "github.com/hyperledger/fabric/common/configtx/test" "github.com/hyperledger/fabric/common/crypto" "github.com/hyperledger/fabric/common/flogging" - "github.com/hyperledger/fabric/common/metrics/disabled" "github.com/hyperledger/fabric/common/policydsl" + "github.com/hyperledger/fabric/core/ledger" "github.com/hyperledger/fabric/core/ledger/kvledger/tests/fakes" - "github.com/hyperledger/fabric/core/ledger/util/couchdb" + "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/statedb/statecouchdb" 
"github.com/hyperledger/fabric/internal/pkg/txflags" "github.com/hyperledger/fabric/protoutil" - "github.com/stretchr/testify/require" ) var logger = flogging.MustGetLogger("test2") @@ -231,18 +230,6 @@ func setBlockFlagsToValid(block *common.Block) { txflags.NewWithValues(len(block.Data.Data), protopeer.TxValidationCode_VALID) } -func dropCouchDBs(t *testing.T, couchdbConfig *couchdb.Config) { - couchInstance, err := couchdb.CreateCouchInstance(couchdbConfig, &disabled.Provider{}) - require.NoError(t, err) - dbNames, err := couchInstance.RetrieveApplicationDBNames() - require.NoError(t, err) - for _, dbName := range dbNames { - db := &couchdb.CouchDatabase{ - CouchInstance: couchInstance, - DBName: dbName, - } - response, err := db.DropDatabase() - require.NoError(t, err) - require.True(t, response.Ok) - } +func dropCouchDBs(t *testing.T, couchdbConfig *ledger.CouchDBConfig) { + statecouchdb.DeleteApplicationDBs(t, couchdbConfig) } diff --git a/core/ledger/kvledger/tests/v1x_test.go b/core/ledger/kvledger/tests/v1x_test.go index 75225ec441b..d71de2b803c 100644 --- a/core/ledger/kvledger/tests/v1x_test.go +++ b/core/ledger/kvledger/tests/v1x_test.go @@ -16,10 +16,10 @@ import ( protopeer "github.com/hyperledger/fabric-protos-go/peer" "github.com/hyperledger/fabric/common/ledger/testutil" + "github.com/hyperledger/fabric/core/ledger" "github.com/hyperledger/fabric/core/ledger/kvledger" + "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/statedb/statecouchdb" "github.com/hyperledger/fabric/core/ledger/mock" - "github.com/hyperledger/fabric/core/ledger/util/couchdb" - "github.com/hyperledger/fabric/core/ledger/util/couchdbtest" "github.com/stretchr/testify/require" ) @@ -76,11 +76,11 @@ func TestV13WithStateCouchdb(t *testing.T) { fmt.Sprintf("%s:%s", couchdbDataUnzipDir, "/opt/couchdb/data"), fmt.Sprintf("%s:%s", localdHostDir, "/opt/couchdb/etc/local.d"), } - couchAddress, cleanup := couchdbtest.CouchDBSetup(couchdbBinds) + couchAddress, cleanup := 
couchDBSetup(t, couchdbBinds) defer cleanup() // set required config data to use state couchdb - couchdbConfig := &couchdb.Config{ + couchdbConfig := &ledger.CouchDBConfig{ Address: couchAddress, Username: "", Password: "", @@ -114,7 +114,11 @@ func TestV13WithStateCouchdb(t *testing.T) { dataHelper.verify(h2) } -func checkInitLedgerPanicAndDropDBs(t *testing.T, env *env, ledgerFSRoot string, couchdbConfig *couchdb.Config) { +func couchDBSetup(t *testing.T, binds []string) (addr string, cleanup func()) { + return statecouchdb.StartCouchDB(t, binds) +} + +func checkInitLedgerPanicAndDropDBs(t *testing.T, env *env, ledgerFSRoot string, couchdbConfig *ledger.CouchDBConfig) { t.Logf("verifying that a panic occurs because idStore has old format and then reformat the idstore to proceed") idStorePath := kvledger.LedgerProviderPath(ledgerFSRoot) require.PanicsWithValue( diff --git a/core/ledger/kvledger/txmgmt/privacyenabledstate/test_exports.go b/core/ledger/kvledger/txmgmt/privacyenabledstate/test_exports.go index 36e5d2ecfc4..cb263e9b93c 100644 --- a/core/ledger/kvledger/txmgmt/privacyenabledstate/test_exports.go +++ b/core/ledger/kvledger/txmgmt/privacyenabledstate/test_exports.go @@ -17,8 +17,6 @@ import ( "github.com/hyperledger/fabric/core/ledger/kvledger/bookkeeping" "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/statedb/statecouchdb" "github.com/hyperledger/fabric/core/ledger/mock" - "github.com/hyperledger/fabric/core/ledger/util/couchdb" - "github.com/hyperledger/fabric/core/ledger/util/couchdbtest" "github.com/stretchr/testify/assert" ) @@ -109,13 +107,15 @@ type CouchDBCommonStorageTestEnv struct { bookkeeperTestEnv *bookkeeping.TestEnv redoPath string couchCleanup func() + couchDBConfig *ledger.CouchDBConfig } // StartExternalResource starts external couchDB resources. 
func (env *CouchDBCommonStorageTestEnv) StartExternalResource() { - if env.couchAddress == "" { - env.couchAddress, env.couchCleanup = couchdbtest.CouchDBSetup(nil) + if env.couchAddress != "" { + return } + env.couchAddress, env.couchCleanup = statecouchdb.StartCouchDB(env.t.(*testing.T), nil) } // StopExternalResource stops external couchDB resources. @@ -132,12 +132,13 @@ func (env *CouchDBCommonStorageTestEnv) Init(t testing.TB) { t.Fatalf("Failed to create redo log directory: %s", err) } + env.t = t env.StartExternalResource() stateDBConfig := &StateDBConfig{ StateDBConfig: &ledger.StateDBConfig{ StateDatabase: "CouchDB", - CouchDB: &couchdb.Config{ + CouchDB: &ledger.CouchDBConfig{ Address: env.couchAddress, Username: "", Password: "", @@ -161,9 +162,9 @@ func (env *CouchDBCommonStorageTestEnv) Init(t testing.TB) { []string{"lscc", "_lifecycle"}, ) assert.NoError(t, err) - env.t = t env.provider = dbProvider env.redoPath = redoPath + env.couchDBConfig = stateDBConfig.CouchDB } // GetDBHandle implements corresponding function from interface TestEnv @@ -182,7 +183,7 @@ func (env *CouchDBCommonStorageTestEnv) GetName() string { func (env *CouchDBCommonStorageTestEnv) Cleanup() { csdbProvider := env.provider.(*CommonStorageDBProvider) if csdbProvider != nil { - statecouchdb.CleanupDB(env.t, csdbProvider.VersionedDBProvider) + statecouchdb.DeleteApplicationDBs(env.t, env.couchDBConfig) } os.RemoveAll(env.redoPath) env.bookkeeperTestEnv.Cleanup() diff --git a/core/ledger/kvledger/txmgmt/statedb/statecouchdb/commit_handling.go b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/commit_handling.go index bad4c56f9a5..d628e67dfbe 100644 --- a/core/ledger/kvledger/txmgmt/statedb/statecouchdb/commit_handling.go +++ b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/commit_handling.go @@ -12,12 +12,11 @@ import ( "sync" "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/statedb" - "github.com/hyperledger/fabric/core/ledger/util/couchdb" "github.com/pkg/errors" ) 
type committer struct { - db *couchdb.CouchDatabase + db *couchDatabase batchUpdateMap map[string]*batchableDocument namespace string cacheKVs cacheKVs @@ -105,7 +104,7 @@ func (vdb *VersionedDB) buildCommittersForNs(ns string, nsUpdates map[string]*st return nil, err } // for each namespace, build mutiple committers based on the maxBatchSize - maxBatchSize := db.CouchInstance.MaxBatchUpdateSize() + maxBatchSize := db.couchInstance.maxBatchUpdateSize() numCommitters := 1 if maxBatchSize > 0 { numCommitters = int(math.Ceil(float64(len(nsUpdates)) / float64(maxBatchSize))) @@ -173,13 +172,13 @@ func (vdb *VersionedDB) executeCommitter(committers []*committer) error { // commitUpdates commits the given updates to couchdb func (c *committer) commitUpdates() error { - docs := []*couchdb.CouchDoc{} + docs := []*couchDoc{} for _, update := range c.batchUpdateMap { docs = append(docs, &update.CouchDoc) } // Do the bulk update into couchdb. Note that this will do retries if the entire bulk update fails or times out - responses, err := c.db.BatchUpdateDocuments(docs) + responses, err := c.db.batchUpdateDocuments(docs) if err != nil { return err } @@ -198,8 +197,8 @@ func (c *committer) commitUpdates() error { //Remove the "_rev" from the JSON before saving //this will allow the CouchDB retry logic to retry revisions without encountering //a mismatch between the "If-Match" and the "_rev" tag in the JSON - if doc.CouchDoc.JSONValue != nil { - err = removeJSONRevision(&doc.CouchDoc.JSONValue) + if doc.CouchDoc.jsonValue != nil { + err = removeJSONRevision(&doc.CouchDoc.jsonValue) if err != nil { return err } @@ -210,13 +209,13 @@ func (c *committer) commitUpdates() error { // If this is a deleted document, then retry the delete // If the delete fails due to a document not being found (404 error), // the document has already been deleted and the DeleteDoc will not return an error - err = c.db.DeleteDoc(resp.ID, "") + err = c.db.deleteDoc(resp.ID, "") } else { 
logger.Warningf("CouchDB batch document update encountered an problem. Reason:%s, Retrying update for document ID:%s", resp.Reason, resp.ID) // Save the individual document to couchdb // Note that this will do retries as needed var revision string - revision, err = c.db.SaveDoc(resp.ID, "", &doc.CouchDoc) + revision, err = c.db.saveDoc(resp.ID, "", &doc.CouchDoc) c.updateRevisionInCacheUpdate(resp.ID, revision) } @@ -293,7 +292,7 @@ func (vdb *VersionedDB) addMissingRevisionsFromDB(ns string, missingKeys []strin return err } - logger.Debugf("Pulling revisions for the [%d] keys for namsespace [%s] that were not part of the readset", len(missingKeys), db.DBName) + logger.Debugf("Pulling revisions for the [%d] keys for namsespace [%s] that were not part of the readset", len(missingKeys), db.dbName) retrievedMetadata, err := retrieveNsMetadata(db, missingKeys) if err != nil { return err @@ -307,6 +306,6 @@ func (vdb *VersionedDB) addMissingRevisionsFromDB(ns string, missingKeys []strin //batchableDocument defines a document for a batch type batchableDocument struct { - CouchDoc couchdb.CouchDoc + CouchDoc couchDoc Deleted bool } diff --git a/core/ledger/kvledger/txmgmt/statedb/statecouchdb/commit_handling_test.go b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/commit_handling_test.go index 44c08e0f338..fc6bf073f71 100644 --- a/core/ledger/kvledger/txmgmt/statedb/statecouchdb/commit_handling_test.go +++ b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/commit_handling_test.go @@ -16,11 +16,10 @@ import ( ) func TestGetRevision(t *testing.T) { - env := testEnv - env.init(t, nil) - defer env.cleanup() + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() - versionedDB, err := testEnv.DBProvider.GetDBHandle("test-get-revisions") + versionedDB, err := vdbEnv.DBProvider.GetDBHandle("test-get-revisions") assert.NoError(t, err) db := versionedDB.(*VersionedDB) @@ -83,11 +82,10 @@ func TestGetRevision(t *testing.T) { } func TestBuildCommittersForNs(t *testing.T) { - env := testEnv 
- env.init(t, nil) - defer env.cleanup() + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() - versionedDB, err := testEnv.DBProvider.GetDBHandle("test-build-committers-for-ns") + versionedDB, err := vdbEnv.DBProvider.GetDBHandle("test-build-committers-for-ns") assert.NoError(t, err) db := versionedDB.(*VersionedDB) @@ -101,7 +99,7 @@ func TestBuildCommittersForNs(t *testing.T) { nsUpdates = make(map[string]*statedb.VersionedValue) // populate updates with maxBatchSize + 1. dummyHeight := version.NewHeight(1, 1) - for i := 0; i <= env.config.MaxBatchUpdateSize; i++ { + for i := 0; i <= vdbEnv.config.MaxBatchUpdateSize; i++ { nsUpdates[strconv.Itoa(i)] = &statedb.VersionedValue{ Value: nil, Metadata: nil, @@ -118,11 +116,10 @@ func TestBuildCommittersForNs(t *testing.T) { } func TestBuildCommitters(t *testing.T) { - env := testEnv - env.init(t, nil) - defer env.cleanup() + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() - versionedDB, err := testEnv.DBProvider.GetDBHandle("test-build-committers") + versionedDB, err := vdbEnv.DBProvider.GetDBHandle("test-build-committers") assert.NoError(t, err) db := versionedDB.(*VersionedDB) @@ -130,7 +127,7 @@ func TestBuildCommitters(t *testing.T) { batch := statedb.NewUpdateBatch() batch.Put("ns-1", "key1", []byte("value1"), dummyHeight) batch.Put("ns-2", "key1", []byte("value2"), dummyHeight) - for i := 0; i <= env.config.MaxBatchUpdateSize; i++ { + for i := 0; i <= vdbEnv.config.MaxBatchUpdateSize; i++ { batch.Put("maxBatch", "key1", []byte("value3"), dummyHeight) } namespaceSet := map[string]bool{ @@ -152,11 +149,10 @@ func TestBuildCommitters(t *testing.T) { } func TestExecuteCommitter(t *testing.T) { - env := testEnv - env.init(t, nil) - defer env.cleanup() + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() - versionedDB, err := testEnv.DBProvider.GetDBHandle("test-execute-committer") + versionedDB, err := vdbEnv.DBProvider.GetDBHandle("test-execute-committer") assert.NoError(t, err) db := versionedDB.(*VersionedDB) @@ -213,11 +209,10 
@@ func TestExecuteCommitter(t *testing.T) { } func TestCommitUpdates(t *testing.T) { - env := testEnv - env.init(t, nil) - defer env.cleanup() + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() - versionedDB, err := testEnv.DBProvider.GetDBHandle("test-commitupdates") + versionedDB, err := vdbEnv.DBProvider.GetDBHandle("test-commitupdates") assert.NoError(t, err) db := versionedDB.(*VersionedDB) diff --git a/core/ledger/util/couchdb/couchdb.go b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/couchdb.go similarity index 73% rename from core/ledger/util/couchdb/couchdb.go rename to core/ledger/kvledger/txmgmt/statedb/statecouchdb/couchdb.go index 42ca9824b5f..55edabf903c 100644 --- a/core/ledger/util/couchdb/couchdb.go +++ b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/couchdb.go @@ -4,7 +4,7 @@ Copyright IBM Corp. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ -package couchdb +package statecouchdb import ( "bytes" @@ -28,23 +28,21 @@ import ( "time" "unicode/utf8" - "github.com/hyperledger/fabric/common/flogging" + "github.com/hyperledger/fabric/core/ledger" "github.com/pkg/errors" "go.uber.org/zap/zapcore" ) -var logger = flogging.MustGetLogger("couchdb") - //time between retry attempts in milliseconds const retryWaitTime = 125 -// DBOperationResponse is body for successful database calls. -type DBOperationResponse struct { +// dbOperationResponse is body for successful database calls. +type dbOperationResponse struct { Ok bool } -// DBInfo is body for database information. -type DBInfo struct { +// dbInfo is body for database information. 
+type dbInfo struct { DbName string `json:"db_name"` Sizes struct { File int `json:"file"` @@ -63,8 +61,8 @@ type DBInfo struct { InstanceStartTime string `json:"instance_start_time"` } -//ConnectionInfo is a structure for capturing the database info and version -type ConnectionInfo struct { +//connectionInfo is a structure for capturing the database info and version +type connectionInfo struct { Couchdb string `json:"couchdb"` Version string `json:"version"` Vendor struct { @@ -72,8 +70,8 @@ type ConnectionInfo struct { } `json:"vendor"` } -//RangeQueryResponse is used for processing REST range query responses from CouchDB -type RangeQueryResponse struct { +//rangeQueryResponse is used for processing REST range query responses from CouchDB +type rangeQueryResponse struct { TotalRows int32 `json:"total_rows"` Offset int32 `json:"offset"` Rows []struct { @@ -86,123 +84,85 @@ type RangeQueryResponse struct { } `json:"rows"` } -//QueryResponse is used for processing REST query responses from CouchDB -type QueryResponse struct { +//queryResponse is used for processing REST query responses from CouchDB +type queryResponse struct { Warning string `json:"warning"` Docs []json.RawMessage `json:"docs"` Bookmark string `json:"bookmark"` } -// DocMetadata is used for capturing CouchDB document header info, +// docMetadata is used for capturing CouchDB document header info, // used to capture id, version, rev and attachments returned in the query from CouchDB -type DocMetadata struct { +type docMetadata struct { ID string `json:"_id"` Rev string `json:"_rev"` Version string `json:"~version"` - AttachmentsInfo map[string]*AttachmentInfo `json:"_attachments"` + AttachmentsInfo map[string]*attachmentInfo `json:"_attachments"` } -//DocID is a minimal structure for capturing the ID from a query result -type DocID struct { +//docID is a minimal structure for capturing the ID from a query result +type docID struct { ID string `json:"_id"` } -//QueryResult is used for returning query 
results from CouchDB -type QueryResult struct { - ID string - Value []byte - Attachments []*AttachmentInfo -} - -// Config is a structure used to configure a CouchInstance. -type Config struct { - // Address is the hostname:port of the CouchDB database instance. - Address string - // Username is the username used to authenticate with CouchDB. This username - // must have read and write access permissions. - Username string - // Password is the password for Username. - Password string - // MaxRetries is the maximum number of times to retry CouchDB operations on - // failure. - MaxRetries int - // MaxRetriesOnStartup is the maximum number of times to retry CouchDB operations on - // failure when initializing the ledger. - MaxRetriesOnStartup int - // RequestTimeout is the timeout used for CouchDB operations. - RequestTimeout time.Duration - // InternalQueryLimit is the maximum number of records to return internally - // when querying CouchDB. - InternalQueryLimit int - // MaxBatchUpdateSize is the maximum number of records to included in CouchDB - // bulk update operations. - MaxBatchUpdateSize int - // WarmIndexesAfterNBlocks is the number of blocks after which to warm any - // CouchDB indexes. - WarmIndexesAfterNBlocks int - // CreateGlobalChangesDB determines whether or not to create the "_global_changes" - // system database. - CreateGlobalChangesDB bool - // RedoLogPath is the directory where the CouchDB redo log files are stored. - RedoLogPath string - // UserCacheSizeMBs denotes the user specified maximum mega bytes (MB) to be allocated - // for the user state cache (i.e., all chaincodes deployed by the user). Note that - // UserCacheSizeMBs needs to be a multiple of 32 MB. If it is not a multiple of 32 MB, - // the peer would round the size to the next multiple of 32 MB. 
- UserCacheSizeMBs int +//queryResult is used for returning query results from CouchDB +type queryResult struct { + id string + value []byte + attachments []*attachmentInfo } -//CouchInstance represents a CouchDB instance -type CouchInstance struct { - conf *Config +//couchInstance represents a CouchDB instance +type couchInstance struct { + conf *ledger.CouchDBConfig client *http.Client // a client to connect to this instance stats *stats } -//CouchDatabase represents a database within a CouchDB instance -type CouchDatabase struct { - CouchInstance *CouchInstance //connection configuration - DBName string - IndexWarmCounter int +//couchDatabase represents a database within a CouchDB instance +type couchDatabase struct { + couchInstance *couchInstance //connection configuration + dbName string + indexWarmCounter int } -//DBReturn contains an error reported by CouchDB -type DBReturn struct { +//dbReturn contains an error reported by CouchDB +type dbReturn struct { StatusCode int `json:"status_code"` Error string `json:"error"` Reason string `json:"reason"` } -//CreateIndexResponse contains an the index creation response from CouchDB -type CreateIndexResponse struct { +//createIndexResponse contains an the index creation response from CouchDB +type createIndexResponse struct { Result string `json:"result"` ID string `json:"id"` Name string `json:"name"` } -//AttachmentInfo contains the definition for an attached file for couchdb -type AttachmentInfo struct { +//attachmentInfo contains the definition for an attached file for couchdb +type attachmentInfo struct { Name string ContentType string `json:"content_type"` Length uint64 AttachmentBytes []byte `json:"data"` } -//FileDetails defines the structure needed to send an attachment to couchdb -type FileDetails struct { +//fileDetails defines the structure needed to send an attachment to couchdb +type fileDetails struct { Follows bool `json:"follows"` ContentType string `json:"content_type"` Length int `json:"length"` } 
-//CouchDoc defines the structure for a JSON document value -type CouchDoc struct { - JSONValue []byte - Attachments []*AttachmentInfo +//couchDoc defines the structure for a JSON document value +type couchDoc struct { + jsonValue []byte + attachments []*attachmentInfo } -//BatchRetrieveDocMetadataResponse is used for processing REST batch responses from CouchDB -type BatchRetrieveDocMetadataResponse struct { +//batchRetrieveDocMetadataResponse is used for processing REST batch responses from CouchDB +type batchRetrieveDocMetadataResponse struct { Rows []struct { ID string `json:"id"` DocMetadata struct { @@ -213,8 +173,8 @@ type BatchRetrieveDocMetadataResponse struct { } `json:"rows"` } -//BatchUpdateResponse defines a structure for batch update response -type BatchUpdateResponse struct { +//batchUpdateResponse defines a structure for batch update response +type batchUpdateResponse struct { ID string `json:"id"` Error string `json:"error"` Reason string `json:"reason"` @@ -222,21 +182,21 @@ type BatchUpdateResponse struct { Rev string `json:"rev"` } -//Base64Attachment contains the definition for an attached file for couchdb -type Base64Attachment struct { +//base64Attachment contains the definition for an attached file for couchdb +type base64Attachment struct { ContentType string `json:"content_type"` AttachmentData string `json:"data"` } -//IndexResult contains the definition for a couchdb index -type IndexResult struct { +//indexResult contains the definition for a couchdb index +type indexResult struct { DesignDocument string `json:"designdoc"` Name string `json:"name"` Definition string `json:"definition"` } -//DatabaseSecurity contains the definition for CouchDB database security -type DatabaseSecurity struct { +//databaseSecurity contains the definition for CouchDB database security +type databaseSecurity struct { Admins struct { Names []string `json:"names"` Roles []string `json:"roles"` @@ -256,12 +216,12 @@ func closeResponseBody(resp *http.Response) { 
} } -//CreateDatabaseIfNotExist method provides function to create database -func (dbclient *CouchDatabase) CreateDatabaseIfNotExist() error { +//createDatabaseIfNotExist method provides function to create database +func (dbclient *couchDatabase) createDatabaseIfNotExist() error { - logger.Debugf("[%s] Entering CreateDatabaseIfNotExist()", dbclient.DBName) + logger.Debugf("[%s] Entering CreateDatabaseIfNotExist()", dbclient.dbName) - dbInfo, couchDBReturn, err := dbclient.GetDatabaseInfo() + dbInfo, couchDBReturn, err := dbclient.getDatabaseInfo() if err != nil { if couchDBReturn == nil || couchDBReturn.StatusCode != 404 { return err @@ -277,23 +237,23 @@ func (dbclient *CouchDatabase) CreateDatabaseIfNotExist() error { return errSecurity } - logger.Debugf("[%s] Database already exists", dbclient.DBName) + logger.Debugf("[%s] Database already exists", dbclient.dbName) - logger.Debugf("[%s] Exiting CreateDatabaseIfNotExist()", dbclient.DBName) + logger.Debugf("[%s] Exiting CreateDatabaseIfNotExist()", dbclient.dbName) return nil } - logger.Debugf("[%s] Database does not exist.", dbclient.DBName) + logger.Debugf("[%s] Database does not exist.", dbclient.dbName) - connectURL, err := url.Parse(dbclient.CouchInstance.URL()) + connectURL, err := url.Parse(dbclient.couchInstance.url()) if err != nil { logger.Errorf("URL parse error: %s", err) - return errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.CouchInstance.URL()) + return errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.couchInstance.url()) } //get the number of retries - maxRetries := dbclient.CouchInstance.conf.MaxRetries + maxRetries := dbclient.couchInstance.conf.MaxRetries //process the URL with a PUT, creates the database resp, _, err := dbclient.handleRequest(http.MethodPut, "CreateDatabaseIfNotExist", connectURL, nil, "", "", maxRetries, true, nil) @@ -305,7 +265,7 @@ func (dbclient *CouchDatabase) CreateDatabaseIfNotExist() error { // database may have been created and a false error 
// returned due to a timeout or race condition. // Do a final check to see if the database really got created. - dbInfo, couchDBReturn, errDbInfo := dbclient.GetDatabaseInfo() + dbInfo, couchDBReturn, errDbInfo := dbclient.getDatabaseInfo() //If there is no error, then the database exists, return without an error if errDbInfo == nil && dbInfo != nil && couchDBReturn.StatusCode == 200 { @@ -314,8 +274,8 @@ func (dbclient *CouchDatabase) CreateDatabaseIfNotExist() error { return errSecurity } - logger.Infof("[%s] Created state database", dbclient.DBName) - logger.Debugf("[%s] Exiting CreateDatabaseIfNotExist()", dbclient.DBName) + logger.Infof("[%s] Created state database", dbclient.dbName) + logger.Debugf("[%s] Exiting CreateDatabaseIfNotExist()", dbclient.dbName) return nil } @@ -329,28 +289,27 @@ func (dbclient *CouchDatabase) CreateDatabaseIfNotExist() error { return errSecurity } - logger.Infof("Created state database %s", dbclient.DBName) + logger.Infof("Created state database %s", dbclient.dbName) - logger.Debugf("[%s] Exiting CreateDatabaseIfNotExist()", dbclient.DBName) + logger.Debugf("[%s] Exiting CreateDatabaseIfNotExist()", dbclient.dbName) return nil } -//applyDatabaseSecurity -func (dbclient *CouchDatabase) applyDatabasePermissions() error { +func (dbclient *couchDatabase) applyDatabasePermissions() error { //If the username and password are not set, then skip applying permissions - if dbclient.CouchInstance.conf.Username == "" && dbclient.CouchInstance.conf.Password == "" { + if dbclient.couchInstance.conf.Username == "" && dbclient.couchInstance.conf.Password == "" { return nil } - securityPermissions := &DatabaseSecurity{} + securityPermissions := &databaseSecurity{} - securityPermissions.Admins.Names = append(securityPermissions.Admins.Names, dbclient.CouchInstance.conf.Username) - securityPermissions.Members.Names = append(securityPermissions.Members.Names, dbclient.CouchInstance.conf.Username) + securityPermissions.Admins.Names = 
append(securityPermissions.Admins.Names, dbclient.couchInstance.conf.Username) + securityPermissions.Members.Names = append(securityPermissions.Members.Names, dbclient.couchInstance.conf.Username) - err := dbclient.ApplyDatabaseSecurity(securityPermissions) + err := dbclient.applyDatabaseSecurity(securityPermissions) if err != nil { return err } @@ -358,17 +317,17 @@ func (dbclient *CouchDatabase) applyDatabasePermissions() error { return nil } -//GetDatabaseInfo method provides function to retrieve database information -func (dbclient *CouchDatabase) GetDatabaseInfo() (*DBInfo, *DBReturn, error) { +//getDatabaseInfo method provides function to retrieve database information +func (dbclient *couchDatabase) getDatabaseInfo() (*dbInfo, *dbReturn, error) { - connectURL, err := url.Parse(dbclient.CouchInstance.URL()) + connectURL, err := url.Parse(dbclient.couchInstance.url()) if err != nil { logger.Errorf("URL parse error: %s", err) - return nil, nil, errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.CouchInstance.URL()) + return nil, nil, errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.couchInstance.url()) } //get the number of retries - maxRetries := dbclient.CouchInstance.conf.MaxRetries + maxRetries := dbclient.couchInstance.conf.MaxRetries resp, couchDBReturn, err := dbclient.handleRequest(http.MethodGet, "GetDatabaseInfo", connectURL, nil, "", "", maxRetries, true, nil) if err != nil { @@ -376,7 +335,7 @@ func (dbclient *CouchDatabase) GetDatabaseInfo() (*DBInfo, *DBReturn, error) { } defer closeResponseBody(resp) - dbResponse := &DBInfo{} + dbResponse := &dbInfo{} decodeErr := json.NewDecoder(resp.Body).Decode(&dbResponse) if decodeErr != nil { return nil, nil, errors.Wrap(decodeErr, "error decoding response body") @@ -389,16 +348,16 @@ func (dbclient *CouchDatabase) GetDatabaseInfo() (*DBInfo, *DBReturn, error) { } -//VerifyCouchConfig method provides function to verify the connection information -func (couchInstance *CouchInstance) 
VerifyCouchConfig() (*ConnectionInfo, *DBReturn, error) { +//verifyCouchConfig method provides function to verify the connection information +func (couchInstance *couchInstance) verifyCouchConfig() (*connectionInfo, *dbReturn, error) { logger.Debugf("Entering VerifyCouchConfig()") defer logger.Debugf("Exiting VerifyCouchConfig()") - connectURL, err := url.Parse(couchInstance.URL()) + connectURL, err := url.Parse(couchInstance.url()) if err != nil { logger.Errorf("URL parse error: %s", err) - return nil, nil, errors.Wrapf(err, "error parsing couch instance URL: %s", couchInstance.URL()) + return nil, nil, errors.Wrapf(err, "error parsing couch instance URL: %s", couchInstance.url()) } connectURL.Path = "/" @@ -413,7 +372,7 @@ func (couchInstance *CouchInstance) VerifyCouchConfig() (*ConnectionInfo, *DBRet } defer closeResponseBody(resp) - dbResponse := &ConnectionInfo{} + dbResponse := &connectionInfo{} decodeErr := json.NewDecoder(resp.Body).Decode(&dbResponse) if decodeErr != nil { return nil, nil, errors.Wrap(decodeErr, "error decoding response body") @@ -426,7 +385,7 @@ func (couchInstance *CouchInstance) VerifyCouchConfig() (*ConnectionInfo, *DBRet //Verifying the existence of the system database accomplishes two steps //1. Ensures the system databases are created //2. Verifies the username password provided in the CouchDB config are valid for system admin - err = CreateSystemDatabasesIfNotExist(couchInstance) + err = createSystemDatabasesIfNotExist(couchInstance) if err != nil { logger.Errorf("Unable to connect to CouchDB, error: %s. Check the admin username and password.", err) return nil, nil, errors.WithMessage(err, "unable to connect to CouchDB. 
Check the admin username and password") @@ -435,14 +394,14 @@ func (couchInstance *CouchInstance) VerifyCouchConfig() (*ConnectionInfo, *DBRet return dbResponse, couchDBReturn, nil } -// IsEmpty returns false if couchInstance contains any databases +// isEmpty returns false if couchInstance contains any databases // (except couchdb system databases and any database name supplied in the parameter 'databasesToIgnore') -func (couchInstance *CouchInstance) IsEmpty(databasesToIgnore []string) (bool, error) { +func (couchInstance *couchInstance) isEmpty(databasesToIgnore []string) (bool, error) { toIgnore := map[string]bool{} for _, s := range databasesToIgnore { toIgnore[s] = true } - applicationDBNames, err := couchInstance.RetrieveApplicationDBNames() + applicationDBNames, err := couchInstance.retrieveApplicationDBNames() if err != nil { return false, err } @@ -454,12 +413,12 @@ func (couchInstance *CouchInstance) IsEmpty(databasesToIgnore []string) (bool, e return true, nil } -// RetrieveApplicationDBNames returns all the applicaiton database names in the couch instance -func (couchInstance *CouchInstance) RetrieveApplicationDBNames() ([]string, error) { - connectURL, err := url.Parse(couchInstance.URL()) +// retrieveApplicationDBNames returns all the application database names in the couch instance +func (couchInstance *couchInstance) retrieveApplicationDBNames() ([]string, error) { + connectURL, err := url.Parse(couchInstance.url()) if err != nil { logger.Errorf("URL parse error: %s", err) - return nil, errors.Wrapf(err, "error parsing couch instance URL: %s", couchInstance.URL()) + return nil, errors.Wrapf(err, "error parsing couch instance URL: %s", couchInstance.url()) } connectURL.Path = "/_all_dbs" maxRetries := couchInstance.conf.MaxRetries @@ -500,12 +459,12 @@ func isCouchSystemDBName(name string) bool { return strings.HasPrefix(name, "_") } -// HealthCheck checks if the peer is able to communicate with CouchDB -func (couchInstance *CouchInstance) 
HealthCheck(ctx context.Context) error { - connectURL, err := url.Parse(couchInstance.URL()) +// healthCheck checks if the peer is able to communicate with CouchDB +func (couchInstance *couchInstance) healthCheck(ctx context.Context) error { + connectURL, err := url.Parse(couchInstance.url()) if err != nil { logger.Errorf("URL parse error: %s", err) - return errors.Wrapf(err, "error parsing CouchDB URL: %s", couchInstance.URL()) + return errors.Wrapf(err, "error parsing CouchDB URL: %s", couchInstance.url()) } _, _, err = couchInstance.handleRequest(ctx, http.MethodHead, "", "HealthCheck", connectURL, nil, "", "", 0, true, nil) if err != nil { @@ -514,20 +473,20 @@ func (couchInstance *CouchInstance) HealthCheck(ctx context.Context) error { return nil } -// InternalQueryLimit returns the maximum number of records to return internally +// internalQueryLimit returns the maximum number of records to return internally // when querying CouchDB. -func (couchInstance *CouchInstance) InternalQueryLimit() int32 { +func (couchInstance *couchInstance) internalQueryLimit() int32 { return int32(couchInstance.conf.InternalQueryLimit) } -// MaxBatchUpdateSize returns the maximum number of records to include in a +// maxBatchUpdateSize returns the maximum number of records to include in a // bulk update operation. -func (couchInstance *CouchInstance) MaxBatchUpdateSize() int { +func (couchInstance *couchInstance) maxBatchUpdateSize() int { return couchInstance.conf.MaxBatchUpdateSize } -// URL returns the URL for the CouchDB instance. -func (couchInstance *CouchInstance) URL() string { +// url returns the URL for the CouchDB instance. 
+func (couchInstance *couchInstance) url() string { URL := &url.URL{ Host: couchInstance.conf.Address, Scheme: "http", @@ -535,20 +494,20 @@ func (couchInstance *CouchInstance) URL() string { return URL.String() } -//DropDatabase provides method to drop an existing database -func (dbclient *CouchDatabase) DropDatabase() (*DBOperationResponse, error) { - dbName := dbclient.DBName +//dropDatabase provides method to drop an existing database +func (dbclient *couchDatabase) dropDatabase() (*dbOperationResponse, error) { + dbName := dbclient.dbName logger.Debugf("[%s] Entering DropDatabase()", dbName) - connectURL, err := url.Parse(dbclient.CouchInstance.URL()) + connectURL, err := url.Parse(dbclient.couchInstance.url()) if err != nil { logger.Errorf("URL parse error: %s", err) - return nil, errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.CouchInstance.URL()) + return nil, errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.couchInstance.url()) } //get the number of retries - maxRetries := dbclient.CouchInstance.conf.MaxRetries + maxRetries := dbclient.couchInstance.conf.MaxRetries resp, _, err := dbclient.handleRequest(http.MethodDelete, "DropDatabase", connectURL, nil, "", "", maxRetries, true, nil) if err != nil { @@ -556,17 +515,17 @@ func (dbclient *CouchDatabase) DropDatabase() (*DBOperationResponse, error) { } defer closeResponseBody(resp) - dbResponse := &DBOperationResponse{} + dbResponse := &dbOperationResponse{} decodeErr := json.NewDecoder(resp.Body).Decode(&dbResponse) if decodeErr != nil { return nil, errors.Wrap(decodeErr, "error decoding response body") } if dbResponse.Ok == true { - logger.Debugf("[%s] Dropped database", dbclient.DBName) + logger.Debugf("[%s] Dropped database", dbclient.dbName) } - logger.Debugf("[%s] Exiting DropDatabase()", dbclient.DBName) + logger.Debugf("[%s] Exiting DropDatabase()", dbclient.dbName) if dbResponse.Ok == true { @@ -578,20 +537,20 @@ func (dbclient *CouchDatabase) DropDatabase() 
(*DBOperationResponse, error) { } -// EnsureFullCommit calls _ensure_full_commit for explicit fsync -func (dbclient *CouchDatabase) EnsureFullCommit() (*DBOperationResponse, error) { - dbName := dbclient.DBName +// ensureFullCommit calls _ensure_full_commit for explicit fsync +func (dbclient *couchDatabase) ensureFullCommit() (*dbOperationResponse, error) { + dbName := dbclient.dbName logger.Debugf("[%s] Entering EnsureFullCommit()", dbName) - connectURL, err := url.Parse(dbclient.CouchInstance.URL()) + connectURL, err := url.Parse(dbclient.couchInstance.url()) if err != nil { logger.Errorf("URL parse error: %s", err) - return nil, errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.CouchInstance.URL()) + return nil, errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.couchInstance.url()) } //get the number of retries - maxRetries := dbclient.CouchInstance.conf.MaxRetries + maxRetries := dbclient.couchInstance.conf.MaxRetries resp, _, err := dbclient.handleRequest(http.MethodPost, "EnsureFullCommit", connectURL, nil, "", "", maxRetries, true, nil, "_ensure_full_commit") if err != nil { @@ -600,24 +559,24 @@ func (dbclient *CouchDatabase) EnsureFullCommit() (*DBOperationResponse, error) } defer closeResponseBody(resp) - dbResponse := &DBOperationResponse{} + dbResponse := &dbOperationResponse{} decodeErr := json.NewDecoder(resp.Body).Decode(&dbResponse) if decodeErr != nil { return nil, errors.Wrap(decodeErr, "error decoding response body") } // check if we should warm indexes - if dbclient.CouchInstance.conf.WarmIndexesAfterNBlocks > 0 { + if dbclient.couchInstance.conf.WarmIndexesAfterNBlocks > 0 { // check to see if the number of blocks committed exceeds the threshold for index warming - if dbclient.IndexWarmCounter >= dbclient.CouchInstance.conf.WarmIndexesAfterNBlocks { + if dbclient.indexWarmCounter >= dbclient.couchInstance.conf.WarmIndexesAfterNBlocks { // use a go routine to launch WarmIndexAllIndexes() go dbclient.runWarmIndexAllIndexes() - 
dbclient.IndexWarmCounter = 0 + dbclient.indexWarmCounter = 0 } - dbclient.IndexWarmCounter++ + dbclient.indexWarmCounter++ } - logger.Debugf("[%s] Exiting EnsureFullCommit()", dbclient.DBName) + logger.Debugf("[%s] Exiting EnsureFullCommit()", dbclient.dbName) if dbResponse.Ok == true { @@ -628,9 +587,9 @@ func (dbclient *CouchDatabase) EnsureFullCommit() (*DBOperationResponse, error) return dbResponse, errors.New("error syncing database") } -//SaveDoc method provides a function to save a document, id and byte array -func (dbclient *CouchDatabase) SaveDoc(id string, rev string, couchDoc *CouchDoc) (string, error) { - dbName := dbclient.DBName +//saveDoc method provides a function to save a document, id and byte array +func (dbclient *couchDatabase) saveDoc(id string, rev string, couchDoc *couchDoc) (string, error) { + dbName := dbclient.dbName logger.Debugf("[%s] Entering SaveDoc() id=[%s]", dbName, id) @@ -638,10 +597,10 @@ func (dbclient *CouchDatabase) SaveDoc(id string, rev string, couchDoc *CouchDoc return "", errors.Errorf("doc id [%x] not a valid utf8 string", id) } - saveURL, err := url.Parse(dbclient.CouchInstance.URL()) + saveURL, err := url.Parse(dbclient.couchInstance.url()) if err != nil { logger.Errorf("URL parse error: %s", err) - return "", errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.CouchInstance.URL()) + return "", errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.couchInstance.url()) } //Set up a buffer for the data to be pushed to couchdb @@ -654,15 +613,15 @@ func (dbclient *CouchDatabase) SaveDoc(id string, rev string, couchDoc *CouchDoc keepConnectionOpen := true //check to see if attachments is nil, if so, then this is a JSON only - if couchDoc.Attachments == nil { + if couchDoc.attachments == nil { //Test to see if this is a valid JSON - if IsJSON(string(couchDoc.JSONValue)) != true { + if isJSON(string(couchDoc.jsonValue)) != true { return "", errors.New("JSON format is not valid") } // if there are no 
attachments, then use the bytes passed in as the JSON - data = couchDoc.JSONValue + data = couchDoc.jsonValue } else { // there are attachments @@ -673,7 +632,7 @@ func (dbclient *CouchDatabase) SaveDoc(id string, rev string, couchDoc *CouchDoc } //If there is a zero length attachment, do not keep the connection open - for _, attach := range couchDoc.Attachments { + for _, attach := range couchDoc.attachments { if attach.Length < 1 { keepConnectionOpen = false } @@ -688,7 +647,7 @@ func (dbclient *CouchDatabase) SaveDoc(id string, rev string, couchDoc *CouchDoc } //get the number of retries - maxRetries := dbclient.CouchInstance.conf.MaxRetries + maxRetries := dbclient.couchInstance.conf.MaxRetries //handle the request for saving document with a retry if there is a revision conflict resp, _, err := dbclient.handleRequestWithRevisionRetry(id, http.MethodPut, dbName, "SaveDoc", saveURL, data, rev, defaultBoundary, maxRetries, keepConnectionOpen, nil) @@ -704,19 +663,19 @@ func (dbclient *CouchDatabase) SaveDoc(id string, rev string, couchDoc *CouchDoc return "", err } - logger.Debugf("[%s] Exiting SaveDoc()", dbclient.DBName) + logger.Debugf("[%s] Exiting SaveDoc()", dbclient.dbName) return revision, nil } //getDocumentRevision will return the revision if the document exists, otherwise it will return "" -func (dbclient *CouchDatabase) getDocumentRevision(id string) string { +func (dbclient *couchDatabase) getDocumentRevision(id string) string { var rev = "" //See if the document already exists, we need the rev for saves and deletes - _, revdoc, err := dbclient.ReadDoc(id) + _, revdoc, err := dbclient.readDoc(id) if err == nil { //set the revision to the rev returned from the document read rev = revdoc @@ -724,7 +683,7 @@ func (dbclient *CouchDatabase) getDocumentRevision(id string) string { return rev } -func createAttachmentPart(couchDoc *CouchDoc, defaultBoundary string) (bytes.Buffer, string, error) { +func createAttachmentPart(couchDoc *couchDoc, defaultBoundary 
string) (bytes.Buffer, string, error) { //Create a buffer for writing the result writeBuffer := new(bytes.Buffer) @@ -735,23 +694,23 @@ func createAttachmentPart(couchDoc *CouchDoc, defaultBoundary string) (bytes.Buf //retrieve the boundary for the multipart defaultBoundary = writer.Boundary() - fileAttachments := map[string]FileDetails{} + fileAttachments := map[string]fileDetails{} - for _, attachment := range couchDoc.Attachments { - fileAttachments[attachment.Name] = FileDetails{true, attachment.ContentType, len(attachment.AttachmentBytes)} + for _, attachment := range couchDoc.attachments { + fileAttachments[attachment.Name] = fileDetails{true, attachment.ContentType, len(attachment.AttachmentBytes)} } attachmentJSONMap := map[string]interface{}{ "_attachments": fileAttachments} //Add any data uploaded with the files - if couchDoc.JSONValue != nil { + if couchDoc.jsonValue != nil { //create a generic map genericMap := make(map[string]interface{}) //unmarshal the data into the generic map - decoder := json.NewDecoder(bytes.NewBuffer(couchDoc.JSONValue)) + decoder := json.NewDecoder(bytes.NewBuffer(couchDoc.jsonValue)) decoder.UseNumber() decodeErr := decoder.Decode(&genericMap) if decodeErr != nil { @@ -783,7 +742,7 @@ func createAttachmentPart(couchDoc *CouchDoc, defaultBoundary string) (bytes.Buf part.Write(filesForUpload) - for _, attachment := range couchDoc.Attachments { + for _, attachment := range couchDoc.attachments { header := make(textproto.MIMEHeader) part, err2 := writer.CreatePart(header) @@ -821,39 +780,39 @@ func getRevisionHeader(resp *http.Response) (string, error) { } -//ReadDoc method provides function to retrieve a document and its revision +//readDoc method provides function to retrieve a document and its revision //from the database by id -func (dbclient *CouchDatabase) ReadDoc(id string) (*CouchDoc, string, error) { - var couchDoc CouchDoc - attachments := []*AttachmentInfo{} - dbName := dbclient.DBName +func (dbclient *couchDatabase) 
readDoc(id string) (*couchDoc, string, error) { + var couchDoc couchDoc + attachments := []*attachmentInfo{} + dbName := dbclient.dbName logger.Debugf("[%s] Entering ReadDoc() id=[%s]", dbName, id) if !utf8.ValidString(id) { return nil, "", errors.Errorf("doc id [%x] not a valid utf8 string", id) } - readURL, err := url.Parse(dbclient.CouchInstance.URL()) + readURL, err := url.Parse(dbclient.couchInstance.url()) if err != nil { logger.Errorf("URL parse error: %s", err) - return nil, "", errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.CouchInstance.URL()) + return nil, "", errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.couchInstance.url()) } query := readURL.Query() query.Add("attachments", "true") //get the number of retries - maxRetries := dbclient.CouchInstance.conf.MaxRetries + maxRetries := dbclient.couchInstance.conf.MaxRetries resp, couchDBReturn, err := dbclient.handleRequest(http.MethodGet, "ReadDoc", readURL, nil, "", "", maxRetries, true, &query, id) if err != nil { if couchDBReturn != nil && couchDBReturn.StatusCode == 404 { - logger.Debugf("[%s] Document not found (404), returning nil value instead of 404 error", dbclient.DBName) + logger.Debugf("[%s] Document not found (404), returning nil value instead of 404 error", dbclient.dbName) // non-existent document should return nil value instead of a 404 error // for details see https://github.com/hyperledger-archives/fabric/issues/936 return nil, "", nil } - logger.Debugf("[%s] couchDBReturn=%v\n", dbclient.DBName, couchDBReturn) + logger.Debugf("[%s] couchDBReturn=%v\n", dbclient.dbName, couchDBReturn) return nil, "", err } defer closeResponseBody(resp) @@ -885,18 +844,18 @@ func (dbclient *CouchDatabase) ReadDoc(id string) (*CouchDoc, string, error) { defer p.Close() - logger.Debugf("[%s] part header=%s", dbclient.DBName, p.Header) + logger.Debugf("[%s] part header=%s", dbclient.dbName, p.Header) switch p.Header.Get("Content-Type") { case "application/json": partdata, err 
:= ioutil.ReadAll(p) if err != nil { return nil, "", errors.Wrap(err, "error reading multipart data") } - couchDoc.JSONValue = partdata + couchDoc.jsonValue = partdata default: //Create an attachment structure and load it - attachment := &AttachmentInfo{} + attachment := &attachmentInfo{} attachment.ContentType = p.Header.Get("Content-Type") contentDispositionParts := strings.Split(p.Header.Get("Content-Disposition"), ";") if strings.TrimSpace(contentDispositionParts[0]) == "attachment" { @@ -914,7 +873,7 @@ func (dbclient *CouchDatabase) ReadDoc(id string) (*CouchDoc, string, error) { return nil, "", errors.Wrap(err, "error reading gzip data") } - logger.Debugf("[%s] Retrieved attachment data", dbclient.DBName) + logger.Debugf("[%s] Retrieved attachment data", dbclient.dbName) attachment.AttachmentBytes = respBody attachment.Length = uint64(len(attachment.AttachmentBytes)) attachment.Name = p.FileName() @@ -927,7 +886,7 @@ func (dbclient *CouchDatabase) ReadDoc(id string) (*CouchDoc, string, error) { if err != nil { return nil, "", errors.Wrap(err, "error reading multipart data") } - logger.Debugf("[%s] Retrieved attachment data", dbclient.DBName) + logger.Debugf("[%s] Retrieved attachment data", dbclient.dbName) attachment.AttachmentBytes = partdata attachment.Length = uint64(len(attachment.AttachmentBytes)) attachment.Name = p.FileName() @@ -938,35 +897,35 @@ func (dbclient *CouchDatabase) ReadDoc(id string) (*CouchDoc, string, error) { } // end content-type switch } // for all multiparts - couchDoc.Attachments = attachments + couchDoc.attachments = attachments return &couchDoc, revision, nil } //handle as JSON document - couchDoc.JSONValue, err = ioutil.ReadAll(resp.Body) + couchDoc.jsonValue, err = ioutil.ReadAll(resp.Body) if err != nil { return nil, "", errors.Wrap(err, "error reading response body") } - logger.Debugf("[%s] Exiting ReadDoc()", dbclient.DBName) + logger.Debugf("[%s] Exiting ReadDoc()", dbclient.dbName) return &couchDoc, revision, nil } 
-//ReadDocRange method provides function to a range of documents based on the start and end keys +//readDocRange method provides function to a range of documents based on the start and end keys //startKey and endKey can also be empty strings. If startKey and endKey are empty, all documents are returned //This function provides a limit option to specify the max number of entries and is supplied by config. //Skip is reserved for possible future future use. -func (dbclient *CouchDatabase) ReadDocRange(startKey, endKey string, limit int32) ([]*QueryResult, string, error) { - dbName := dbclient.DBName +func (dbclient *couchDatabase) readDocRange(startKey, endKey string, limit int32) ([]*queryResult, string, error) { + dbName := dbclient.dbName logger.Debugf("[%s] Entering ReadDocRange() startKey=%s, endKey=%s", dbName, startKey, endKey) - var results []*QueryResult + var results []*queryResult - rangeURL, err := url.Parse(dbclient.CouchInstance.URL()) + rangeURL, err := url.Parse(dbclient.couchInstance.url()) if err != nil { logger.Errorf("URL parse error: %s", err) - return nil, "", errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.CouchInstance.URL()) + return nil, "", errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.couchInstance.url()) } queryParms := rangeURL.Query() @@ -994,7 +953,7 @@ func (dbclient *CouchDatabase) ReadDocRange(startKey, endKey string, limit int32 } //get the number of retries - maxRetries := dbclient.CouchInstance.conf.MaxRetries + maxRetries := dbclient.couchInstance.conf.MaxRetries resp, _, err := dbclient.handleRequest(http.MethodGet, "RangeDocRange", rangeURL, nil, "", "", maxRetries, true, &queryParms, "_all_docs") if err != nil { @@ -1007,7 +966,7 @@ func (dbclient *CouchDatabase) ReadDocRange(startKey, endKey string, limit int32 if err2 != nil { log.Fatal(err2) } - logger.Debugf("[%s] %s", dbclient.DBName, dump) + logger.Debugf("[%s] %s", dbclient.dbName, dump) } //handle as JSON document @@ -1016,7 +975,7 @@ func 
(dbclient *CouchDatabase) ReadDocRange(startKey, endKey string, limit int32 return nil, "", errors.Wrap(err, "error reading response body") } - var jsonResponse = &RangeQueryResponse{} + var jsonResponse = &rangeQueryResponse{} err2 := json.Unmarshal(jsonResponseRaw, &jsonResponse) if err2 != nil { return nil, "", errors.Wrap(err2, "error unmarshalling json data") @@ -1028,14 +987,14 @@ func (dbclient *CouchDatabase) ReadDocRange(startKey, endKey string, limit int32 jsonResponse.TotalRows = limit } - logger.Debugf("[%s] Total Rows: %d", dbclient.DBName, jsonResponse.TotalRows) + logger.Debugf("[%s] Total Rows: %d", dbclient.dbName, jsonResponse.TotalRows) //Use the next endKey as the starting default for the nextStartKey nextStartKey := endKey for index, row := range jsonResponse.Rows { - var docMetadata = &DocMetadata{} + var docMetadata = &docMetadata{} err3 := json.Unmarshal(row.Doc, &docMetadata) if err3 != nil { return nil, "", errors.Wrap(err3, "error unmarshalling json data") @@ -1050,49 +1009,49 @@ func (dbclient *CouchDatabase) ReadDocRange(startKey, endKey string, limit int32 if docMetadata.AttachmentsInfo != nil { - logger.Debugf("[%s] Adding JSON document and attachments for id: %s", dbclient.DBName, docMetadata.ID) + logger.Debugf("[%s] Adding JSON document and attachments for id: %s", dbclient.dbName, docMetadata.ID) - attachments := []*AttachmentInfo{} + attachments := []*attachmentInfo{} for attachmentName, attachment := range docMetadata.AttachmentsInfo { attachment.Name = attachmentName attachments = append(attachments, attachment) } - var addDocument = &QueryResult{docMetadata.ID, row.Doc, attachments} + var addDocument = &queryResult{docMetadata.ID, row.Doc, attachments} results = append(results, addDocument) } else { - logger.Debugf("[%s] Adding json docment for id: %s", dbclient.DBName, docMetadata.ID) + logger.Debugf("[%s] Adding json docment for id: %s", dbclient.dbName, docMetadata.ID) - var addDocument = &QueryResult{docMetadata.ID, 
row.Doc, nil} + var addDocument = &queryResult{docMetadata.ID, row.Doc, nil} results = append(results, addDocument) } } - logger.Debugf("[%s] Exiting ReadDocRange()", dbclient.DBName) + logger.Debugf("[%s] Exiting ReadDocRange()", dbclient.dbName) return results, nextStartKey, nil } -//DeleteDoc method provides function to delete a document from the database by id -func (dbclient *CouchDatabase) DeleteDoc(id, rev string) error { - dbName := dbclient.DBName +//deleteDoc method provides function to delete a document from the database by id +func (dbclient *couchDatabase) deleteDoc(id, rev string) error { + dbName := dbclient.dbName logger.Debugf("[%s] Entering DeleteDoc() id=%s", dbName, id) - deleteURL, err := url.Parse(dbclient.CouchInstance.URL()) + deleteURL, err := url.Parse(dbclient.couchInstance.url()) if err != nil { logger.Errorf("URL parse error: %s", err) - return errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.CouchInstance.URL()) + return errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.couchInstance.url()) } //get the number of retries - maxRetries := dbclient.CouchInstance.conf.MaxRetries + maxRetries := dbclient.couchInstance.conf.MaxRetries //handle the request for saving document with a retry if there is a revision conflict resp, couchDBReturn, err := dbclient.handleRequestWithRevisionRetry(id, http.MethodDelete, dbName, "DeleteDoc", @@ -1100,7 +1059,7 @@ func (dbclient *CouchDatabase) DeleteDoc(id, rev string) error { if err != nil { if couchDBReturn != nil && couchDBReturn.StatusCode == 404 { - logger.Debugf("[%s] Document not found (404), returning nil value instead of 404 error", dbclient.DBName) + logger.Debugf("[%s] Document not found (404), returning nil value instead of 404 error", dbclient.dbName) // non-existent document should return nil value instead of a 404 error // for details see https://github.com/hyperledger-archives/fabric/issues/936 return nil @@ -1109,28 +1068,28 @@ func (dbclient *CouchDatabase) 
DeleteDoc(id, rev string) error { } defer closeResponseBody(resp) - logger.Debugf("[%s] Exiting DeleteDoc()", dbclient.DBName) + logger.Debugf("[%s] Exiting DeleteDoc()", dbclient.dbName) return nil } -//QueryDocuments method provides function for processing a query -func (dbclient *CouchDatabase) QueryDocuments(query string) ([]*QueryResult, string, error) { - dbName := dbclient.DBName +//queryDocuments method provides function for processing a query +func (dbclient *couchDatabase) queryDocuments(query string) ([]*queryResult, string, error) { + dbName := dbclient.dbName logger.Debugf("[%s] Entering QueryDocuments() query=%s", dbName, query) - var results []*QueryResult + var results []*queryResult - queryURL, err := url.Parse(dbclient.CouchInstance.URL()) + queryURL, err := url.Parse(dbclient.couchInstance.url()) if err != nil { logger.Errorf("URL parse error: %s", err) - return nil, "", errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.CouchInstance.URL()) + return nil, "", errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.couchInstance.url()) } //get the number of retries - maxRetries := dbclient.CouchInstance.conf.MaxRetries + maxRetries := dbclient.couchInstance.conf.MaxRetries resp, _, err := dbclient.handleRequest(http.MethodPost, "QueryDocuments", queryURL, []byte(query), "", "", maxRetries, true, nil, "_find") if err != nil { @@ -1143,7 +1102,7 @@ func (dbclient *CouchDatabase) QueryDocuments(query string) ([]*QueryResult, str if err2 != nil { log.Fatal(err2) } - logger.Debugf("[%s] %s", dbclient.DBName, dump) + logger.Debugf("[%s] %s", dbclient.dbName, dump) } //handle as JSON document @@ -1152,7 +1111,7 @@ func (dbclient *CouchDatabase) QueryDocuments(query string) ([]*QueryResult, str return nil, "", errors.Wrap(err, "error reading response body") } - var jsonResponse = &QueryResponse{} + var jsonResponse = &queryResponse{} err2 := json.Unmarshal(jsonResponseRaw, &jsonResponse) if err2 != nil { @@ -1165,7 +1124,7 @@ func (dbclient 
*CouchDatabase) QueryDocuments(query string) ([]*QueryResult, str for _, row := range jsonResponse.Docs { - var docMetadata = &DocMetadata{} + var docMetadata = &docMetadata{} err3 := json.Unmarshal(row, &docMetadata) if err3 != nil { return nil, "", errors.Wrap(err3, "error unmarshalling json data") @@ -1175,32 +1134,32 @@ func (dbclient *CouchDatabase) QueryDocuments(query string) ([]*QueryResult, str // The If block below will never be executed if docMetadata.AttachmentsInfo != nil { - logger.Debugf("[%s] Adding JSON docment and attachments for id: %s", dbclient.DBName, docMetadata.ID) + logger.Debugf("[%s] Adding JSON docment and attachments for id: %s", dbclient.dbName, docMetadata.ID) - couchDoc, _, err := dbclient.ReadDoc(docMetadata.ID) + couchDoc, _, err := dbclient.readDoc(docMetadata.ID) if err != nil { return nil, "", err } - var addDocument = &QueryResult{ID: docMetadata.ID, Value: couchDoc.JSONValue, Attachments: couchDoc.Attachments} + var addDocument = &queryResult{id: docMetadata.ID, value: couchDoc.jsonValue, attachments: couchDoc.attachments} results = append(results, addDocument) } else { - logger.Debugf("[%s] Adding json docment for id: %s", dbclient.DBName, docMetadata.ID) - var addDocument = &QueryResult{ID: docMetadata.ID, Value: row, Attachments: nil} + logger.Debugf("[%s] Adding json docment for id: %s", dbclient.dbName, docMetadata.ID) + var addDocument = &queryResult{id: docMetadata.ID, value: row, attachments: nil} results = append(results, addDocument) } } - logger.Debugf("[%s] Exiting QueryDocuments()", dbclient.DBName) + logger.Debugf("[%s] Exiting QueryDocuments()", dbclient.dbName) return results, jsonResponse.Bookmark, nil } -// ListIndex method lists the defined indexes for a database -func (dbclient *CouchDatabase) ListIndex() ([]*IndexResult, error) { +// listIndex method lists the defined indexes for a database +func (dbclient *couchDatabase) listIndex() ([]*indexResult, error) { //IndexDefinition contains the definition for a 
couchdb index type indexDefinition struct { @@ -1216,17 +1175,17 @@ func (dbclient *CouchDatabase) ListIndex() ([]*IndexResult, error) { Indexes []indexDefinition `json:"indexes"` } - dbName := dbclient.DBName + dbName := dbclient.dbName logger.Debug("[%s] Entering ListIndex()", dbName) - indexURL, err := url.Parse(dbclient.CouchInstance.URL()) + indexURL, err := url.Parse(dbclient.couchInstance.url()) if err != nil { logger.Errorf("URL parse error: %s", err) - return nil, errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.CouchInstance.URL()) + return nil, errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.couchInstance.url()) } //get the number of retries - maxRetries := dbclient.CouchInstance.conf.MaxRetries + maxRetries := dbclient.couchInstance.conf.MaxRetries resp, _, err := dbclient.handleRequest(http.MethodGet, "ListIndex", indexURL, nil, "", "", maxRetries, true, nil, "_index") if err != nil { @@ -1247,7 +1206,7 @@ func (dbclient *CouchDatabase) ListIndex() ([]*IndexResult, error) { return nil, errors.Wrap(err2, "error unmarshalling json data") } - var results []*IndexResult + var results []*indexResult for _, row := range jsonResponse.Indexes { @@ -1259,37 +1218,37 @@ func (dbclient *CouchDatabase) ListIndex() ([]*IndexResult, error) { designDoc = s[1] //Add the index definition to the results - var addIndexResult = &IndexResult{DesignDocument: designDoc, Name: row.Name, Definition: fmt.Sprintf("%s", row.Definition)} + var addIndexResult = &indexResult{DesignDocument: designDoc, Name: row.Name, Definition: fmt.Sprintf("%s", row.Definition)} results = append(results, addIndexResult) } } - logger.Debugf("[%s] Exiting ListIndex()", dbclient.DBName) + logger.Debugf("[%s] Exiting ListIndex()", dbclient.dbName) return results, nil } -// CreateIndex method provides a function creating an index -func (dbclient *CouchDatabase) CreateIndex(indexdefinition string) (*CreateIndexResponse, error) { - dbName := dbclient.DBName +// createIndex method 
provides a function creating an index +func (dbclient *couchDatabase) createIndex(indexdefinition string) (*createIndexResponse, error) { + dbName := dbclient.dbName logger.Debugf("[%s] Entering CreateIndex() indexdefinition=%s", dbName, indexdefinition) //Test to see if this is a valid JSON - if IsJSON(indexdefinition) != true { + if isJSON(indexdefinition) != true { return nil, errors.New("JSON format is not valid") } - indexURL, err := url.Parse(dbclient.CouchInstance.URL()) + indexURL, err := url.Parse(dbclient.couchInstance.url()) if err != nil { logger.Errorf("URL parse error: %s", err) - return nil, errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.CouchInstance.URL()) + return nil, errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.couchInstance.url()) } //get the number of retries - maxRetries := dbclient.CouchInstance.conf.MaxRetries + maxRetries := dbclient.couchInstance.conf.MaxRetries resp, _, err := dbclient.handleRequest(http.MethodPost, "CreateIndex", indexURL, []byte(indexdefinition), "", "", maxRetries, true, nil, "_index") if err != nil { @@ -1307,7 +1266,7 @@ func (dbclient *CouchDatabase) CreateIndex(indexdefinition string) (*CreateIndex return nil, errors.Wrap(err, "error reading response body") } - couchDBReturn := &CreateIndexResponse{} + couchDBReturn := &createIndexResponse{} jsonBytes := []byte(respBody) @@ -1319,31 +1278,31 @@ func (dbclient *CouchDatabase) CreateIndex(indexdefinition string) (*CreateIndex if couchDBReturn.Result == "created" { - logger.Infof("Created CouchDB index [%s] in state database [%s] using design document [%s]", couchDBReturn.Name, dbclient.DBName, couchDBReturn.ID) + logger.Infof("Created CouchDB index [%s] in state database [%s] using design document [%s]", couchDBReturn.Name, dbclient.dbName, couchDBReturn.ID) return couchDBReturn, nil } - logger.Infof("Updated CouchDB index [%s] in state database [%s] using design document [%s]", couchDBReturn.Name, dbclient.DBName, couchDBReturn.ID) + 
logger.Infof("Updated CouchDB index [%s] in state database [%s] using design document [%s]", couchDBReturn.Name, dbclient.dbName, couchDBReturn.ID) return couchDBReturn, nil } -// DeleteIndex method provides a function deleting an index -func (dbclient *CouchDatabase) DeleteIndex(designdoc, indexname string) error { - dbName := dbclient.DBName +// deleteIndex method provides a function deleting an index +func (dbclient *couchDatabase) deleteIndex(designdoc, indexname string) error { + dbName := dbclient.dbName logger.Debugf("[%s] Entering DeleteIndex() designdoc=%s indexname=%s", dbName, designdoc, indexname) - indexURL, err := url.Parse(dbclient.CouchInstance.URL()) + indexURL, err := url.Parse(dbclient.couchInstance.url()) if err != nil { logger.Errorf("URL parse error: %s", err) - return errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.CouchInstance.URL()) + return errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.couchInstance.url()) } //get the number of retries - maxRetries := dbclient.CouchInstance.conf.MaxRetries + maxRetries := dbclient.couchInstance.conf.MaxRetries resp, _, err := dbclient.handleRequest(http.MethodDelete, "DeleteIndex", indexURL, nil, "", "", maxRetries, true, nil, "_index", designdoc, "json", indexname) if err != nil { @@ -1355,16 +1314,16 @@ func (dbclient *CouchDatabase) DeleteIndex(designdoc, indexname string) error { } -//WarmIndex method provides a function for warming a single index -func (dbclient *CouchDatabase) WarmIndex(designdoc, indexname string) error { - dbName := dbclient.DBName +//warmIndex method provides a function for warming a single index +func (dbclient *couchDatabase) warmIndex(designdoc, indexname string) error { + dbName := dbclient.dbName logger.Debugf("[%s] Entering WarmIndex() designdoc=%s indexname=%s", dbName, designdoc, indexname) - indexURL, err := url.Parse(dbclient.CouchInstance.URL()) + indexURL, err := url.Parse(dbclient.couchInstance.url()) if err != nil { logger.Errorf("URL parse 
error: %s", err) - return errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.CouchInstance.URL()) + return errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.couchInstance.url()) } queryParms := indexURL.Query() @@ -1373,7 +1332,7 @@ func (dbclient *CouchDatabase) WarmIndex(designdoc, indexname string) error { queryParms.Add("stale", "update_after") //get the number of retries - maxRetries := dbclient.CouchInstance.conf.MaxRetries + maxRetries := dbclient.couchInstance.conf.MaxRetries resp, _, err := dbclient.handleRequest(http.MethodGet, "WarmIndex", indexURL, nil, "", "", maxRetries, true, &queryParms, "_design", designdoc, "_view", indexname) if err != nil { @@ -1386,22 +1345,22 @@ func (dbclient *CouchDatabase) WarmIndex(designdoc, indexname string) error { } //runWarmIndexAllIndexes is a wrapper for WarmIndexAllIndexes to catch and report any errors -func (dbclient *CouchDatabase) runWarmIndexAllIndexes() { +func (dbclient *couchDatabase) runWarmIndexAllIndexes() { - err := dbclient.WarmIndexAllIndexes() + err := dbclient.warmIndexAllIndexes() if err != nil { logger.Errorf("Error detected during WarmIndexAllIndexes(): %+v", err) } } -//WarmIndexAllIndexes method provides a function for warming all indexes for a database -func (dbclient *CouchDatabase) WarmIndexAllIndexes() error { +//warmIndexAllIndexes method provides a function for warming all indexes for a database +func (dbclient *couchDatabase) warmIndexAllIndexes() error { - logger.Debugf("[%s] Entering WarmIndexAllIndexes()", dbclient.DBName) + logger.Debugf("[%s] Entering WarmIndexAllIndexes()", dbclient.dbName) //Retrieve all indexes - listResult, err := dbclient.ListIndex() + listResult, err := dbclient.listIndex() if err != nil { return err } @@ -1409,33 +1368,33 @@ func (dbclient *CouchDatabase) WarmIndexAllIndexes() error { //For each index definition, execute an index refresh for _, elem := range listResult { - err := dbclient.WarmIndex(elem.DesignDocument, elem.Name) + err := 
dbclient.warmIndex(elem.DesignDocument, elem.Name) if err != nil { return err } } - logger.Debugf("[%s] Exiting WarmIndexAllIndexes()", dbclient.DBName) + logger.Debugf("[%s] Exiting WarmIndexAllIndexes()", dbclient.dbName) return nil } -//GetDatabaseSecurity method provides function to retrieve the security config for a database -func (dbclient *CouchDatabase) GetDatabaseSecurity() (*DatabaseSecurity, error) { - dbName := dbclient.DBName +//getDatabaseSecurity method provides function to retrieve the security config for a database +func (dbclient *couchDatabase) getDatabaseSecurity() (*databaseSecurity, error) { + dbName := dbclient.dbName logger.Debugf("[%s] Entering GetDatabaseSecurity()", dbName) - securityURL, err := url.Parse(dbclient.CouchInstance.URL()) + securityURL, err := url.Parse(dbclient.couchInstance.url()) if err != nil { logger.Errorf("URL parse error: %s", err) - return nil, errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.CouchInstance.URL()) + return nil, errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.couchInstance.url()) } //get the number of retries - maxRetries := dbclient.CouchInstance.conf.MaxRetries + maxRetries := dbclient.couchInstance.conf.MaxRetries resp, _, err := dbclient.handleRequest(http.MethodGet, "GetDatabaseSecurity", securityURL, nil, "", "", maxRetries, true, nil, "_security") @@ -1450,29 +1409,29 @@ func (dbclient *CouchDatabase) GetDatabaseSecurity() (*DatabaseSecurity, error) return nil, errors.Wrap(err, "error reading response body") } - var jsonResponse = &DatabaseSecurity{} + var jsonResponse = &databaseSecurity{} err2 := json.Unmarshal(jsonResponseRaw, jsonResponse) if err2 != nil { return nil, errors.Wrap(err2, "error unmarshalling json data") } - logger.Debugf("[%s] Exiting GetDatabaseSecurity()", dbclient.DBName) + logger.Debugf("[%s] Exiting GetDatabaseSecurity()", dbclient.dbName) return jsonResponse, nil } -//ApplyDatabaseSecurity method provides function to update the security config for 
a database -func (dbclient *CouchDatabase) ApplyDatabaseSecurity(databaseSecurity *DatabaseSecurity) error { - dbName := dbclient.DBName +//applyDatabaseSecurity method provides function to update the security config for a database +func (dbclient *couchDatabase) applyDatabaseSecurity(databaseSecurity *databaseSecurity) error { + dbName := dbclient.dbName logger.Debugf("[%s] Entering ApplyDatabaseSecurity()", dbName) - securityURL, err := url.Parse(dbclient.CouchInstance.URL()) + securityURL, err := url.Parse(dbclient.couchInstance.url()) if err != nil { logger.Errorf("URL parse error: %s", err) - return errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.CouchInstance.URL()) + return errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.couchInstance.url()) } //Ensure all of the arrays are initialized to empty arrays instead of nil @@ -1490,14 +1449,14 @@ func (dbclient *CouchDatabase) ApplyDatabaseSecurity(databaseSecurity *DatabaseS } //get the number of retries - maxRetries := dbclient.CouchInstance.conf.MaxRetries + maxRetries := dbclient.couchInstance.conf.MaxRetries databaseSecurityJSON, err := json.Marshal(databaseSecurity) if err != nil { return errors.Wrap(err, "error unmarshalling json data") } - logger.Debugf("[%s] Applying security to database: %s", dbclient.DBName, string(databaseSecurityJSON)) + logger.Debugf("[%s] Applying security to database: %s", dbclient.dbName, string(databaseSecurityJSON)) resp, _, err := dbclient.handleRequest(http.MethodPut, "ApplyDatabaseSecurity", securityURL, databaseSecurityJSON, "", "", maxRetries, true, nil, "_security") @@ -1506,22 +1465,22 @@ func (dbclient *CouchDatabase) ApplyDatabaseSecurity(databaseSecurity *DatabaseS } defer closeResponseBody(resp) - logger.Debugf("[%s] Exiting ApplyDatabaseSecurity()", dbclient.DBName) + logger.Debugf("[%s] Exiting ApplyDatabaseSecurity()", dbclient.dbName) return nil } -//BatchRetrieveDocumentMetadata - batch method to retrieve document metadata for a set of 
keys, -// including ID, couchdb revision number, and ledger version -func (dbclient *CouchDatabase) BatchRetrieveDocumentMetadata(keys []string) ([]*DocMetadata, error) { +//batchRetrieveDocumentMetadata - batch method to retrieve document metadata for a set of keys, +//including ID, couchdb revision number, and ledger version +func (dbclient *couchDatabase) batchRetrieveDocumentMetadata(keys []string) ([]*docMetadata, error) { - logger.Debugf("[%s] Entering BatchRetrieveDocumentMetadata() keys=%s", dbclient.DBName, keys) + logger.Debugf("[%s] Entering BatchRetrieveDocumentMetadata() keys=%s", dbclient.dbName, keys) - batchRetrieveURL, err := url.Parse(dbclient.CouchInstance.URL()) + batchRetrieveURL, err := url.Parse(dbclient.couchInstance.url()) if err != nil { logger.Errorf("URL parse error: %s", err) - return nil, errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.CouchInstance.URL()) + return nil, errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.couchInstance.url()) } queryParms := batchRetrieveURL.Query() @@ -1544,7 +1503,7 @@ func (dbclient *CouchDatabase) BatchRetrieveDocumentMetadata(keys []string) ([]* } //get the number of retries - maxRetries := dbclient.CouchInstance.conf.MaxRetries + maxRetries := dbclient.couchInstance.conf.MaxRetries resp, _, err := dbclient.handleRequest(http.MethodPost, "BatchRetrieveDocumentMetadata", batchRetrieveURL, jsonKeys, "", "", maxRetries, true, &queryParms, "_all_docs") if err != nil { @@ -1555,7 +1514,7 @@ func (dbclient *CouchDatabase) BatchRetrieveDocumentMetadata(keys []string) ([]* if logger.IsEnabledFor(zapcore.DebugLevel) { dump, _ := httputil.DumpResponse(resp, false) // compact debug log by replacing carriage return / line feed with dashes to separate http headers - logger.Debugf("[%s] HTTP Response: %s", dbclient.DBName, bytes.Replace(dump, []byte{0x0d, 0x0a}, []byte{0x20, 0x7c, 0x20}, -1)) + logger.Debugf("[%s] HTTP Response: %s", dbclient.dbName, bytes.Replace(dump, []byte{0x0d, 0x0a}, 
[]byte{0x20, 0x7c, 0x20}, -1)) } //handle as JSON document @@ -1564,29 +1523,29 @@ func (dbclient *CouchDatabase) BatchRetrieveDocumentMetadata(keys []string) ([]* return nil, errors.Wrap(err, "error reading response body") } - var jsonResponse = &BatchRetrieveDocMetadataResponse{} + var jsonResponse = &batchRetrieveDocMetadataResponse{} err2 := json.Unmarshal(jsonResponseRaw, &jsonResponse) if err2 != nil { return nil, errors.Wrap(err2, "error unmarshalling json data") } - docMetadataArray := []*DocMetadata{} + docMetadataArray := []*docMetadata{} for _, row := range jsonResponse.Rows { - docMetadata := &DocMetadata{ID: row.ID, Rev: row.DocMetadata.Rev, Version: row.DocMetadata.Version} + docMetadata := &docMetadata{ID: row.ID, Rev: row.DocMetadata.Rev, Version: row.DocMetadata.Version} docMetadataArray = append(docMetadataArray, docMetadata) } - logger.Debugf("[%s] Exiting BatchRetrieveDocumentMetadata()", dbclient.DBName) + logger.Debugf("[%s] Exiting BatchRetrieveDocumentMetadata()", dbclient.dbName) return docMetadataArray, nil } -//BatchUpdateDocuments - batch method to batch update documents -func (dbclient *CouchDatabase) BatchUpdateDocuments(documents []*CouchDoc) ([]*BatchUpdateResponse, error) { - dbName := dbclient.DBName +//batchUpdateDocuments - batch method to batch update documents +func (dbclient *couchDatabase) batchUpdateDocuments(documents []*couchDoc) ([]*batchUpdateResponse, error) { + dbName := dbclient.dbName if logger.IsEnabledFor(zapcore.DebugLevel) { documentIdsString, err := printDocumentIds(documents) @@ -1597,10 +1556,10 @@ func (dbclient *CouchDatabase) BatchUpdateDocuments(documents []*CouchDoc) ([]*B } } - batchUpdateURL, err := url.Parse(dbclient.CouchInstance.URL()) + batchUpdateURL, err := url.Parse(dbclient.couchInstance.url()) if err != nil { logger.Errorf("URL parse error: %s", err) - return nil, errors.Wrapf(err, "error parsing CouchDB URL: %s", dbclient.CouchInstance.URL()) + return nil, errors.Wrapf(err, "error parsing 
CouchDB URL: %s", dbclient.couchInstance.url()) } documentMap := make(map[string]interface{}) @@ -1612,22 +1571,22 @@ func (dbclient *CouchDatabase) BatchUpdateDocuments(documents []*CouchDoc) ([]*B //create a document map var document = make(map[string]interface{}) - //unmarshal the JSON component of the CouchDoc into the document - err = json.Unmarshal(jsonDocument.JSONValue, &document) + //unmarshal the JSON component of the couchDoc into the document + err = json.Unmarshal(jsonDocument.jsonValue, &document) if err != nil { return nil, errors.Wrap(err, "error unmarshalling json data") } //iterate through any attachments - if len(jsonDocument.Attachments) > 0 { + if len(jsonDocument.attachments) > 0 { //create a file attachment map fileAttachment := make(map[string]interface{}) - //for each attachment, create a Base64Attachment, name the attachment, + //for each attachment, create a base64Attachment, name the attachment, //add the content type and base64 encode the attachment - for _, attachment := range jsonDocument.Attachments { - fileAttachment[attachment.Name] = Base64Attachment{attachment.ContentType, + for _, attachment := range jsonDocument.attachments { + fileAttachment[attachment.Name] = base64Attachment{attachment.ContentType, base64.StdEncoding.EncodeToString(attachment.AttachmentBytes)} } @@ -1650,7 +1609,7 @@ func (dbclient *CouchDatabase) BatchUpdateDocuments(documents []*CouchDoc) ([]*B } //get the number of retries - maxRetries := dbclient.CouchInstance.conf.MaxRetries + maxRetries := dbclient.couchInstance.conf.MaxRetries resp, _, err := dbclient.handleRequest(http.MethodPost, "BatchUpdateDocuments", batchUpdateURL, bulkDocsJSON, "", "", maxRetries, true, nil, "_bulk_docs") if err != nil { @@ -1661,7 +1620,7 @@ func (dbclient *CouchDatabase) BatchUpdateDocuments(documents []*CouchDoc) ([]*B if logger.IsEnabledFor(zapcore.DebugLevel) { dump, _ := httputil.DumpResponse(resp, false) // compact debug log by replacing carriage return / line feed with 
dashes to separate http headers - logger.Debugf("[%s] HTTP Response: %s", dbclient.DBName, bytes.Replace(dump, []byte{0x0d, 0x0a}, []byte{0x20, 0x7c, 0x20}, -1)) + logger.Debugf("[%s] HTTP Response: %s", dbclient.dbName, bytes.Replace(dump, []byte{0x0d, 0x0a}, []byte{0x20, 0x7c, 0x20}, -1)) } //handle as JSON document @@ -1670,13 +1629,13 @@ func (dbclient *CouchDatabase) BatchUpdateDocuments(documents []*CouchDoc) ([]*B return nil, errors.Wrap(err, "error reading response body") } - var jsonResponse = []*BatchUpdateResponse{} + var jsonResponse = []*batchUpdateResponse{} err2 := json.Unmarshal(jsonResponseRaw, &jsonResponse) if err2 != nil { return nil, errors.Wrap(err2, "error unmarshalling json data") } - logger.Debugf("[%s] Exiting BatchUpdateDocuments() _bulk_docs response=[%s]", dbclient.DBName, string(jsonResponseRaw)) + logger.Debugf("[%s] Exiting BatchUpdateDocuments() _bulk_docs response=[%s]", dbclient.dbName, string(jsonResponseRaw)) return jsonResponse, nil @@ -1686,13 +1645,13 @@ func (dbclient *CouchDatabase) BatchUpdateDocuments(documents []*CouchDoc) ([]*B //a retry for document revision conflict errors, //which may be detected during saves or deletes that timed out from client http perspective, //but which eventually succeeded in couchdb -func (dbclient *CouchDatabase) handleRequestWithRevisionRetry(id, method, dbName, functionName string, connectURL *url.URL, data []byte, rev string, - multipartBoundary string, maxRetries int, keepConnectionOpen bool, queryParms *url.Values) (*http.Response, *DBReturn, error) { +func (dbclient *couchDatabase) handleRequestWithRevisionRetry(id, method, dbName, functionName string, connectURL *url.URL, data []byte, rev string, + multipartBoundary string, maxRetries int, keepConnectionOpen bool, queryParms *url.Values) (*http.Response, *dbReturn, error) { //Initialize a flag for the revision conflict revisionConflictDetected := false var resp *http.Response - var couchDBReturn *DBReturn + var couchDBReturn *dbReturn 
var errResp error //attempt the http request for the max number of retries @@ -1707,7 +1666,7 @@ func (dbclient *CouchDatabase) handleRequestWithRevisionRetry(id, method, dbName } //handle the request for saving/deleting the couchdb data - resp, couchDBReturn, errResp = dbclient.CouchInstance.handleRequest(context.Background(), method, dbName, functionName, connectURL, + resp, couchDBReturn, errResp = dbclient.couchInstance.handleRequest(context.Background(), method, dbName, functionName, connectURL, data, rev, multipartBoundary, maxRetries, keepConnectionOpen, queryParms, id) //If there was a 409 conflict error during the save/delete, log it and retry it. @@ -1724,11 +1683,11 @@ func (dbclient *CouchDatabase) handleRequestWithRevisionRetry(id, method, dbName return resp, couchDBReturn, errResp } -func (dbclient *CouchDatabase) handleRequest(method, functionName string, connectURL *url.URL, data []byte, rev, multipartBoundary string, - maxRetries int, keepConnectionOpen bool, queryParms *url.Values, pathElements ...string) (*http.Response, *DBReturn, error) { +func (dbclient *couchDatabase) handleRequest(method, functionName string, connectURL *url.URL, data []byte, rev, multipartBoundary string, + maxRetries int, keepConnectionOpen bool, queryParms *url.Values, pathElements ...string) (*http.Response, *dbReturn, error) { - return dbclient.CouchInstance.handleRequest(context.Background(), - method, dbclient.DBName, functionName, connectURL, data, rev, multipartBoundary, + return dbclient.couchInstance.handleRequest(context.Background(), + method, dbclient.dbName, functionName, connectURL, data, rev, multipartBoundary, maxRetries, keepConnectionOpen, queryParms, pathElements..., ) } @@ -1737,15 +1696,15 @@ func (dbclient *CouchDatabase) handleRequest(method, functionName string, connec // If it returns an error, it ensures that the response body is closed, else it is the // callee's responsibility to close response correctly. 
// Any http error or CouchDB error (4XX or 500) will result in a golang error getting returned -func (couchInstance *CouchInstance) handleRequest(ctx context.Context, method, dbName, functionName string, connectURL *url.URL, data []byte, rev string, - multipartBoundary string, maxRetries int, keepConnectionOpen bool, queryParms *url.Values, pathElements ...string) (*http.Response, *DBReturn, error) { +func (couchInstance *couchInstance) handleRequest(ctx context.Context, method, dbName, functionName string, connectURL *url.URL, data []byte, rev string, + multipartBoundary string, maxRetries int, keepConnectionOpen bool, queryParms *url.Values, pathElements ...string) (*http.Response, *dbReturn, error) { logger.Debugf("Entering handleRequest() method=%s url=%v dbName=%s", method, connectURL, dbName) //create the return objects for couchDB var resp *http.Response var errResp error - couchDBReturn := &DBReturn{} + couchDBReturn := &dbReturn{} defer couchInstance.recordMetric(time.Now(), dbName, functionName, couchDBReturn) //set initial wait duration for retries @@ -1903,7 +1862,7 @@ func (couchInstance *CouchInstance) handleRequest(ctx context.Context, method, d //this is a structure and StatusCode is an int //This is meant to provide a more graceful error if this should occur if invalidCouchDBReturn(resp, errResp) { - return nil, nil, errors.New("unable to connect to CouchDB, check the hostname and port.") + return nil, nil, errors.New("unable to connect to CouchDB, check the hostname and port") } //set the return code for the couchDB request @@ -1929,8 +1888,8 @@ func (couchInstance *CouchInstance) handleRequest(ctx context.Context, method, d return resp, couchDBReturn, nil } -func (ci *CouchInstance) recordMetric(startTime time.Time, dbName, api string, couchDBReturn *DBReturn) { - ci.stats.observeProcessingTime(startTime, dbName, api, strconv.Itoa(couchDBReturn.StatusCode)) +func (couchInstance *couchInstance) recordMetric(startTime time.Time, dbName, api string, 
couchDBReturn *dbReturn) { + couchInstance.stats.observeProcessingTime(startTime, dbName, api, strconv.Itoa(couchDBReturn.StatusCode)) } //invalidCouchDBResponse checks to make sure either a valid response or error is returned @@ -1941,8 +1900,8 @@ func invalidCouchDBReturn(resp *http.Response, errResp error) bool { return false } -//IsJSON tests a string to determine if a valid JSON -func IsJSON(s string) bool { +//isJSON tests a string to determine if a valid JSON +func isJSON(s string) bool { var js map[string]interface{} return json.Unmarshal([]byte(s), &js) == nil } @@ -1975,13 +1934,13 @@ func encodeForJSON(str string) (string, error) { // printDocumentIds is a convenience method to print readable log entries for arrays of pointers // to couch document IDs -func printDocumentIds(documentPointers []*CouchDoc) (string, error) { +func printDocumentIds(documentPointers []*couchDoc) (string, error) { documentIds := []string{} for _, documentPointer := range documentPointers { - docMetadata := &DocMetadata{} - err := json.Unmarshal(documentPointer.JSONValue, &docMetadata) + docMetadata := &docMetadata{} + err := json.Unmarshal(documentPointer.jsonValue, &docMetadata) if err != nil { return "", errors.Wrap(err, "error unmarshalling json data") } diff --git a/core/ledger/util/couchdb/couchdb_test.go b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/couchdb_test.go similarity index 66% rename from core/ledger/util/couchdb/couchdb_test.go rename to core/ledger/kvledger/txmgmt/statedb/statecouchdb/couchdb_test.go index b24468342dc..db2d1eb3f41 100644 --- a/core/ledger/util/couchdb/couchdb_test.go +++ b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/couchdb_test.go @@ -4,23 +4,20 @@ Copyright IBM Corp. All Rights Reserved. 
SPDX-License-Identifier: Apache-2.0 */ -package couchdb +package statecouchdb import ( "context" "encoding/json" - "fmt" "net/http" "net/url" - "os" "strings" "testing" "time" "unicode/utf8" - "github.com/hyperledger/fabric/common/flogging" "github.com/hyperledger/fabric/common/metrics/disabled" - "github.com/hyperledger/fabric/core/ledger/util/couchdbtest" + "github.com/hyperledger/fabric/core/ledger" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -30,19 +27,6 @@ const badParseConnectURL = "http://host.com|5432" const updateDocumentConflictError = "conflict" const updateDocumentConflictReason = "Document update conflict." -func cleanup(database string) error { - //create a new connection - couchInstance, err := CreateCouchInstance(testConfig(), &disabled.Provider{}) - if err != nil { - fmt.Println("Unexpected error", err) - return err - } - db := CouchDatabase{CouchInstance: couchInstance, DBName: database} - //drop the test database - db.DropDatabase() - return nil -} - type Asset struct { ID string `json:"_id"` Rev string `json:"_rev"` @@ -54,47 +38,32 @@ type Asset struct { var assetJSON = []byte(`{"asset_name":"marble1","color":"blue","size":"35","owner":"jerry"}`) -var testAddress string -var cleanupCouchDB = func() {} - -func testConfig() *Config { - return &Config{ - Address: testAddress, +func testConfig() *ledger.CouchDBConfig { + return &ledger.CouchDBConfig{ + Address: "", Username: "", Password: "", MaxRetries: 3, MaxRetriesOnStartup: 20, RequestTimeout: 35 * time.Second, - CreateGlobalChangesDB: true, - } -} - -func TestMain(m *testing.M) { - //set the logging level to DEBUG to test debug only code - flogging.ActivateSpec("couchdb=debug") - - rc := m.Run() - cleanupCouchDB() - - os.Exit(rc) -} - -func startCouchDB() { - if testAddress == "" { - testAddress, cleanupCouchDB = couchdbtest.CouchDBSetup((nil)) + CreateGlobalChangesDB: false, } } func TestDBBadConnectionDef(t *testing.T) { - startCouchDB() - config := 
testConfig() - config.Address = badParseConnectURL - _, err := CreateCouchInstance(config, &disabled.Provider{}) + config := &ledger.CouchDBConfig{ + Address: badParseConnectURL, + Username: "", + Password: "", + MaxRetries: 3, + MaxRetriesOnStartup: 3, + RequestTimeout: 35 * time.Second, + } + _, err := createCouchInstance(config, &disabled.Provider{}) assert.Error(t, err, "Did not receive error when trying to create database connection definition with a bad hostname") } func TestEncodePathElement(t *testing.T) { - encodedString := encodePathElement("testelement") assert.Equal(t, "testelement", encodedString) @@ -109,31 +78,32 @@ func TestEncodePathElement(t *testing.T) { encodedString = encodePathElement("/test+ element:") assert.Equal(t, "%2Ftest%2B%20element:", encodedString) - } func TestHealthCheck(t *testing.T) { - startCouchDB() - client := &http.Client{} - config := testConfig() - config.Address = testAddress + "1" - badCouchDBInstance := CouchInstance{ - conf: config, + couchDBEnv.startCouchDB(t) + config.Address = couchDBEnv.couchAddress + defer couchDBEnv.cleanup(config) + + configWithIncorrectAddress := testConfig() + client := &http.Client{} + badCouchDBInstance := couchInstance{ + conf: configWithIncorrectAddress, client: client, stats: newStats(&disabled.Provider{}), } - err := badCouchDBInstance.HealthCheck(context.Background()) + err := badCouchDBInstance.healthCheck(context.Background()) assert.Error(t, err, "Health check should result in an error if unable to connect to couch db") assert.Contains(t, err.Error(), "failed to connect to couch db") //Create a good couchdb instance - goodCouchDBInstance := CouchInstance{ - conf: testConfig(), + goodCouchDBInstance := couchInstance{ + conf: config, client: client, stats: newStats(&disabled.Provider{}), } - err = goodCouchDBInstance.HealthCheck(context.Background()) + err = goodCouchDBInstance.healthCheck(context.Background()) assert.NoError(t, err) } @@ -142,8 +112,8 @@ func TestBadCouchDBInstance(t 
*testing.T) { client := &http.Client{} //Create a bad couchdb instance - badCouchDBInstance := CouchInstance{ - conf: &Config{ + badCouchDBInstance := couchInstance{ + conf: &ledger.CouchDBConfig{ Address: badParseConnectURL, Username: "", Password: "", @@ -156,190 +126,198 @@ func TestBadCouchDBInstance(t *testing.T) { } //Create a bad CouchDatabase - badDB := CouchDatabase{&badCouchDBInstance, "baddb", 1} + badDB := couchDatabase{&badCouchDBInstance, "baddb", 1} - //Test CreateCouchDatabase with bad connection - _, err := CreateCouchDatabase(&badCouchDBInstance, "baddbtest") - assert.Error(t, err, "Error should have been thrown with CreateCouchDatabase and invalid connection") + //Test createCouchDatabase with bad connection + _, err := createCouchDatabase(&badCouchDBInstance, "baddbtest") + assert.Error(t, err, "Error should have been thrown with createCouchDatabase and invalid connection") - //Test CreateSystemDatabasesIfNotExist with bad connection - err = CreateSystemDatabasesIfNotExist(&badCouchDBInstance) - assert.Error(t, err, "Error should have been thrown with CreateSystemDatabasesIfNotExist and invalid connection") + //Test createSystemDatabasesIfNotExist with bad connection + err = createSystemDatabasesIfNotExist(&badCouchDBInstance) + assert.Error(t, err, "Error should have been thrown with createSystemDatabasesIfNotExist and invalid connection") - //Test CreateDatabaseIfNotExist with bad connection - err = badDB.CreateDatabaseIfNotExist() - assert.Error(t, err, "Error should have been thrown with CreateDatabaseIfNotExist and invalid connection") + //Test createDatabaseIfNotExist with bad connection + err = badDB.createDatabaseIfNotExist() + assert.Error(t, err, "Error should have been thrown with createDatabaseIfNotExist and invalid connection") - //Test GetDatabaseInfo with bad connection - _, _, err = badDB.GetDatabaseInfo() - assert.Error(t, err, "Error should have been thrown with GetDatabaseInfo and invalid connection") + //Test getDatabaseInfo 
with bad connection + _, _, err = badDB.getDatabaseInfo() + assert.Error(t, err, "Error should have been thrown with getDatabaseInfo and invalid connection") - //Test VerifyCouchConfig with bad connection - _, _, err = badCouchDBInstance.VerifyCouchConfig() - assert.Error(t, err, "Error should have been thrown with VerifyCouchConfig and invalid connection") + //Test verifyCouchConfig with bad connection + _, _, err = badCouchDBInstance.verifyCouchConfig() + assert.Error(t, err, "Error should have been thrown with verifyCouchConfig and invalid connection") - //Test EnsureFullCommit with bad connection - _, err = badDB.EnsureFullCommit() - assert.Error(t, err, "Error should have been thrown with EnsureFullCommit and invalid connection") + //Test ensureFullCommit with bad connection + _, err = badDB.ensureFullCommit() + assert.Error(t, err, "Error should have been thrown with ensureFullCommit and invalid connection") - //Test DropDatabase with bad connection - _, err = badDB.DropDatabase() - assert.Error(t, err, "Error should have been thrown with DropDatabase and invalid connection") + //Test dropDatabase with bad connection + _, err = badDB.dropDatabase() + assert.Error(t, err, "Error should have been thrown with dropDatabase and invalid connection") - //Test ReadDoc with bad connection - _, _, err = badDB.ReadDoc("1") - assert.Error(t, err, "Error should have been thrown with ReadDoc and invalid connection") + //Test readDoc with bad connection + _, _, err = badDB.readDoc("1") + assert.Error(t, err, "Error should have been thrown with readDoc and invalid connection") - //Test SaveDoc with bad connection - _, err = badDB.SaveDoc("1", "1", nil) - assert.Error(t, err, "Error should have been thrown with SaveDoc and invalid connection") + //Test saveDoc with bad connection + _, err = badDB.saveDoc("1", "1", nil) + assert.Error(t, err, "Error should have been thrown with saveDoc and invalid connection") - //Test DeleteDoc with bad connection - err = badDB.DeleteDoc("1", 
"1") - assert.Error(t, err, "Error should have been thrown with DeleteDoc and invalid connection") + //Test deleteDoc with bad connection + err = badDB.deleteDoc("1", "1") + assert.Error(t, err, "Error should have been thrown with deleteDoc and invalid connection") - //Test ReadDocRange with bad connection - _, _, err = badDB.ReadDocRange("1", "2", 1000) - assert.Error(t, err, "Error should have been thrown with ReadDocRange and invalid connection") + //Test readDocRange with bad connection + _, _, err = badDB.readDocRange("1", "2", 1000) + assert.Error(t, err, "Error should have been thrown with readDocRange and invalid connection") - //Test QueryDocuments with bad connection - _, _, err = badDB.QueryDocuments("1") - assert.Error(t, err, "Error should have been thrown with QueryDocuments and invalid connection") + //Test queryDocuments with bad connection + _, _, err = badDB.queryDocuments("1") + assert.Error(t, err, "Error should have been thrown with queryDocuments and invalid connection") - //Test BatchRetrieveDocumentMetadata with bad connection - _, err = badDB.BatchRetrieveDocumentMetadata(nil) - assert.Error(t, err, "Error should have been thrown with BatchRetrieveDocumentMetadata and invalid connection") + //Test batchRetrieveDocumentMetadata with bad connection + _, err = badDB.batchRetrieveDocumentMetadata(nil) + assert.Error(t, err, "Error should have been thrown with batchRetrieveDocumentMetadata and invalid connection") - //Test BatchUpdateDocuments with bad connection - _, err = badDB.BatchUpdateDocuments(nil) - assert.Error(t, err, "Error should have been thrown with BatchUpdateDocuments and invalid connection") + //Test batchUpdateDocuments with bad connection + _, err = badDB.batchUpdateDocuments(nil) + assert.Error(t, err, "Error should have been thrown with batchUpdateDocuments and invalid connection") - //Test ListIndex with bad connection - _, err = badDB.ListIndex() - assert.Error(t, err, "Error should have been thrown with ListIndex and 
invalid connection") + //Test listIndex with bad connection + _, err = badDB.listIndex() + assert.Error(t, err, "Error should have been thrown with listIndex and invalid connection") - //Test CreateIndex with bad connection - _, err = badDB.CreateIndex("") - assert.Error(t, err, "Error should have been thrown with CreateIndex and invalid connection") + //Test createIndex with bad connection + _, err = badDB.createIndex("") + assert.Error(t, err, "Error should have been thrown with createIndex and invalid connection") - //Test DeleteIndex with bad connection - err = badDB.DeleteIndex("", "") - assert.Error(t, err, "Error should have been thrown with DeleteIndex and invalid connection") + //Test deleteIndex with bad connection + err = badDB.deleteIndex("", "") + assert.Error(t, err, "Error should have been thrown with deleteIndex and invalid connection") } func TestDBCreateSaveWithoutRevision(t *testing.T) { - startCouchDB() + config := testConfig() + couchDBEnv.startCouchDB(t) + config.Address = couchDBEnv.couchAddress + defer couchDBEnv.cleanup(config) database := "testdbcreatesavewithoutrevision" - err := cleanup(database) - assert.NoError(t, err, "Error when trying to cleanup Error: %s", err) - defer cleanup(database) //create a new instance and database object - couchInstance, err := CreateCouchInstance(testConfig(), &disabled.Provider{}) + couchInstance, err := createCouchInstance(config, &disabled.Provider{}) assert.NoError(t, err, "Error when trying to create couch instance") - db := CouchDatabase{CouchInstance: couchInstance, DBName: database} + db := couchDatabase{couchInstance: couchInstance, dbName: database} //create a new database - errdb := db.CreateDatabaseIfNotExist() + errdb := db.createDatabaseIfNotExist() assert.NoError(t, errdb, "Error when trying to create database") //Save the test document - _, saveerr := db.SaveDoc("2", "", &CouchDoc{JSONValue: assetJSON, Attachments: nil}) + _, saveerr := db.saveDoc("2", "", &couchDoc{jsonValue: assetJSON, 
attachments: nil}) assert.NoError(t, saveerr, "Error when trying to save a document") } func TestDBCreateEnsureFullCommit(t *testing.T) { - startCouchDB() + config := testConfig() + couchDBEnv.startCouchDB(t) + config.Address = couchDBEnv.couchAddress + defer couchDBEnv.cleanup(config) database := "testdbensurefullcommit" - err := cleanup(database) - assert.NoError(t, err, "Error when trying to cleanup Error: %s", err) - defer cleanup(database) //create a new instance and database object - couchInstance, err := CreateCouchInstance(testConfig(), &disabled.Provider{}) + couchInstance, err := createCouchInstance(config, &disabled.Provider{}) assert.NoError(t, err, "Error when trying to create couch instance") - db := CouchDatabase{CouchInstance: couchInstance, DBName: database} + db := couchDatabase{couchInstance: couchInstance, dbName: database} //create a new database - errdb := db.CreateDatabaseIfNotExist() + errdb := db.createDatabaseIfNotExist() assert.NoError(t, errdb, "Error when trying to create database") //Save the test document - _, saveerr := db.SaveDoc("2", "", &CouchDoc{JSONValue: assetJSON, Attachments: nil}) + _, saveerr := db.saveDoc("2", "", &couchDoc{jsonValue: assetJSON, attachments: nil}) assert.NoError(t, saveerr, "Error when trying to save a document") //Ensure a full commit - _, commiterr := db.EnsureFullCommit() + _, commiterr := db.ensureFullCommit() assert.NoError(t, commiterr, "Error when trying to ensure a full commit") } func TestIsEmpty(t *testing.T) { - startCouchDB() - couchInstance, err := CreateCouchInstance(testConfig(), &disabled.Provider{}) + config := testConfig() + couchDBEnv.startCouchDB(t) + config.Address = couchDBEnv.couchAddress + defer couchDBEnv.cleanup(config) + + couchInstance, err := createCouchInstance(config, &disabled.Provider{}) assert.NoError(t, err) - isEmpty, err := couchInstance.IsEmpty(nil) + ignore := []string{"_global_changes", "_replicator", "_users", "fabric__internal"} + isEmpty, err := 
couchInstance.isEmpty(ignore) require.NoError(t, err) require.True(t, isEmpty) testdbs := []string{"testdb1", "testdb2"} - defer func() { - for _, d := range testdbs { - cleanup(d) - } - }() + couchDBEnv.cleanup(config) for _, d := range testdbs { - db := CouchDatabase{CouchInstance: couchInstance, DBName: d} - require.NoError(t, db.CreateDatabaseIfNotExist()) + db := couchDatabase{couchInstance: couchInstance, dbName: d} + require.NoError(t, db.createDatabaseIfNotExist()) } - isEmpty, err = couchInstance.IsEmpty(nil) + isEmpty, err = couchInstance.isEmpty(ignore) require.NoError(t, err) require.False(t, isEmpty) - ignore := []string{"testdb1"} - isEmpty, err = couchInstance.IsEmpty(ignore) + ignore = append(ignore, "testdb1") + isEmpty, err = couchInstance.isEmpty(ignore) require.NoError(t, err) require.False(t, isEmpty) - ignore = []string{"testdb1", "testdb2"} - isEmpty, err = couchInstance.IsEmpty(ignore) + ignore = append(ignore, "testdb2") + isEmpty, err = couchInstance.isEmpty(ignore) require.NoError(t, err) require.True(t, isEmpty) + defaultAddress := config.Address + defaultMaxRetries := config.MaxRetries couchInstance.conf.Address = "junk" couchInstance.conf.MaxRetries = 0 - isEmpty, err = couchInstance.IsEmpty(ignore) + isEmpty, err = couchInstance.isEmpty(ignore) require.Error(t, err) require.Regexp(t, `unable to connect to CouchDB, check the hostname and port: http error calling couchdb: Get "?http://junk/_all_dbs"?`, err.Error()) + config.Address = defaultAddress + config.MaxRetries = defaultMaxRetries } func TestDBBadDatabaseName(t *testing.T) { - startCouchDB() + config := testConfig() + couchDBEnv.startCouchDB(t) + config.Address = couchDBEnv.couchAddress + defer couchDBEnv.cleanup(config) //create a new instance and database object using a valid database name mixed case - couchInstance, err := CreateCouchInstance(testConfig(), &disabled.Provider{}) + couchInstance, err := createCouchInstance(config, &disabled.Provider{}) assert.NoError(t, err, 
"Error when trying to create couch instance") - _, dberr := CreateCouchDatabase(couchInstance, "testDB") + _, dberr := createCouchDatabase(couchInstance, "testDB") assert.Error(t, dberr, "Error should have been thrown for an invalid db name") //create a new instance and database object using a valid database name letters and numbers - couchInstance, err = CreateCouchInstance(testConfig(), &disabled.Provider{}) + couchInstance, err = createCouchInstance(config, &disabled.Provider{}) assert.NoError(t, err, "Error when trying to create couch instance") - _, dberr = CreateCouchDatabase(couchInstance, "test132") + _, dberr = createCouchDatabase(couchInstance, "test132") assert.NoError(t, dberr, "Error when testing a valid database name") //create a new instance and database object using a valid database name - special characters - couchInstance, err = CreateCouchInstance(testConfig(), &disabled.Provider{}) + couchInstance, err = createCouchInstance(config, &disabled.Provider{}) assert.NoError(t, err, "Error when trying to create couch instance") - _, dberr = CreateCouchDatabase(couchInstance, "test1234~!@#$%^&*()[]{}.") + _, dberr = createCouchDatabase(couchInstance, "test1234~!@#$%^&*()[]{}.") assert.Error(t, dberr, "Error should have been thrown for an invalid db name") //create a new instance and database object using a invalid database name - too long /* - couchInstance, err = CreateCouchInstance(testConfig(), &disabled.Provider{}) + couchInstance, err = createCouchInstance(config, &disabled.Provider{}) assert.NoError(t, err, "Error when trying to create couch instance") - _, dberr = CreateCouchDatabase(couchInstance, "a12345678901234567890123456789012345678901234"+ + _, dberr = createCouchDatabase(couchInstance, "a12345678901234567890123456789012345678901234"+ "56789012345678901234567890123456789012345678901234567890123456789012345678901234567890"+ "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456"+ 
"78901234567890123456789012345678901234567890") @@ -348,97 +326,104 @@ func TestDBBadDatabaseName(t *testing.T) { } func TestDBBadConnection(t *testing.T) { - startCouchDB() //create a new instance and database object //Limit the maxRetriesOnStartup to 3 in order to reduce time for the failure - config := testConfig() - config.Address = badConnectURL - config.MaxRetriesOnStartup = 3 - _, err := CreateCouchInstance(config, &disabled.Provider{}) + config := &ledger.CouchDBConfig{ + Address: badConnectURL, + Username: "", + Password: "", + MaxRetries: 3, + MaxRetriesOnStartup: 3, + RequestTimeout: 35 * time.Second, + } + _, err := createCouchInstance(config, &disabled.Provider{}) assert.Error(t, err, "Error should have been thrown for a bad connection") } func TestBadDBCredentials(t *testing.T) { - startCouchDB() - database := "testdbbadcredentials" - err := cleanup(database) - assert.NoError(t, err, "Error when trying to cleanup Error: %s", err) - defer cleanup(database) + config := testConfig() + couchDBEnv.startCouchDB(t) + config.Address = couchDBEnv.couchAddress + defer couchDBEnv.cleanup(config) badConfig := testConfig() + badConfig.Address = config.Address badConfig.Username = "fred" badConfig.Password = "fred" //create a new instance and database object - _, err = CreateCouchInstance(badConfig, &disabled.Provider{}) + _, err := createCouchInstance(badConfig, &disabled.Provider{}) assert.Error(t, err, "Error should have been thrown for bad credentials") - } func TestDBCreateDatabaseAndPersist(t *testing.T) { - startCouchDB() + config := testConfig() + couchDBEnv.startCouchDB(t) + config.Address = couchDBEnv.couchAddress + defer couchDBEnv.cleanup(config) //Test create and persist with default configured maxRetries - testDBCreateDatabaseAndPersist(t, testConfig().MaxRetries) + testDBCreateDatabaseAndPersist(t, config) + couchDBEnv.cleanup(config) //Test create and persist with 0 retries - testDBCreateDatabaseAndPersist(t, 0) + defaultMaxRetries := 
config.MaxRetries + config.MaxRetries = 0 + testDBCreateDatabaseAndPersist(t, config) + config.MaxRetries = defaultMaxRetries + couchDBEnv.cleanup(config) //Test batch operations with default configured maxRetries - testBatchBatchOperations(t, testConfig().MaxRetries) + testBatchBatchOperations(t, config) + couchDBEnv.cleanup(config) //Test batch operations with 0 retries - testBatchBatchOperations(t, 0) - + testBatchBatchOperations(t, config) } -func testDBCreateDatabaseAndPersist(t *testing.T, maxRetries int) { - +func testDBCreateDatabaseAndPersist(t *testing.T, config *ledger.CouchDBConfig) { database := "testdbcreatedatabaseandpersist" - err := cleanup(database) - assert.NoError(t, err, "Error when trying to cleanup Error: %s", err) - defer cleanup(database) //create a new instance and database object - couchInstance, err := CreateCouchInstance(testConfig(), &disabled.Provider{}) + couchInstance, err := createCouchInstance(config, &disabled.Provider{}) assert.NoError(t, err, "Error when trying to create couch instance") - db := CouchDatabase{CouchInstance: couchInstance, DBName: database} + db := couchDatabase{couchInstance: couchInstance, dbName: database} //create a new database - errdb := db.CreateDatabaseIfNotExist() + errdb := db.createDatabaseIfNotExist() assert.NoError(t, errdb, "Error when trying to create database") //Retrieve the info for the new database and make sure the name matches - dbResp, _, errdb := db.GetDatabaseInfo() + dbResp, _, errdb := db.getDatabaseInfo() assert.NoError(t, errdb, "Error when trying to retrieve database information") assert.Equal(t, database, dbResp.DbName) //Save the test document - _, saveerr := db.SaveDoc("idWith/slash", "", &CouchDoc{JSONValue: assetJSON, Attachments: nil}) + _, saveerr := db.saveDoc("idWith/slash", "", &couchDoc{jsonValue: assetJSON, attachments: nil}) assert.NoError(t, saveerr, "Error when trying to save a document") //Retrieve the test document - dbGetResp, _, geterr := db.ReadDoc("idWith/slash") 
+ dbGetResp, _, geterr := db.readDoc("idWith/slash") assert.NoError(t, geterr, "Error when trying to retrieve a document") //Unmarshal the document to Asset structure assetResp := &Asset{} - geterr = json.Unmarshal(dbGetResp.JSONValue, &assetResp) + geterr = json.Unmarshal(dbGetResp.jsonValue, &assetResp) assert.NoError(t, geterr, "Error when trying to retrieve a document") //Verify the owner retrieved matches assert.Equal(t, "jerry", assetResp.Owner) //Save the test document - _, saveerr = db.SaveDoc("1", "", &CouchDoc{JSONValue: assetJSON, Attachments: nil}) + _, saveerr = db.saveDoc("1", "", &couchDoc{jsonValue: assetJSON, attachments: nil}) assert.NoError(t, saveerr, "Error when trying to save a document") //Retrieve the test document - dbGetResp, _, geterr = db.ReadDoc("1") + dbGetResp, _, geterr = db.readDoc("1") assert.NoError(t, geterr, "Error when trying to retrieve a document") //Unmarshal the document to Asset structure assetResp = &Asset{} - geterr = json.Unmarshal(dbGetResp.JSONValue, &assetResp) + geterr = json.Unmarshal(dbGetResp.jsonValue, &assetResp) assert.NoError(t, geterr, "Error when trying to retrieve a document") //Verify the owner retrieved matches @@ -451,87 +436,87 @@ func testDBCreateDatabaseAndPersist(t *testing.T, maxRetries int) { assetDocUpdated, _ := json.Marshal(assetResp) //Save the updated test document - _, saveerr = db.SaveDoc("1", "", &CouchDoc{JSONValue: assetDocUpdated, Attachments: nil}) + _, saveerr = db.saveDoc("1", "", &couchDoc{jsonValue: assetDocUpdated, attachments: nil}) assert.NoError(t, saveerr, "Error when trying to save the updated document") //Retrieve the updated test document - dbGetResp, _, geterr = db.ReadDoc("1") + dbGetResp, _, geterr = db.readDoc("1") assert.NoError(t, geterr, "Error when trying to retrieve a document") //Unmarshal the document to Asset structure assetResp = &Asset{} - json.Unmarshal(dbGetResp.JSONValue, &assetResp) + json.Unmarshal(dbGetResp.jsonValue, &assetResp) //Assert that the update 
was saved and retrieved assert.Equal(t, "bob", assetResp.Owner) testBytes2 := []byte(`test attachment 2`) - attachment2 := &AttachmentInfo{} + attachment2 := &attachmentInfo{} attachment2.AttachmentBytes = testBytes2 attachment2.ContentType = "application/octet-stream" attachment2.Name = "data" - attachments2 := []*AttachmentInfo{} + attachments2 := []*attachmentInfo{} attachments2 = append(attachments2, attachment2) //Save the test document with an attachment - _, saveerr = db.SaveDoc("2", "", &CouchDoc{JSONValue: nil, Attachments: attachments2}) + _, saveerr = db.saveDoc("2", "", &couchDoc{jsonValue: nil, attachments: attachments2}) assert.NoError(t, saveerr, "Error when trying to save a document") //Retrieve the test document with attachments - dbGetResp, _, geterr = db.ReadDoc("2") + dbGetResp, _, geterr = db.readDoc("2") assert.NoError(t, geterr, "Error when trying to retrieve a document") //verify the text from the attachment is correct - testattach := dbGetResp.Attachments[0].AttachmentBytes + testattach := dbGetResp.attachments[0].AttachmentBytes assert.Equal(t, testBytes2, testattach) testBytes3 := []byte{} - attachment3 := &AttachmentInfo{} + attachment3 := &attachmentInfo{} attachment3.AttachmentBytes = testBytes3 attachment3.ContentType = "application/octet-stream" attachment3.Name = "data" - attachments3 := []*AttachmentInfo{} + attachments3 := []*attachmentInfo{} attachments3 = append(attachments3, attachment3) //Save the test document with a zero length attachment - _, saveerr = db.SaveDoc("3", "", &CouchDoc{JSONValue: nil, Attachments: attachments3}) + _, saveerr = db.saveDoc("3", "", &couchDoc{jsonValue: nil, attachments: attachments3}) assert.NoError(t, saveerr, "Error when trying to save a document") //Retrieve the test document with attachments - dbGetResp, _, geterr = db.ReadDoc("3") + dbGetResp, _, geterr = db.readDoc("3") assert.NoError(t, geterr, "Error when trying to retrieve a document") //verify the text from the attachment is correct, 
zero bytes - testattach = dbGetResp.Attachments[0].AttachmentBytes + testattach = dbGetResp.attachments[0].AttachmentBytes assert.Equal(t, testBytes3, testattach) testBytes4a := []byte(`test attachment 4a`) - attachment4a := &AttachmentInfo{} + attachment4a := &attachmentInfo{} attachment4a.AttachmentBytes = testBytes4a attachment4a.ContentType = "application/octet-stream" attachment4a.Name = "data1" testBytes4b := []byte(`test attachment 4b`) - attachment4b := &AttachmentInfo{} + attachment4b := &attachmentInfo{} attachment4b.AttachmentBytes = testBytes4b attachment4b.ContentType = "application/octet-stream" attachment4b.Name = "data2" - attachments4 := []*AttachmentInfo{} + attachments4 := []*attachmentInfo{} attachments4 = append(attachments4, attachment4a) attachments4 = append(attachments4, attachment4b) //Save the updated test document with multiple attachments - _, saveerr = db.SaveDoc("4", "", &CouchDoc{JSONValue: assetJSON, Attachments: attachments4}) + _, saveerr = db.saveDoc("4", "", &couchDoc{jsonValue: assetJSON, attachments: attachments4}) assert.NoError(t, saveerr, "Error when trying to save the updated document") //Retrieve the test document with attachments - dbGetResp, _, geterr = db.ReadDoc("4") + dbGetResp, _, geterr = db.readDoc("4") assert.NoError(t, geterr, "Error when trying to retrieve a document") - for _, attach4 := range dbGetResp.Attachments { + for _, attach4 := range dbGetResp.attachments { currentName := attach4.Name if currentName == "data1" { @@ -544,30 +529,30 @@ func testDBCreateDatabaseAndPersist(t *testing.T, maxRetries int) { } testBytes5a := []byte(`test attachment 5a`) - attachment5a := &AttachmentInfo{} + attachment5a := &attachmentInfo{} attachment5a.AttachmentBytes = testBytes5a attachment5a.ContentType = "application/octet-stream" attachment5a.Name = "data1" testBytes5b := []byte{} - attachment5b := &AttachmentInfo{} + attachment5b := &attachmentInfo{} attachment5b.AttachmentBytes = testBytes5b attachment5b.ContentType = 
"application/octet-stream" attachment5b.Name = "data2" - attachments5 := []*AttachmentInfo{} + attachments5 := []*attachmentInfo{} attachments5 = append(attachments5, attachment5a) attachments5 = append(attachments5, attachment5b) //Save the updated test document with multiple attachments and zero length attachments - _, saveerr = db.SaveDoc("5", "", &CouchDoc{JSONValue: assetJSON, Attachments: attachments5}) + _, saveerr = db.saveDoc("5", "", &couchDoc{jsonValue: assetJSON, attachments: attachments5}) assert.NoError(t, saveerr, "Error when trying to save the updated document") //Retrieve the test document with attachments - dbGetResp, _, geterr = db.ReadDoc("5") + dbGetResp, _, geterr = db.readDoc("5") assert.NoError(t, geterr, "Error when trying to retrieve a document") - for _, attach5 := range dbGetResp.Attachments { + for _, attach5 := range dbGetResp.attachments { currentName := attach5.Name if currentName == "data1" { @@ -580,162 +565,167 @@ func testDBCreateDatabaseAndPersist(t *testing.T, maxRetries int) { } //Attempt to save the document with an invalid id - _, saveerr = db.SaveDoc(string([]byte{0xff, 0xfe, 0xfd}), "", &CouchDoc{JSONValue: assetJSON, Attachments: nil}) + _, saveerr = db.saveDoc(string([]byte{0xff, 0xfe, 0xfd}), "", &couchDoc{jsonValue: assetJSON, attachments: nil}) assert.Error(t, saveerr, "Error should have been thrown when saving a document with an invalid ID") //Attempt to read a document with an invalid id - _, _, readerr := db.ReadDoc(string([]byte{0xff, 0xfe, 0xfd})) + _, _, readerr := db.readDoc(string([]byte{0xff, 0xfe, 0xfd})) assert.Error(t, readerr, "Error should have been thrown when reading a document with an invalid ID") //Drop the database - _, errdbdrop := db.DropDatabase() + _, errdbdrop := db.dropDatabase() assert.NoError(t, errdbdrop, "Error dropping database") //Make sure an error is thrown for getting info for a missing database - _, _, errdbinfo := db.GetDatabaseInfo() + _, _, errdbinfo := db.getDatabaseInfo() 
assert.Error(t, errdbinfo, "Error should have been thrown for missing database") //Attempt to save a document to a deleted database - _, saveerr = db.SaveDoc("6", "", &CouchDoc{JSONValue: assetJSON, Attachments: nil}) + _, saveerr = db.saveDoc("6", "", &couchDoc{jsonValue: assetJSON, attachments: nil}) assert.Error(t, saveerr, "Error should have been thrown while attempting to save to a deleted database") //Attempt to read from a deleted database - _, _, geterr = db.ReadDoc("6") + _, _, geterr = db.readDoc("6") assert.NoError(t, geterr, "Error should not have been thrown for a missing database, nil value is returned") } func TestDBRequestTimeout(t *testing.T) { - startCouchDB() - database := "testdbrequesttimeout" - err := cleanup(database) - assert.NoError(t, err, "Error when trying to cleanup Error: %s", err) - defer cleanup(database) + config := testConfig() + couchDBEnv.startCouchDB(t) + config.Address = couchDBEnv.couchAddress + defer couchDBEnv.cleanup(config) //create an impossibly short timeout + defaultMaxRetries := config.MaxRetries + defaultMaxRetriesOnStartup := config.MaxRetriesOnStartup + defaultRequestTimeout := config.RequestTimeout impossibleTimeout := time.Nanosecond //create a new instance and database object with a timeout that will fail //Also use a maxRetriesOnStartup=3 to reduce the number of retries - config := testConfig() + config.MaxRetriesOnStartup = 3 config.RequestTimeout = impossibleTimeout - _, err = CreateCouchInstance(config, &disabled.Provider{}) + _, err := createCouchInstance(config, &disabled.Provider{}) assert.Error(t, err, "Error should have been thown while trying to create a couchdb instance with a connection timeout") //create a new instance and database object - config = testConfig() config.MaxRetries = -1 config.MaxRetriesOnStartup = 3 - _, err = CreateCouchInstance(config, &disabled.Provider{}) + _, err = createCouchInstance(config, &disabled.Provider{}) assert.Error(t, err, "Error should have been thrown while 
attempting to create a database") - + config.MaxRetries = defaultMaxRetries + config.RequestTimeout = defaultRequestTimeout + config.MaxRetriesOnStartup = defaultMaxRetriesOnStartup } func TestDBTimeoutConflictRetry(t *testing.T) { - startCouchDB() + config := testConfig() + couchDBEnv.startCouchDB(t) + config.Address = couchDBEnv.couchAddress + defer couchDBEnv.cleanup(config) database := "testdbtimeoutretry" - err := cleanup(database) - assert.NoError(t, err, "Error when trying to cleanup Error: %s", err) - defer cleanup(database) //create a new instance and database object - config := testConfig() + defaultMaxRetriesOnStartup := config.MaxRetriesOnStartup config.MaxRetriesOnStartup = 3 - couchInstance, err := CreateCouchInstance(config, &disabled.Provider{}) + couchInstance, err := createCouchInstance(config, &disabled.Provider{}) assert.NoError(t, err, "Error when trying to create couch instance") - db := CouchDatabase{CouchInstance: couchInstance, DBName: database} + db := couchDatabase{couchInstance: couchInstance, dbName: database} //create a new database - errdb := db.CreateDatabaseIfNotExist() + errdb := db.createDatabaseIfNotExist() assert.NoError(t, errdb, "Error when trying to create database") //Retrieve the info for the new database and make sure the name matches - dbResp, _, errdb := db.GetDatabaseInfo() + dbResp, _, errdb := db.getDatabaseInfo() assert.NoError(t, errdb, "Error when trying to retrieve database information") assert.Equal(t, database, dbResp.DbName) //Save the test document - _, saveerr := db.SaveDoc("1", "", &CouchDoc{JSONValue: assetJSON, Attachments: nil}) + _, saveerr := db.saveDoc("1", "", &couchDoc{jsonValue: assetJSON, attachments: nil}) assert.NoError(t, saveerr, "Error when trying to save a document") //Retrieve the test document - _, _, geterr := db.ReadDoc("1") + _, _, geterr := db.readDoc("1") assert.NoError(t, geterr, "Error when trying to retrieve a document") //Save the test document with an invalid rev. 
This should cause a retry - _, saveerr = db.SaveDoc("1", "1-11111111111111111111111111111111", &CouchDoc{JSONValue: assetJSON, Attachments: nil}) + _, saveerr = db.saveDoc("1", "1-11111111111111111111111111111111", &couchDoc{jsonValue: assetJSON, attachments: nil}) assert.NoError(t, saveerr, "Error when trying to save a document with a revision conflict") //Delete the test document with an invalid rev. This should cause a retry - deleteerr := db.DeleteDoc("1", "1-11111111111111111111111111111111") + deleteerr := db.deleteDoc("1", "1-11111111111111111111111111111111") assert.NoError(t, deleteerr, "Error when trying to delete a document with a revision conflict") + config.MaxRetriesOnStartup = defaultMaxRetriesOnStartup } func TestDBBadNumberOfRetries(t *testing.T) { - startCouchDB() - database := "testdbbadretries" - err := cleanup(database) - assert.NoError(t, err, "Error when trying to cleanup Error: %s", err) - defer cleanup(database) + config := testConfig() + couchDBEnv.startCouchDB(t) + config.Address = couchDBEnv.couchAddress + defer couchDBEnv.cleanup(config) //create a new instance and database object - config := testConfig() + defaultMaxRetries := config.MaxRetries + defaultMaxRetriesOnStartup := config.MaxRetriesOnStartup config.MaxRetries = -1 config.MaxRetriesOnStartup = 3 - _, err = CreateCouchInstance(config, &disabled.Provider{}) + _, err := createCouchInstance(config, &disabled.Provider{}) assert.Error(t, err, "Error should have been thrown while attempting to create a database") - + config.MaxRetries = defaultMaxRetries + config.MaxRetriesOnStartup = defaultMaxRetriesOnStartup } func TestDBBadJSON(t *testing.T) { - startCouchDB() + config := testConfig() + couchDBEnv.startCouchDB(t) + config.Address = couchDBEnv.couchAddress + defer couchDBEnv.cleanup(config) database := "testdbbadjson" - err := cleanup(database) - assert.NoError(t, err, "Error when trying to cleanup Error: %s", err) - defer cleanup(database) //create a new instance and database 
object - couchInstance, err := CreateCouchInstance(testConfig(), &disabled.Provider{}) + couchInstance, err := createCouchInstance(config, &disabled.Provider{}) assert.NoError(t, err, "Error when trying to create couch instance") - db := CouchDatabase{CouchInstance: couchInstance, DBName: database} + db := couchDatabase{couchInstance: couchInstance, dbName: database} //create a new database - errdb := db.CreateDatabaseIfNotExist() + errdb := db.createDatabaseIfNotExist() assert.NoError(t, errdb, "Error when trying to create database") //Retrieve the info for the new database and make sure the name matches - dbResp, _, errdb := db.GetDatabaseInfo() + dbResp, _, errdb := db.getDatabaseInfo() assert.NoError(t, errdb, "Error when trying to retrieve database information") assert.Equal(t, database, dbResp.DbName) badJSON := []byte(`{"asset_name"}`) //Save the test document - _, saveerr := db.SaveDoc("1", "", &CouchDoc{JSONValue: badJSON, Attachments: nil}) + _, saveerr := db.saveDoc("1", "", &couchDoc{jsonValue: badJSON, attachments: nil}) assert.Error(t, saveerr, "Error should have been thrown for a bad JSON") } func TestPrefixScan(t *testing.T) { - startCouchDB() + config := testConfig() + couchDBEnv.startCouchDB(t) + config.Address = couchDBEnv.couchAddress + defer couchDBEnv.cleanup(config) database := "testprefixscan" - err := cleanup(database) - assert.NoError(t, err, "Error when trying to cleanup Error: %s", err) - defer cleanup(database) //create a new instance and database object - couchInstance, err := CreateCouchInstance(testConfig(), &disabled.Provider{}) + couchInstance, err := createCouchInstance(config, &disabled.Provider{}) assert.NoError(t, err, "Error when trying to create couch instance") - db := CouchDatabase{CouchInstance: couchInstance, DBName: database} + db := couchDatabase{couchInstance: couchInstance, dbName: database} //create a new database - errdb := db.CreateDatabaseIfNotExist() + errdb := db.createDatabaseIfNotExist() assert.NoError(t, 
errdb, "Error when trying to create database") //Retrieve the info for the new database and make sure the name matches - dbResp, _, errdb := db.GetDatabaseInfo() + dbResp, _, errdb := db.getDatabaseInfo() assert.NoError(t, errdb, "Error when trying to retrieve database information") assert.Equal(t, database, dbResp.DbName) @@ -744,135 +734,139 @@ func TestPrefixScan(t *testing.T) { id1 := string(0) + string(i) + string(0) id2 := string(0) + string(i) + string(1) id3 := string(0) + string(i) + string(utf8.MaxRune-1) - _, saveerr := db.SaveDoc(id1, "", &CouchDoc{JSONValue: assetJSON, Attachments: nil}) + _, saveerr := db.saveDoc(id1, "", &couchDoc{jsonValue: assetJSON, attachments: nil}) assert.NoError(t, saveerr, "Error when trying to save a document") - _, saveerr = db.SaveDoc(id2, "", &CouchDoc{JSONValue: assetJSON, Attachments: nil}) + _, saveerr = db.saveDoc(id2, "", &couchDoc{jsonValue: assetJSON, attachments: nil}) assert.NoError(t, saveerr, "Error when trying to save a document") - _, saveerr = db.SaveDoc(id3, "", &CouchDoc{JSONValue: assetJSON, Attachments: nil}) + _, saveerr = db.saveDoc(id3, "", &couchDoc{jsonValue: assetJSON, attachments: nil}) assert.NoError(t, saveerr, "Error when trying to save a document") } startKey := string(0) + string(10) endKey := startKey + string(utf8.MaxRune) - _, _, geterr := db.ReadDoc(endKey) + _, _, geterr := db.readDoc(endKey) assert.NoError(t, geterr, "Error when trying to get lastkey") - resultsPtr, _, geterr := db.ReadDocRange(startKey, endKey, 1000) + resultsPtr, _, geterr := db.readDocRange(startKey, endKey, 1000) assert.NoError(t, geterr, "Error when trying to perform a range scan") assert.NotNil(t, resultsPtr) results := resultsPtr assert.Equal(t, 3, len(results)) - assert.Equal(t, string(0)+string(10)+string(0), results[0].ID) - assert.Equal(t, string(0)+string(10)+string(1), results[1].ID) - assert.Equal(t, string(0)+string(10)+string(utf8.MaxRune-1), results[2].ID) + assert.Equal(t, 
string(0)+string(10)+string(0), results[0].id) + assert.Equal(t, string(0)+string(10)+string(1), results[1].id) + assert.Equal(t, string(0)+string(10)+string(utf8.MaxRune-1), results[2].id) //Drop the database - _, errdbdrop := db.DropDatabase() + _, errdbdrop := db.dropDatabase() assert.NoError(t, errdbdrop, "Error dropping database") //Retrieve the info for the new database and make sure the name matches - _, _, errdbinfo := db.GetDatabaseInfo() + _, _, errdbinfo := db.getDatabaseInfo() assert.Error(t, errdbinfo, "Error should have been thrown for missing database") } func TestDBSaveAttachment(t *testing.T) { - startCouchDB() + config := testConfig() + couchDBEnv.startCouchDB(t) + config.Address = couchDBEnv.couchAddress + defer couchDBEnv.cleanup(config) database := "testdbsaveattachment" - err := cleanup(database) - assert.NoError(t, err, "Error when trying to cleanup Error: %s", err) - defer cleanup(database) byteText := []byte(`This is a test document. This is only a test`) - attachment := &AttachmentInfo{} + attachment := &attachmentInfo{} attachment.AttachmentBytes = byteText attachment.ContentType = "text/plain" attachment.Length = uint64(len(byteText)) attachment.Name = "valueBytes" - attachments := []*AttachmentInfo{} + attachments := []*attachmentInfo{} attachments = append(attachments, attachment) //create a new instance and database object - couchInstance, err := CreateCouchInstance(testConfig(), &disabled.Provider{}) + couchInstance, err := createCouchInstance(config, &disabled.Provider{}) assert.NoError(t, err, "Error when trying to create couch instance") - db := CouchDatabase{CouchInstance: couchInstance, DBName: database} + db := couchDatabase{couchInstance: couchInstance, dbName: database} //create a new database - errdb := db.CreateDatabaseIfNotExist() + errdb := db.createDatabaseIfNotExist() assert.NoError(t, errdb, "Error when trying to create database") //Save the test document - _, saveerr := db.SaveDoc("10", "", &CouchDoc{JSONValue: nil, 
Attachments: attachments}) + _, saveerr := db.saveDoc("10", "", &couchDoc{jsonValue: nil, attachments: attachments}) assert.NoError(t, saveerr, "Error when trying to save a document") //Attempt to retrieve the updated test document with attachments - couchDoc, _, geterr2 := db.ReadDoc("10") + couchDoc, _, geterr2 := db.readDoc("10") assert.NoError(t, geterr2, "Error when trying to retrieve a document with attachment") - assert.NotNil(t, couchDoc.Attachments) - assert.Equal(t, byteText, couchDoc.Attachments[0].AttachmentBytes) - assert.Equal(t, attachment.Length, couchDoc.Attachments[0].Length) + assert.NotNil(t, couchDoc.attachments) + assert.Equal(t, byteText, couchDoc.attachments[0].AttachmentBytes) + assert.Equal(t, attachment.Length, couchDoc.attachments[0].Length) } func TestDBDeleteDocument(t *testing.T) { - startCouchDB() + config := testConfig() + couchDBEnv.startCouchDB(t) + config.Address = couchDBEnv.couchAddress + defer couchDBEnv.cleanup(config) database := "testdbdeletedocument" - err := cleanup(database) - assert.NoError(t, err, "Error when trying to cleanup Error: %s", err) - defer cleanup(database) //create a new instance and database object - couchInstance, err := CreateCouchInstance(testConfig(), &disabled.Provider{}) + couchInstance, err := createCouchInstance(config, &disabled.Provider{}) assert.NoError(t, err, "Error when trying to create couch instance") - db := CouchDatabase{CouchInstance: couchInstance, DBName: database} + db := couchDatabase{couchInstance: couchInstance, dbName: database} //create a new database - errdb := db.CreateDatabaseIfNotExist() + errdb := db.createDatabaseIfNotExist() assert.NoError(t, errdb, "Error when trying to create database") //Save the test document - _, saveerr := db.SaveDoc("2", "", &CouchDoc{JSONValue: assetJSON, Attachments: nil}) + _, saveerr := db.saveDoc("2", "", &couchDoc{jsonValue: assetJSON, attachments: nil}) assert.NoError(t, saveerr, "Error when trying to save a document") //Attempt to retrieve 
the test document - _, _, readErr := db.ReadDoc("2") + _, _, readErr := db.readDoc("2") assert.NoError(t, readErr, "Error when trying to retrieve a document with attachment") //Delete the test document - deleteErr := db.DeleteDoc("2", "") + deleteErr := db.deleteDoc("2", "") assert.NoError(t, deleteErr, "Error when trying to delete a document") //Attempt to retrieve the test document - readValue, _, _ := db.ReadDoc("2") + readValue, _, _ := db.readDoc("2") assert.Nil(t, readValue) } func TestDBDeleteNonExistingDocument(t *testing.T) { - startCouchDB() + config := testConfig() + couchDBEnv.startCouchDB(t) + config.Address = couchDBEnv.couchAddress + defer couchDBEnv.cleanup(config) database := "testdbdeletenonexistingdocument" - err := cleanup(database) - assert.NoError(t, err, "Error when trying to cleanup Error: %s", err) - defer cleanup(database) //create a new instance and database object - couchInstance, err := CreateCouchInstance(testConfig(), &disabled.Provider{}) + couchInstance, err := createCouchInstance(config, &disabled.Provider{}) assert.NoError(t, err, "Error when trying to create couch instance") - db := CouchDatabase{CouchInstance: couchInstance, DBName: database} + db := couchDatabase{couchInstance: couchInstance, dbName: database} //create a new database - errdb := db.CreateDatabaseIfNotExist() + errdb := db.createDatabaseIfNotExist() assert.NoError(t, errdb, "Error when trying to create database") //Save the test document - deleteErr := db.DeleteDoc("2", "") + deleteErr := db.deleteDoc("2", "") assert.NoError(t, deleteErr, "Error when trying to delete a non existing document") } func TestCouchDBVersion(t *testing.T) { - startCouchDB() + config := testConfig() + couchDBEnv.startCouchDB(t) + config.Address = couchDBEnv.couchAddress + defer couchDBEnv.cleanup(config) + err := checkCouchDBVersion("2.0.0") assert.NoError(t, err, "Error should not have been thrown for valid version") @@ -888,11 +882,11 @@ func TestCouchDBVersion(t *testing.T) { } func 
TestIndexOperations(t *testing.T) { - startCouchDB() + config := testConfig() + couchDBEnv.startCouchDB(t) + config.Address = couchDBEnv.couchAddress + defer couchDBEnv.cleanup(config) database := "testindexoperations" - err := cleanup(database) - assert.NoError(t, err, "Error when trying to cleanup Error: %s", err) - defer cleanup(database) byteJSON1 := []byte(`{"_id":"1", "asset_name":"marble1","color":"blue","size":1,"owner":"jerry"}`) byteJSON2 := []byte(`{"_id":"2", "asset_name":"marble2","color":"red","size":2,"owner":"tom"}`) @@ -906,41 +900,41 @@ func TestIndexOperations(t *testing.T) { byteJSON10 := []byte(`{"_id":"10", "asset_name":"marble10","color":"white","size":10,"owner":"tom"}`) //create a new instance and database object -------------------------------------------------------- - couchInstance, err := CreateCouchInstance(testConfig(), &disabled.Provider{}) + couchInstance, err := createCouchInstance(config, &disabled.Provider{}) assert.NoError(t, err, "Error when trying to create couch instance") - db := CouchDatabase{CouchInstance: couchInstance, DBName: database} + db := couchDatabase{couchInstance: couchInstance, dbName: database} //create a new database - errdb := db.CreateDatabaseIfNotExist() + errdb := db.createDatabaseIfNotExist() assert.NoError(t, errdb, "Error when trying to create database") - batchUpdateDocs := []*CouchDoc{} + batchUpdateDocs := []*couchDoc{} - batchUpdateDocs = append(batchUpdateDocs, &CouchDoc{JSONValue: byteJSON1, Attachments: nil}) - batchUpdateDocs = append(batchUpdateDocs, &CouchDoc{JSONValue: byteJSON2, Attachments: nil}) - batchUpdateDocs = append(batchUpdateDocs, &CouchDoc{JSONValue: byteJSON3, Attachments: nil}) - batchUpdateDocs = append(batchUpdateDocs, &CouchDoc{JSONValue: byteJSON4, Attachments: nil}) - batchUpdateDocs = append(batchUpdateDocs, &CouchDoc{JSONValue: byteJSON5, Attachments: nil}) - batchUpdateDocs = append(batchUpdateDocs, &CouchDoc{JSONValue: byteJSON6, Attachments: nil}) - batchUpdateDocs = 
append(batchUpdateDocs, &CouchDoc{JSONValue: byteJSON7, Attachments: nil}) - batchUpdateDocs = append(batchUpdateDocs, &CouchDoc{JSONValue: byteJSON8, Attachments: nil}) - batchUpdateDocs = append(batchUpdateDocs, &CouchDoc{JSONValue: byteJSON9, Attachments: nil}) - batchUpdateDocs = append(batchUpdateDocs, &CouchDoc{JSONValue: byteJSON10, Attachments: nil}) + batchUpdateDocs = append(batchUpdateDocs, &couchDoc{jsonValue: byteJSON1, attachments: nil}) + batchUpdateDocs = append(batchUpdateDocs, &couchDoc{jsonValue: byteJSON2, attachments: nil}) + batchUpdateDocs = append(batchUpdateDocs, &couchDoc{jsonValue: byteJSON3, attachments: nil}) + batchUpdateDocs = append(batchUpdateDocs, &couchDoc{jsonValue: byteJSON4, attachments: nil}) + batchUpdateDocs = append(batchUpdateDocs, &couchDoc{jsonValue: byteJSON5, attachments: nil}) + batchUpdateDocs = append(batchUpdateDocs, &couchDoc{jsonValue: byteJSON6, attachments: nil}) + batchUpdateDocs = append(batchUpdateDocs, &couchDoc{jsonValue: byteJSON7, attachments: nil}) + batchUpdateDocs = append(batchUpdateDocs, &couchDoc{jsonValue: byteJSON8, attachments: nil}) + batchUpdateDocs = append(batchUpdateDocs, &couchDoc{jsonValue: byteJSON9, attachments: nil}) + batchUpdateDocs = append(batchUpdateDocs, &couchDoc{jsonValue: byteJSON10, attachments: nil}) - _, err = db.BatchUpdateDocuments(batchUpdateDocs) + _, err = db.batchUpdateDocuments(batchUpdateDocs) assert.NoError(t, err, "Error adding batch of documents") //Create an index definition indexDefSize := `{"index":{"fields":[{"size":"desc"}]},"ddoc":"indexSizeSortDoc", "name":"indexSizeSortName","type":"json"}` //Create the index - _, err = db.CreateIndex(indexDefSize) + _, err = db.createIndex(indexDefSize) assert.NoError(t, err, "Error thrown while creating an index") //Retrieve the list of indexes //Delay for 100ms since CouchDB index list is updated async after index create/drop time.Sleep(100 * time.Millisecond) - listResult, err := db.ListIndex() + listResult, err := 
db.listIndex() assert.NoError(t, err, "Error thrown while retrieving indexes") //There should only be one item returned @@ -958,26 +952,26 @@ func TestIndexOperations(t *testing.T) { indexDefColor := `{"index":{"fields":[{"color":"desc"}]}}` //Create the index - _, err = db.CreateIndex(indexDefColor) + _, err = db.createIndex(indexDefColor) assert.NoError(t, err, "Error thrown while creating an index") //Retrieve the list of indexes //Delay for 100ms since CouchDB index list is updated async after index create/drop time.Sleep(100 * time.Millisecond) - listResult, err = db.ListIndex() + listResult, err = db.listIndex() assert.NoError(t, err, "Error thrown while retrieving indexes") //There should be two indexes returned assert.Equal(t, 2, len(listResult)) //Delete the named index - err = db.DeleteIndex("indexSizeSortDoc", "indexSizeSortName") + err = db.deleteIndex("indexSizeSortDoc", "indexSizeSortName") assert.NoError(t, err, "Error thrown while deleting an index") //Retrieve the list of indexes //Delay for 100ms since CouchDB index list is updated async after index create/drop time.Sleep(100 * time.Millisecond) - listResult, err = db.ListIndex() + listResult, err = db.listIndex() assert.NoError(t, err, "Error thrown while retrieving indexes") //There should be one index returned @@ -985,14 +979,14 @@ func TestIndexOperations(t *testing.T) { //Delete the unnamed index for _, elem := range listResult { - err = db.DeleteIndex(elem.DesignDocument, elem.Name) + err = db.deleteIndex(elem.DesignDocument, elem.Name) assert.NoError(t, err, "Error thrown while deleting an index") } //Retrieve the list of indexes //Delay for 100ms since CouchDB index list is updated async after index create/drop time.Sleep(100 * time.Millisecond) - listResult, err = db.ListIndex() + listResult, err = db.listIndex() assert.NoError(t, err, "Error thrown while retrieving indexes") assert.Equal(t, 0, len(listResult)) @@ -1000,25 +994,25 @@ func TestIndexOperations(t *testing.T) { queryString := 
`{"selector":{"size": {"$gt": 0}},"fields": ["_id", "_rev", "owner", "asset_name", "color", "size"], "sort":[{"size":"desc"}], "limit": 10,"skip": 0}` //Execute a query with a sort, this should throw the exception - _, _, err = db.QueryDocuments(queryString) + _, _, err = db.queryDocuments(queryString) assert.Error(t, err, "Error should have thrown while querying without a valid index") //Create the index - _, err = db.CreateIndex(indexDefSize) + _, err = db.createIndex(indexDefSize) assert.NoError(t, err, "Error thrown while creating an index") //Delay for 100ms since CouchDB index list is updated async after index create/drop time.Sleep(100 * time.Millisecond) //Execute a query with an index, this should succeed - _, _, err = db.QueryDocuments(queryString) + _, _, err = db.queryDocuments(queryString) assert.NoError(t, err, "Error thrown while querying with an index") //Create another index definition indexDefSize = `{"index":{"fields":[{"data.size":"desc"},{"data.owner":"desc"}]},"ddoc":"indexSizeOwnerSortDoc", "name":"indexSizeOwnerSortName","type":"json"}` //Create the index - dbResp, err := db.CreateIndex(indexDefSize) + dbResp, err := db.createIndex(indexDefSize) assert.NoError(t, err, "Error thrown while creating an index") //verify the response is "created" for an index creation @@ -1028,7 +1022,7 @@ func TestIndexOperations(t *testing.T) { time.Sleep(100 * time.Millisecond) //Update the index - dbResp, err = db.CreateIndex(indexDefSize) + dbResp, err = db.createIndex(indexDefSize) assert.NoError(t, err, "Error thrown while creating an index") //verify the response is "exists" for an update @@ -1037,7 +1031,7 @@ func TestIndexOperations(t *testing.T) { //Retrieve the list of indexes //Delay for 100ms since CouchDB index list is updated async after index create/drop time.Sleep(100 * time.Millisecond) - listResult, err = db.ListIndex() + listResult, err = db.listIndex() assert.NoError(t, err, "Error thrown while retrieving indexes") //There should only be two 
definitions @@ -1047,20 +1041,23 @@ func TestIndexOperations(t *testing.T) { indexDefSize = `{"index"{"fields":[{"data.size":"desc"},{"data.owner":"desc"}]},"ddoc":"indexSizeOwnerSortDoc", "name":"indexSizeOwnerSortName","type":"json"}` //Create the index - _, err = db.CreateIndex(indexDefSize) + _, err = db.createIndex(indexDefSize) assert.Error(t, err, "Error should have been thrown for an invalid index JSON") //Create an invalid index definition with a valid JSON and an invalid index definition indexDefSize = `{"index":{"fields2":[{"data.size":"desc"},{"data.owner":"desc"}]},"ddoc":"indexSizeOwnerSortDoc", "name":"indexSizeOwnerSortName","type":"json"}` //Create the index - _, err = db.CreateIndex(indexDefSize) + _, err = db.createIndex(indexDefSize) assert.Error(t, err, "Error should have been thrown for an invalid index definition") } func TestRichQuery(t *testing.T) { - startCouchDB() + config := testConfig() + couchDBEnv.startCouchDB(t) + config.Address = couchDBEnv.couchAddress + defer couchDBEnv.cleanup(config) byteJSON01 := []byte(`{"asset_name":"marble01","color":"blue","size":1,"owner":"jerry"}`) byteJSON02 := []byte(`{"asset_name":"marble02","color":"red","size":2,"owner":"tom"}`) byteJSON03 := []byte(`{"asset_name":"marble03","color":"green","size":3,"owner":"jerry"}`) @@ -1074,162 +1071,159 @@ func TestRichQuery(t *testing.T) { byteJSON11 := []byte(`{"asset_name":"marble11","color":"green","size":11,"owner":"tom"}`) byteJSON12 := []byte(`{"asset_name":"marble12","color":"green","size":12,"owner":"frank"}`) - attachment1 := &AttachmentInfo{} + attachment1 := &attachmentInfo{} attachment1.AttachmentBytes = []byte(`marble01 - test attachment`) attachment1.ContentType = "application/octet-stream" attachment1.Name = "data" - attachments1 := []*AttachmentInfo{} + attachments1 := []*attachmentInfo{} attachments1 = append(attachments1, attachment1) - attachment2 := &AttachmentInfo{} + attachment2 := &attachmentInfo{} attachment2.AttachmentBytes = 
[]byte(`marble02 - test attachment`) attachment2.ContentType = "application/octet-stream" attachment2.Name = "data" - attachments2 := []*AttachmentInfo{} + attachments2 := []*attachmentInfo{} attachments2 = append(attachments2, attachment2) - attachment3 := &AttachmentInfo{} + attachment3 := &attachmentInfo{} attachment3.AttachmentBytes = []byte(`marble03 - test attachment`) attachment3.ContentType = "application/octet-stream" attachment3.Name = "data" - attachments3 := []*AttachmentInfo{} + attachments3 := []*attachmentInfo{} attachments3 = append(attachments3, attachment3) - attachment4 := &AttachmentInfo{} + attachment4 := &attachmentInfo{} attachment4.AttachmentBytes = []byte(`marble04 - test attachment`) attachment4.ContentType = "application/octet-stream" attachment4.Name = "data" - attachments4 := []*AttachmentInfo{} + attachments4 := []*attachmentInfo{} attachments4 = append(attachments4, attachment4) - attachment5 := &AttachmentInfo{} + attachment5 := &attachmentInfo{} attachment5.AttachmentBytes = []byte(`marble05 - test attachment`) attachment5.ContentType = "application/octet-stream" attachment5.Name = "data" - attachments5 := []*AttachmentInfo{} + attachments5 := []*attachmentInfo{} attachments5 = append(attachments5, attachment5) - attachment6 := &AttachmentInfo{} + attachment6 := &attachmentInfo{} attachment6.AttachmentBytes = []byte(`marble06 - test attachment`) attachment6.ContentType = "application/octet-stream" attachment6.Name = "data" - attachments6 := []*AttachmentInfo{} + attachments6 := []*attachmentInfo{} attachments6 = append(attachments6, attachment6) - attachment7 := &AttachmentInfo{} + attachment7 := &attachmentInfo{} attachment7.AttachmentBytes = []byte(`marble07 - test attachment`) attachment7.ContentType = "application/octet-stream" attachment7.Name = "data" - attachments7 := []*AttachmentInfo{} + attachments7 := []*attachmentInfo{} attachments7 = append(attachments7, attachment7) - attachment8 := &AttachmentInfo{} + attachment8 := 
&attachmentInfo{} attachment8.AttachmentBytes = []byte(`marble08 - test attachment`) attachment8.ContentType = "application/octet-stream" attachment7.Name = "data" - attachments8 := []*AttachmentInfo{} + attachments8 := []*attachmentInfo{} attachments8 = append(attachments8, attachment8) - attachment9 := &AttachmentInfo{} + attachment9 := &attachmentInfo{} attachment9.AttachmentBytes = []byte(`marble09 - test attachment`) attachment9.ContentType = "application/octet-stream" attachment9.Name = "data" - attachments9 := []*AttachmentInfo{} + attachments9 := []*attachmentInfo{} attachments9 = append(attachments9, attachment9) - attachment10 := &AttachmentInfo{} + attachment10 := &attachmentInfo{} attachment10.AttachmentBytes = []byte(`marble10 - test attachment`) attachment10.ContentType = "application/octet-stream" attachment10.Name = "data" - attachments10 := []*AttachmentInfo{} + attachments10 := []*attachmentInfo{} attachments10 = append(attachments10, attachment10) - attachment11 := &AttachmentInfo{} + attachment11 := &attachmentInfo{} attachment11.AttachmentBytes = []byte(`marble11 - test attachment`) attachment11.ContentType = "application/octet-stream" attachment11.Name = "data" - attachments11 := []*AttachmentInfo{} + attachments11 := []*attachmentInfo{} attachments11 = append(attachments11, attachment11) - attachment12 := &AttachmentInfo{} + attachment12 := &attachmentInfo{} attachment12.AttachmentBytes = []byte(`marble12 - test attachment`) attachment12.ContentType = "application/octet-stream" attachment12.Name = "data" - attachments12 := []*AttachmentInfo{} + attachments12 := []*attachmentInfo{} attachments12 = append(attachments12, attachment12) database := "testrichquery" - err := cleanup(database) - assert.NoError(t, err, "Error when trying to cleanup Error: %s", err) - defer cleanup(database) //create a new instance and database object -------------------------------------------------------- - couchInstance, err := CreateCouchInstance(testConfig(), 
&disabled.Provider{}) + couchInstance, err := createCouchInstance(config, &disabled.Provider{}) assert.NoError(t, err, "Error when trying to create couch instance") - db := CouchDatabase{CouchInstance: couchInstance, DBName: database} + db := couchDatabase{couchInstance: couchInstance, dbName: database} //create a new database - errdb := db.CreateDatabaseIfNotExist() + errdb := db.createDatabaseIfNotExist() assert.NoError(t, errdb, "Error when trying to create database") //Save the test document - _, saveerr := db.SaveDoc("marble01", "", &CouchDoc{JSONValue: byteJSON01, Attachments: attachments1}) + _, saveerr := db.saveDoc("marble01", "", &couchDoc{jsonValue: byteJSON01, attachments: attachments1}) assert.NoError(t, saveerr, "Error when trying to save a document") //Save the test document - _, saveerr = db.SaveDoc("marble02", "", &CouchDoc{JSONValue: byteJSON02, Attachments: attachments2}) + _, saveerr = db.saveDoc("marble02", "", &couchDoc{jsonValue: byteJSON02, attachments: attachments2}) assert.NoError(t, saveerr, "Error when trying to save a document") //Save the test document - _, saveerr = db.SaveDoc("marble03", "", &CouchDoc{JSONValue: byteJSON03, Attachments: attachments3}) + _, saveerr = db.saveDoc("marble03", "", &couchDoc{jsonValue: byteJSON03, attachments: attachments3}) assert.NoError(t, saveerr, "Error when trying to save a document") //Save the test document - _, saveerr = db.SaveDoc("marble04", "", &CouchDoc{JSONValue: byteJSON04, Attachments: attachments4}) + _, saveerr = db.saveDoc("marble04", "", &couchDoc{jsonValue: byteJSON04, attachments: attachments4}) assert.NoError(t, saveerr, "Error when trying to save a document") //Save the test document - _, saveerr = db.SaveDoc("marble05", "", &CouchDoc{JSONValue: byteJSON05, Attachments: attachments5}) + _, saveerr = db.saveDoc("marble05", "", &couchDoc{jsonValue: byteJSON05, attachments: attachments5}) assert.NoError(t, saveerr, "Error when trying to save a document") //Save the test document - _, 
saveerr = db.SaveDoc("marble06", "", &CouchDoc{JSONValue: byteJSON06, Attachments: attachments6}) + _, saveerr = db.saveDoc("marble06", "", &couchDoc{jsonValue: byteJSON06, attachments: attachments6}) assert.NoError(t, saveerr, "Error when trying to save a document") //Save the test document - _, saveerr = db.SaveDoc("marble07", "", &CouchDoc{JSONValue: byteJSON07, Attachments: attachments7}) + _, saveerr = db.saveDoc("marble07", "", &couchDoc{jsonValue: byteJSON07, attachments: attachments7}) assert.NoError(t, saveerr, "Error when trying to save a document") //Save the test document - _, saveerr = db.SaveDoc("marble08", "", &CouchDoc{JSONValue: byteJSON08, Attachments: attachments8}) + _, saveerr = db.saveDoc("marble08", "", &couchDoc{jsonValue: byteJSON08, attachments: attachments8}) assert.NoError(t, saveerr, "Error when trying to save a document") //Save the test document - _, saveerr = db.SaveDoc("marble09", "", &CouchDoc{JSONValue: byteJSON09, Attachments: attachments9}) + _, saveerr = db.saveDoc("marble09", "", &couchDoc{jsonValue: byteJSON09, attachments: attachments9}) assert.NoError(t, saveerr, "Error when trying to save a document") //Save the test document - _, saveerr = db.SaveDoc("marble10", "", &CouchDoc{JSONValue: byteJSON10, Attachments: attachments10}) + _, saveerr = db.saveDoc("marble10", "", &couchDoc{jsonValue: byteJSON10, attachments: attachments10}) assert.NoError(t, saveerr, "Error when trying to save a document") //Save the test document - _, saveerr = db.SaveDoc("marble11", "", &CouchDoc{JSONValue: byteJSON11, Attachments: attachments11}) + _, saveerr = db.saveDoc("marble11", "", &couchDoc{jsonValue: byteJSON11, attachments: attachments11}) assert.NoError(t, saveerr, "Error when trying to save a document") //Save the test document - _, saveerr = db.SaveDoc("marble12", "", &CouchDoc{JSONValue: byteJSON12, Attachments: attachments12}) + _, saveerr = db.saveDoc("marble12", "", &couchDoc{jsonValue: byteJSON12, attachments: attachments12}) 
assert.NoError(t, saveerr, "Error when trying to save a document") //Test query with invalid JSON ------------------------------------------------------------------- queryString := `{"selector":{"owner":}}` - _, _, err = db.QueryDocuments(queryString) + _, _, err = db.queryDocuments(queryString) assert.Error(t, err, "Error should have been thrown for bad json") //Test query with object ------------------------------------------------------------------- queryString = `{"selector":{"owner":{"$eq":"jerry"}}}` - queryResult, _, err := db.QueryDocuments(queryString) + queryResult, _, err := db.queryDocuments(queryString) assert.NoError(t, err, "Error when attempting to execute a query") //There should be 3 results for owner="jerry" @@ -1238,7 +1232,7 @@ func TestRichQuery(t *testing.T) { //Test query with implicit operator -------------------------------------------------------------- queryString = `{"selector":{"owner":"jerry"}}` - queryResult, _, err = db.QueryDocuments(queryString) + queryResult, _, err = db.queryDocuments(queryString) assert.NoError(t, err, "Error when attempting to execute a query") //There should be 3 results for owner="jerry" @@ -1247,7 +1241,7 @@ func TestRichQuery(t *testing.T) { //Test query with specified fields ------------------------------------------------------------------- queryString = `{"selector":{"owner":{"$eq":"jerry"}},"fields": ["owner","asset_name","color","size"]}` - queryResult, _, err = db.QueryDocuments(queryString) + queryResult, _, err = db.queryDocuments(queryString) assert.NoError(t, err, "Error when attempting to execute a query") //There should be 3 results for owner="jerry" @@ -1256,7 +1250,7 @@ func TestRichQuery(t *testing.T) { //Test query with a leading operator ------------------------------------------------------------------- queryString = `{"selector":{"$or":[{"owner":{"$eq":"jerry"}},{"owner": {"$eq": "frank"}}]}}` - queryResult, _, err = db.QueryDocuments(queryString) + queryResult, _, err = 
db.queryDocuments(queryString) assert.NoError(t, err, "Error when attempting to execute a query") //There should be 4 results for owner="jerry" or owner="frank" @@ -1265,7 +1259,7 @@ func TestRichQuery(t *testing.T) { //Test query implicit and explicit operator ------------------------------------------------------------------ queryString = `{"selector":{"color":"green","$or":[{"owner":"tom"},{"owner":"frank"}]}}` - queryResult, _, err = db.QueryDocuments(queryString) + queryResult, _, err = db.queryDocuments(queryString) assert.NoError(t, err, "Error when attempting to execute a query") //There should be 2 results for color="green" and (owner="jerry" or owner="frank") @@ -1274,7 +1268,7 @@ func TestRichQuery(t *testing.T) { //Test query with a leading operator ------------------------------------------------------------------------- queryString = `{"selector":{"$and":[{"size":{"$gte":2}},{"size":{"$lte":5}}]}}` - queryResult, _, err = db.QueryDocuments(queryString) + queryResult, _, err = db.queryDocuments(queryString) assert.NoError(t, err, "Error when attempting to execute a query") //There should be 4 results for size >= 2 and size <= 5 @@ -1283,7 +1277,7 @@ func TestRichQuery(t *testing.T) { //Test query with leading and embedded operator ------------------------------------------------------------- queryString = `{"selector":{"$and":[{"size":{"$gte":3}},{"size":{"$lte":10}},{"$not":{"size":7}}]}}` - queryResult, _, err = db.QueryDocuments(queryString) + queryResult, _, err = db.queryDocuments(queryString) assert.NoError(t, err, "Error when attempting to execute a query") //There should be 7 results for size >= 3 and size <= 10 and not 7 @@ -1292,29 +1286,29 @@ func TestRichQuery(t *testing.T) { //Test query with leading operator and array of objects ---------------------------------------------------------- queryString = `{"selector":{"$and":[{"size":{"$gte":2}},{"size":{"$lte":10}},{"$nor":[{"size":3},{"size":5},{"size":7}]}]}}` - queryResult, _, err = 
db.QueryDocuments(queryString) + queryResult, _, err = db.queryDocuments(queryString) assert.NoError(t, err, "Error when attempting to execute a query") //There should be 6 results for size >= 2 and size <= 10 and not 3,5 or 7 assert.Equal(t, 6, len(queryResult)) //Test a range query --------------------------------------------------------------------------------------------- - queryResult, _, err = db.ReadDocRange("marble02", "marble06", 10000) + queryResult, _, err = db.readDocRange("marble02", "marble06", 10000) assert.NoError(t, err, "Error when attempting to execute a range query") //There should be 4 results assert.Equal(t, 4, len(queryResult)) //Attachments retrieved should be correct - assert.Equal(t, attachment2.AttachmentBytes, queryResult[0].Attachments[0].AttachmentBytes) - assert.Equal(t, attachment3.AttachmentBytes, queryResult[1].Attachments[0].AttachmentBytes) - assert.Equal(t, attachment4.AttachmentBytes, queryResult[2].Attachments[0].AttachmentBytes) - assert.Equal(t, attachment5.AttachmentBytes, queryResult[3].Attachments[0].AttachmentBytes) + assert.Equal(t, attachment2.AttachmentBytes, queryResult[0].attachments[0].AttachmentBytes) + assert.Equal(t, attachment3.AttachmentBytes, queryResult[1].attachments[0].AttachmentBytes) + assert.Equal(t, attachment4.AttachmentBytes, queryResult[2].attachments[0].AttachmentBytes) + assert.Equal(t, attachment5.AttachmentBytes, queryResult[3].attachments[0].AttachmentBytes) //Test query with for tom ------------------------------------------------------------------- queryString = `{"selector":{"owner":{"$eq":"tom"}}}` - queryResult, _, err = db.QueryDocuments(queryString) + queryResult, _, err = db.queryDocuments(queryString) assert.NoError(t, err, "Error when attempting to execute a query") //There should be 8 results for owner="tom" @@ -1323,7 +1317,7 @@ func TestRichQuery(t *testing.T) { //Test query with for tom with limit ------------------------------------------------------------------- queryString = 
`{"selector":{"owner":{"$eq":"tom"}},"limit":2}` - queryResult, _, err = db.QueryDocuments(queryString) + queryResult, _, err = db.queryDocuments(queryString) assert.NoError(t, err, "Error when attempting to execute a query") //There should be 2 results for owner="tom" with a limit of 2 @@ -1333,7 +1327,7 @@ func TestRichQuery(t *testing.T) { indexDefSize := `{"index":{"fields":[{"size":"desc"}]},"ddoc":"indexSizeSortDoc", "name":"indexSizeSortName","type":"json"}` //Create the index - _, err = db.CreateIndex(indexDefSize) + _, err = db.createIndex(indexDefSize) assert.NoError(t, err, "Error thrown while creating an index") //Delay for 100ms since CouchDB index list is updated async after index create/drop @@ -1342,12 +1336,12 @@ func TestRichQuery(t *testing.T) { //Test query with valid index ------------------------------------------------------------------- queryString = `{"selector":{"size":{"$gt":0}}, "use_index":["indexSizeSortDoc","indexSizeSortName"]}` - _, _, err = db.QueryDocuments(queryString) + _, _, err = db.queryDocuments(queryString) assert.NoError(t, err, "Error when attempting to execute a query with a valid index") } -func testBatchBatchOperations(t *testing.T, maxRetries int) { +func testBatchBatchOperations(t *testing.T, config *ledger.CouchDBConfig) { byteJSON01 := []byte(`{"_id":"marble01","asset_name":"marble01","color":"blue","size":"1","owner":"jerry"}`) byteJSON02 := []byte(`{"_id":"marble02","asset_name":"marble02","color":"red","size":"2","owner":"tom"}`) @@ -1356,70 +1350,67 @@ func testBatchBatchOperations(t *testing.T, maxRetries int) { byteJSON05 := []byte(`{"_id":"marble05","asset_name":"marble05","color":"blue","size":"5","owner":"jerry"}`) byteJSON06 := []byte(`{"_id":"marble06#$&'()*+,/:;=?@[]","asset_name":"marble06#$&'()*+,/:;=?@[]","color":"blue","size":"6","owner":"jerry"}`) - attachment1 := &AttachmentInfo{} + attachment1 := &attachmentInfo{} attachment1.AttachmentBytes = []byte(`marble01 - test attachment`) 
attachment1.ContentType = "application/octet-stream" attachment1.Name = "data" - attachments1 := []*AttachmentInfo{} + attachments1 := []*attachmentInfo{} attachments1 = append(attachments1, attachment1) - attachment2 := &AttachmentInfo{} + attachment2 := &attachmentInfo{} attachment2.AttachmentBytes = []byte(`marble02 - test attachment`) attachment2.ContentType = "application/octet-stream" attachment2.Name = "data" - attachments2 := []*AttachmentInfo{} + attachments2 := []*attachmentInfo{} attachments2 = append(attachments2, attachment2) - attachment3 := &AttachmentInfo{} + attachment3 := &attachmentInfo{} attachment3.AttachmentBytes = []byte(`marble03 - test attachment`) attachment3.ContentType = "application/octet-stream" attachment3.Name = "data" - attachments3 := []*AttachmentInfo{} + attachments3 := []*attachmentInfo{} attachments3 = append(attachments3, attachment3) - attachment4 := &AttachmentInfo{} + attachment4 := &attachmentInfo{} attachment4.AttachmentBytes = []byte(`marble04 - test attachment`) attachment4.ContentType = "application/octet-stream" attachment4.Name = "data" - attachments4 := []*AttachmentInfo{} + attachments4 := []*attachmentInfo{} attachments4 = append(attachments4, attachment4) - attachment5 := &AttachmentInfo{} + attachment5 := &attachmentInfo{} attachment5.AttachmentBytes = []byte(`marble05 - test attachment`) attachment5.ContentType = "application/octet-stream" attachment5.Name = "data" - attachments5 := []*AttachmentInfo{} + attachments5 := []*attachmentInfo{} attachments5 = append(attachments5, attachment5) - attachment6 := &AttachmentInfo{} + attachment6 := &attachmentInfo{} attachment6.AttachmentBytes = []byte(`marble06#$&'()*+,/:;=?@[] - test attachment`) attachment6.ContentType = "application/octet-stream" attachment6.Name = "data" - attachments6 := []*AttachmentInfo{} + attachments6 := []*attachmentInfo{} attachments6 = append(attachments6, attachment6) database := "testbatch" - err := cleanup(database) - assert.NoError(t, 
err, "Error when trying to cleanup Error: %s", err) - defer cleanup(database) //create a new instance and database object -------------------------------------------------------- - couchInstance, err := CreateCouchInstance(testConfig(), &disabled.Provider{}) + couchInstance, err := createCouchInstance(config, &disabled.Provider{}) assert.NoError(t, err, "Error when trying to create couch instance") - db := CouchDatabase{CouchInstance: couchInstance, DBName: database} + db := couchDatabase{couchInstance: couchInstance, dbName: database} //create a new database - errdb := db.CreateDatabaseIfNotExist() + errdb := db.createDatabaseIfNotExist() assert.NoError(t, errdb, "Error when trying to create database") - batchUpdateDocs := []*CouchDoc{} + batchUpdateDocs := []*couchDoc{} - value1 := &CouchDoc{JSONValue: byteJSON01, Attachments: attachments1} - value2 := &CouchDoc{JSONValue: byteJSON02, Attachments: attachments2} - value3 := &CouchDoc{JSONValue: byteJSON03, Attachments: attachments3} - value4 := &CouchDoc{JSONValue: byteJSON04, Attachments: attachments4} - value5 := &CouchDoc{JSONValue: byteJSON05, Attachments: attachments5} - value6 := &CouchDoc{JSONValue: byteJSON06, Attachments: attachments6} + value1 := &couchDoc{jsonValue: byteJSON01, attachments: attachments1} + value2 := &couchDoc{jsonValue: byteJSON02, attachments: attachments2} + value3 := &couchDoc{jsonValue: byteJSON03, attachments: attachments3} + value4 := &couchDoc{jsonValue: byteJSON04, attachments: attachments4} + value5 := &couchDoc{jsonValue: byteJSON05, attachments: attachments5} + value6 := &couchDoc{jsonValue: byteJSON06, attachments: attachments6} batchUpdateDocs = append(batchUpdateDocs, value1) batchUpdateDocs = append(batchUpdateDocs, value2) @@ -1428,7 +1419,7 @@ func testBatchBatchOperations(t *testing.T, maxRetries int) { batchUpdateDocs = append(batchUpdateDocs, value5) batchUpdateDocs = append(batchUpdateDocs, value6) - batchUpdateResp, err := db.BatchUpdateDocuments(batchUpdateDocs) + 
batchUpdateResp, err := db.batchUpdateDocuments(batchUpdateDocs) assert.NoError(t, err, "Error when attempting to update a batch of documents") //check to make sure each batch update response was successful @@ -1438,11 +1429,11 @@ func testBatchBatchOperations(t *testing.T, maxRetries int) { //---------------------------------------------- //Test Retrieve JSON - dbGetResp, _, geterr := db.ReadDoc("marble01") + dbGetResp, _, geterr := db.readDoc("marble01") assert.NoError(t, geterr, "Error when attempting read a document") assetResp := &Asset{} - geterr = json.Unmarshal(dbGetResp.JSONValue, &assetResp) + geterr = json.Unmarshal(dbGetResp.jsonValue, &assetResp) assert.NoError(t, geterr, "Error when trying to retrieve a document") //Verify the owner retrieved matches assert.Equal(t, "jerry", assetResp.Owner) @@ -1450,31 +1441,31 @@ func testBatchBatchOperations(t *testing.T, maxRetries int) { //---------------------------------------------- // Test Retrieve JSON using ID with URL special characters, // this will confirm that batch document IDs and URL IDs are consistent, even if they include special characters - dbGetResp, _, geterr = db.ReadDoc("marble06#$&'()*+,/:;=?@[]") + dbGetResp, _, geterr = db.readDoc("marble06#$&'()*+,/:;=?@[]") assert.NoError(t, geterr, "Error when attempting read a document") assetResp = &Asset{} - geterr = json.Unmarshal(dbGetResp.JSONValue, &assetResp) + geterr = json.Unmarshal(dbGetResp.jsonValue, &assetResp) assert.NoError(t, geterr, "Error when trying to retrieve a document") //Verify the owner retrieved matches assert.Equal(t, "jerry", assetResp.Owner) //---------------------------------------------- //Test retrieve binary - dbGetResp, _, geterr = db.ReadDoc("marble03") + dbGetResp, _, geterr = db.readDoc("marble03") assert.NoError(t, geterr, "Error when attempting read a document") //Retrieve the attachments - attachments := dbGetResp.Attachments + attachments := dbGetResp.attachments //Only one was saved, so take the first 
retrievedAttachment := attachments[0] //Verify the text matches assert.Equal(t, retrievedAttachment.AttachmentBytes, attachment3.AttachmentBytes) //---------------------------------------------- //Test Bad Updates - batchUpdateDocs = []*CouchDoc{} + batchUpdateDocs = []*couchDoc{} batchUpdateDocs = append(batchUpdateDocs, value1) batchUpdateDocs = append(batchUpdateDocs, value2) - batchUpdateResp, err = db.BatchUpdateDocuments(batchUpdateDocs) + batchUpdateResp, err = db.batchUpdateDocuments(batchUpdateDocs) assert.NoError(t, err, "Error when attempting to update a batch of documents") //No revision was provided, so these two updates should fail //Verify that the "Ok" field is returned as false @@ -1492,28 +1483,28 @@ func testBatchBatchOperations(t *testing.T, maxRetries int) { keys = append(keys, "marble01") keys = append(keys, "marble03") - batchRevs, err := db.BatchRetrieveDocumentMetadata(keys) + batchRevs, err := db.batchRetrieveDocumentMetadata(keys) assert.NoError(t, err, "Error when attempting retrieve revisions") - batchUpdateDocs = []*CouchDoc{} + batchUpdateDocs = []*couchDoc{} //iterate through the revision docs for _, revdoc := range batchRevs { if revdoc.ID == "marble01" { //update the json with the rev and add to the batch marble01Doc := addRevisionAndDeleteStatus(revdoc.Rev, byteJSON01, false) - batchUpdateDocs = append(batchUpdateDocs, &CouchDoc{JSONValue: marble01Doc, Attachments: attachments1}) + batchUpdateDocs = append(batchUpdateDocs, &couchDoc{jsonValue: marble01Doc, attachments: attachments1}) } if revdoc.ID == "marble03" { //update the json with the rev and add to the batch marble03Doc := addRevisionAndDeleteStatus(revdoc.Rev, byteJSON03, false) - batchUpdateDocs = append(batchUpdateDocs, &CouchDoc{JSONValue: marble03Doc, Attachments: attachments3}) + batchUpdateDocs = append(batchUpdateDocs, &couchDoc{jsonValue: marble03Doc, attachments: attachments3}) } } //Update couchdb with the batch - batchUpdateResp, err = 
db.BatchUpdateDocuments(batchUpdateDocs) + batchUpdateResp, err = db.batchUpdateDocuments(batchUpdateDocs) assert.NoError(t, err, "Error when attempting to update a batch of documents") //check to make sure each batch update response was successful for _, updateDoc := range batchUpdateResp { @@ -1528,27 +1519,27 @@ func testBatchBatchOperations(t *testing.T, maxRetries int) { keys = append(keys, "marble02") keys = append(keys, "marble04") - batchRevs, err = db.BatchRetrieveDocumentMetadata(keys) + batchRevs, err = db.batchRetrieveDocumentMetadata(keys) assert.NoError(t, err, "Error when attempting retrieve revisions") - batchUpdateDocs = []*CouchDoc{} + batchUpdateDocs = []*couchDoc{} //iterate through the revision docs for _, revdoc := range batchRevs { if revdoc.ID == "marble02" { //update the json with the rev and add to the batch marble02Doc := addRevisionAndDeleteStatus(revdoc.Rev, byteJSON02, true) - batchUpdateDocs = append(batchUpdateDocs, &CouchDoc{JSONValue: marble02Doc, Attachments: attachments1}) + batchUpdateDocs = append(batchUpdateDocs, &couchDoc{jsonValue: marble02Doc, attachments: attachments1}) } if revdoc.ID == "marble04" { //update the json with the rev and add to the batch marble04Doc := addRevisionAndDeleteStatus(revdoc.Rev, byteJSON04, true) - batchUpdateDocs = append(batchUpdateDocs, &CouchDoc{JSONValue: marble04Doc, Attachments: attachments3}) + batchUpdateDocs = append(batchUpdateDocs, &couchDoc{jsonValue: marble04Doc, attachments: attachments3}) } } //Update couchdb with the batch - batchUpdateResp, err = db.BatchUpdateDocuments(batchUpdateDocs) + batchUpdateResp, err = db.batchUpdateDocuments(batchUpdateDocs) assert.NoError(t, err, "Error when attempting to update a batch of documents") //check to make sure each batch update response was successful @@ -1557,14 +1548,14 @@ func testBatchBatchOperations(t *testing.T, maxRetries int) { } //Retrieve the test document - dbGetResp, _, geterr = db.ReadDoc("marble02") + dbGetResp, _, geterr = 
db.readDoc("marble02") assert.NoError(t, geterr, "Error when trying to retrieve a document") //assert the value was deleted assert.Nil(t, dbGetResp) //Retrieve the test document - dbGetResp, _, geterr = db.ReadDoc("marble04") + dbGetResp, _, geterr = db.readDoc("marble04") assert.NoError(t, geterr, "Error when trying to retrieve a document") //assert the value was deleted @@ -1597,65 +1588,65 @@ func addRevisionAndDeleteStatus(revision string, value []byte, deleted bool) []b } func TestDatabaseSecuritySettings(t *testing.T) { - startCouchDB() + config := testConfig() + couchDBEnv.startCouchDB(t) + config.Address = couchDBEnv.couchAddress + defer couchDBEnv.cleanup(config) database := "testdbsecuritysettings" - err := cleanup(database) - assert.NoError(t, err, "Error when trying to cleanup Error: %s", err) - defer cleanup(database) //create a new instance and database object -------------------------------------------------------- - couchInstance, err := CreateCouchInstance(testConfig(), &disabled.Provider{}) + couchInstance, err := createCouchInstance(config, &disabled.Provider{}) assert.NoError(t, err, "Error when trying to create couch instance") - db := CouchDatabase{CouchInstance: couchInstance, DBName: database} + db := couchDatabase{couchInstance: couchInstance, dbName: database} //create a new database - errdb := db.CreateDatabaseIfNotExist() + errdb := db.createDatabaseIfNotExist() assert.NoError(t, errdb, "Error when trying to create database") //Create a database security object - securityPermissions := &DatabaseSecurity{} + securityPermissions := &databaseSecurity{} securityPermissions.Admins.Names = append(securityPermissions.Admins.Names, "admin") securityPermissions.Members.Names = append(securityPermissions.Members.Names, "admin") //Apply security - err = db.ApplyDatabaseSecurity(securityPermissions) + err = db.applyDatabaseSecurity(securityPermissions) assert.NoError(t, err, "Error when trying to apply database security") //Retrieve database 
security - databaseSecurity, err := db.GetDatabaseSecurity() + dbSecurity, err := db.getDatabaseSecurity() assert.NoError(t, err, "Error when retrieving database security") //Verify retrieval of admins - assert.Equal(t, "admin", databaseSecurity.Admins.Names[0]) + assert.Equal(t, "admin", dbSecurity.Admins.Names[0]) //Verify retrieval of members - assert.Equal(t, "admin", databaseSecurity.Members.Names[0]) + assert.Equal(t, "admin", dbSecurity.Members.Names[0]) //Create an empty database security object - securityPermissions = &DatabaseSecurity{} + securityPermissions = &databaseSecurity{} //Apply the security - err = db.ApplyDatabaseSecurity(securityPermissions) + err = db.applyDatabaseSecurity(securityPermissions) assert.NoError(t, err, "Error when trying to apply database security") //Retrieve database security - databaseSecurity, err = db.GetDatabaseSecurity() + dbSecurity, err = db.getDatabaseSecurity() assert.NoError(t, err, "Error when retrieving database security") //Verify retrieval of admins, should be an empty array - assert.Equal(t, 0, len(databaseSecurity.Admins.Names)) + assert.Equal(t, 0, len(dbSecurity.Admins.Names)) //Verify retrieval of members, should be an empty array - assert.Equal(t, 0, len(databaseSecurity.Members.Names)) + assert.Equal(t, 0, len(dbSecurity.Members.Names)) } func TestURLWithSpecialCharacters(t *testing.T) { - startCouchDB() + config := testConfig() + couchDBEnv.startCouchDB(t) + config.Address = couchDBEnv.couchAddress + defer couchDBEnv.cleanup(config) database := "testdb+with+plus_sign" - err := cleanup(database) - assert.NoError(t, err, "Error when trying to cleanup Error: %s", err) - defer cleanup(database) // parse a contructed URL finalURL, err := url.Parse("http://127.0.0.1:5984") @@ -1666,15 +1657,15 @@ func TestURLWithSpecialCharacters(t *testing.T) { assert.Equal(t, "http://127.0.0.1:5984/testdb%2Bwith%2Bplus_sign/_index/designdoc/json/indexname", couchdbURL.String()) //create a new instance and database object 
-------------------------------------------------------- - couchInstance, err := CreateCouchInstance(testConfig(), &disabled.Provider{}) + couchInstance, err := createCouchInstance(config, &disabled.Provider{}) assert.NoError(t, err, "Error when trying to create couch instance") - db := CouchDatabase{CouchInstance: couchInstance, DBName: database} + db := couchDatabase{couchInstance: couchInstance, dbName: database} //create a new database - errdb := db.CreateDatabaseIfNotExist() + errdb := db.createDatabaseIfNotExist() assert.NoError(t, errdb, "Error when trying to create database") - dbInfo, _, errInfo := db.GetDatabaseInfo() + dbInfo, _, errInfo := db.getDatabaseInfo() assert.NoError(t, errInfo, "Error when trying to get database info") assert.Equal(t, database, dbInfo.DbName) diff --git a/core/ledger/kvledger/txmgmt/statedb/statecouchdb/couchdb_test_export.go b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/couchdb_test_export.go new file mode 100644 index 00000000000..201fe0af643 --- /dev/null +++ b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/couchdb_test_export.go @@ -0,0 +1,46 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package statecouchdb + +import ( + "testing" + + "github.com/hyperledger/fabric/common/metrics/disabled" + "github.com/hyperledger/fabric/core/ledger" + "github.com/hyperledger/fabric/integration/runner" + "github.com/stretchr/testify/require" +) + +// StartCouchDB starts the CouchDB if it is not running already +func StartCouchDB(t *testing.T, binds []string) (addr string, stopCouchDBFunc func()) { + couchDB := &runner.CouchDB{Binds: binds} + require.NoError(t, couchDB.Start()) + return couchDB.Address(), func() { couchDB.Stop() } +} + +// DeleteApplicationDBs deletes all the databases other than fabric internal database +func DeleteApplicationDBs(t testing.TB, config *ledger.CouchDBConfig) { + couchInstance, err := createCouchInstance(config, &disabled.Provider{}) + require.NoError(t, err) + dbNames, err := couchInstance.retrieveApplicationDBNames() + require.NoError(t, err) + for _, dbName := range dbNames { + if dbName != fabricInternalDBName { + dropDB(t, couchInstance, dbName) + } + } +} + +func dropDB(t testing.TB, couchInstance *couchInstance, dbName string) { + db := &couchDatabase{ + couchInstance: couchInstance, + dbName: dbName, + } + response, err := db.dropDatabase() + require.NoError(t, err) + require.True(t, response.Ok) +} diff --git a/core/ledger/util/couchdb/couchdbutil.go b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/couchdbutil.go similarity index 85% rename from core/ledger/util/couchdb/couchdbutil.go rename to core/ledger/kvledger/txmgmt/statedb/statecouchdb/couchdbutil.go index d829e8c2ff5..a5d920ec77c 100644 --- a/core/ledger/util/couchdb/couchdbutil.go +++ b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/couchdbutil.go @@ -4,7 +4,7 @@ Copyright IBM Corp. All Rights Reserved. 
SPDX-License-Identifier: Apache-2.0 */ -package couchdb +package statecouchdb import ( "bytes" @@ -19,6 +19,7 @@ import ( "github.com/hyperledger/fabric/common/metrics" "github.com/hyperledger/fabric/common/util" + "github.com/hyperledger/fabric/core/ledger" "github.com/pkg/errors" ) @@ -34,7 +35,7 @@ var chainNameAllowedLength = 50 var namespaceNameAllowedLength = 50 var collectionNameAllowedLength = 50 -func CreateCouchInstance(config *Config, metricsProvider metrics.Provider) (*CouchInstance, error) { +func createCouchInstance(config *ledger.CouchDBConfig, metricsProvider metrics.Provider) (*couchInstance, error) { // make sure the address is valid connectURL := &url.URL{ Host: config.Address, @@ -72,12 +73,12 @@ func CreateCouchInstance(config *Config, metricsProvider metrics.Provider) (*Cou client.Transport = transport //Create the CouchDB instance - couchInstance := &CouchInstance{ + couchInstance := &couchInstance{ conf: config, client: client, stats: newStats(metricsProvider), } - connectInfo, retVal, verifyErr := couchInstance.VerifyCouchConfig() + connectInfo, retVal, verifyErr := couchInstance.verifyCouchConfig() if verifyErr != nil { return nil, verifyErr } @@ -111,8 +112,8 @@ func checkCouchDBVersion(version string) error { return nil } -//CreateCouchDatabase creates a CouchDB database object, as well as the underlying database if it does not exist -func CreateCouchDatabase(couchInstance *CouchInstance, dbName string) (*CouchDatabase, error) { +//createCouchDatabase creates a CouchDB database object, as well as the underlying database if it does not exist +func createCouchDatabase(couchInstance *couchInstance, dbName string) (*couchDatabase, error) { databaseName, err := mapAndValidateDatabaseName(dbName) if err != nil { @@ -120,10 +121,10 @@ func CreateCouchDatabase(couchInstance *CouchInstance, dbName string) (*CouchDat return nil, err } - couchDBDatabase := CouchDatabase{CouchInstance: couchInstance, DBName: databaseName, IndexWarmCounter: 1} + 
couchDBDatabase := couchDatabase{couchInstance: couchInstance, dbName: databaseName, indexWarmCounter: 1} // Create CouchDB database upon ledger startup, if it doesn't already exist - err = couchDBDatabase.CreateDatabaseIfNotExist() + err = couchDBDatabase.createDatabaseIfNotExist() if err != nil { logger.Errorf("Error calling CouchDB CreateDatabaseIfNotExist() for dbName: %s, error: %s", dbName, err) return nil, err @@ -132,30 +133,30 @@ func CreateCouchDatabase(couchInstance *CouchInstance, dbName string) (*CouchDat return &couchDBDatabase, nil } -//CreateSystemDatabasesIfNotExist - creates the system databases if they do not exist -func CreateSystemDatabasesIfNotExist(couchInstance *CouchInstance) error { +//createSystemDatabasesIfNotExist - creates the system databases if they do not exist +func createSystemDatabasesIfNotExist(couchInstance *couchInstance) error { dbName := "_users" - systemCouchDBDatabase := CouchDatabase{CouchInstance: couchInstance, DBName: dbName, IndexWarmCounter: 1} - err := systemCouchDBDatabase.CreateDatabaseIfNotExist() + systemCouchDBDatabase := couchDatabase{couchInstance: couchInstance, dbName: dbName, indexWarmCounter: 1} + err := systemCouchDBDatabase.createDatabaseIfNotExist() if err != nil { - logger.Errorf("Error calling CouchDB CreateDatabaseIfNotExist() for system dbName: %s, error: %s", dbName, err) + logger.Errorf("Error calling CouchDB createDatabaseIfNotExist() for system dbName: %s, error: %s", dbName, err) return err } dbName = "_replicator" - systemCouchDBDatabase = CouchDatabase{CouchInstance: couchInstance, DBName: dbName, IndexWarmCounter: 1} - err = systemCouchDBDatabase.CreateDatabaseIfNotExist() + systemCouchDBDatabase = couchDatabase{couchInstance: couchInstance, dbName: dbName, indexWarmCounter: 1} + err = systemCouchDBDatabase.createDatabaseIfNotExist() if err != nil { - logger.Errorf("Error calling CouchDB CreateDatabaseIfNotExist() for system dbName: %s, error: %s", dbName, err) + logger.Errorf("Error 
calling CouchDB createDatabaseIfNotExist() for system dbName: %s, error: %s", dbName, err) return err } if couchInstance.conf.CreateGlobalChangesDB { dbName = "_global_changes" - systemCouchDBDatabase = CouchDatabase{CouchInstance: couchInstance, DBName: dbName, IndexWarmCounter: 1} - err = systemCouchDBDatabase.CreateDatabaseIfNotExist() + systemCouchDBDatabase = couchDatabase{couchInstance: couchInstance, dbName: dbName, indexWarmCounter: 1} + err = systemCouchDBDatabase.createDatabaseIfNotExist() if err != nil { - logger.Errorf("Error calling CouchDB CreateDatabaseIfNotExist() for system dbName: %s, error: %s", dbName, err) + logger.Errorf("Error calling CouchDB createDatabaseIfNotExist() for system dbName: %s, error: %s", dbName, err) return err } } @@ -179,9 +180,9 @@ func constructCouchDBUrl(connectURL *url.URL, dbName string, pathElements ...str return &url.URL{Opaque: buffer.String()} } -// ConstructMetadataDBName truncates the db name to couchdb allowed length to +// constructMetadataDBName truncates the db name to couchdb allowed length to // construct the metadataDBName -func ConstructMetadataDBName(dbName string) string { +func constructMetadataDBName(dbName string) string { if len(dbName) > maxLength { untruncatedDBName := dbName // Truncate the name if the length violates the allowed limit @@ -195,12 +196,12 @@ func ConstructMetadataDBName(dbName string) string { return dbName + "_" } -// ConstructNamespaceDBName truncates db name to couchdb allowed length to construct the final namespaceDBName +// constructNamespaceDBName truncates db name to couchdb allowed length to construct the final namespaceDBName // The passed namespace will be in one of the following formats: // - for namespaces containing regular public data // $$p - for namespaces containing private data collections // $$h - for namespaces containing hashes of private data collections -func ConstructNamespaceDBName(chainName, namespace string) string { +func 
constructNamespaceDBName(chainName, namespace string) string { // replace upper-case in namespace with a escape sequence '$' and the respective lower-case letter escapedNamespace := escapeUpperCase(namespace) namespaceDBName := chainName + "_" + escapedNamespace diff --git a/core/ledger/util/couchdb/couchdbutil_test.go b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/couchdbutil_test.go similarity index 84% rename from core/ledger/util/couchdb/couchdbutil_test.go rename to core/ledger/kvledger/txmgmt/statedb/statecouchdb/couchdbutil_test.go index 9fd256c6f20..27d1c02c7a1 100644 --- a/core/ledger/util/couchdb/couchdbutil_test.go +++ b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/couchdbutil_test.go @@ -4,7 +4,7 @@ Copyright IBM Corp. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ -package couchdb +package statecouchdb import ( "encoding/hex" @@ -17,72 +17,74 @@ import ( //Unit test of couch db util functionality func TestCreateCouchDBConnectionAndDB(t *testing.T) { - startCouchDB() + config := testConfig() + couchDBEnv.startCouchDB(t) + config.Address = couchDBEnv.couchAddress + defer couchDBEnv.cleanup(config) database := "testcreatecouchdbconnectionanddb" - cleanup(database) - defer cleanup(database) //create a new connection - couchInstance, err := CreateCouchInstance(testConfig(), &disabled.Provider{}) + couchInstance, err := createCouchInstance(config, &disabled.Provider{}) assert.NoError(t, err, "Error when trying to CreateCouchInstance") - _, err = CreateCouchDatabase(couchInstance, database) + _, err = createCouchDatabase(couchInstance, database) assert.NoError(t, err, "Error when trying to CreateCouchDatabase") } //Unit test of couch db util functionality func TestNotCreateCouchGlobalChangesDB(t *testing.T) { - startCouchDB() config := testConfig() + couchDBEnv.startCouchDB(t) + config.Address = couchDBEnv.couchAddress + defer couchDBEnv.cleanup(config) config.CreateGlobalChangesDB = false database := "_global_changes" - cleanup(database) 
- defer cleanup(database) //create a new connection - couchInstance, err := CreateCouchInstance(config, &disabled.Provider{}) + couchInstance, err := createCouchInstance(config, &disabled.Provider{}) assert.NoError(t, err, "Error when trying to CreateCouchInstance") - db := CouchDatabase{CouchInstance: couchInstance, DBName: database} + db := couchDatabase{couchInstance: couchInstance, dbName: database} //Retrieve the info for the new database and make sure the name matches - _, _, errdb := db.GetDatabaseInfo() + _, _, errdb := db.getDatabaseInfo() assert.NotNil(t, errdb) } //Unit test of couch db util functionality func TestCreateCouchDBSystemDBs(t *testing.T) { - startCouchDB() - database := "testcreatecouchdbsystemdb" - cleanup(database) - defer cleanup(database) + config := testConfig() + couchDBEnv.startCouchDB(t) + config.Address = couchDBEnv.couchAddress + defer couchDBEnv.cleanup(config) + config.CreateGlobalChangesDB = true //create a new connection - couchInstance, err := CreateCouchInstance(testConfig(), &disabled.Provider{}) + couchInstance, err := createCouchInstance(config, &disabled.Provider{}) assert.NoError(t, err, "Error when trying to CreateCouchInstance") - err = CreateSystemDatabasesIfNotExist(couchInstance) + err = createSystemDatabasesIfNotExist(couchInstance) assert.NoError(t, err, "Error when trying to create system databases") - db := CouchDatabase{CouchInstance: couchInstance, DBName: "_users"} + db := couchDatabase{couchInstance: couchInstance, dbName: "_users"} //Retrieve the info for the new database and make sure the name matches - dbResp, _, errdb := db.GetDatabaseInfo() + dbResp, _, errdb := db.getDatabaseInfo() assert.NoError(t, errdb, "Error when trying to retrieve _users database information") assert.Equal(t, "_users", dbResp.DbName) - db = CouchDatabase{CouchInstance: couchInstance, DBName: "_replicator"} + db = couchDatabase{couchInstance: couchInstance, dbName: "_replicator"} //Retrieve the info for the new database and make 
sure the name matches - dbResp, _, errdb = db.GetDatabaseInfo() + dbResp, _, errdb = db.getDatabaseInfo() assert.NoError(t, errdb, "Error when trying to retrieve _replicator database information") assert.Equal(t, "_replicator", dbResp.DbName) - db = CouchDatabase{CouchInstance: couchInstance, DBName: "_global_changes"} + db = couchDatabase{couchInstance: couchInstance, dbName: "_global_changes"} //Retrieve the info for the new database and make sure the name matches - dbResp, _, errdb = db.GetDatabaseInfo() + dbResp, _, errdb = db.getDatabaseInfo() assert.NoError(t, errdb, "Error when trying to retrieve _global_changes database information") assert.Equal(t, "_global_changes", dbResp.DbName) @@ -129,7 +131,7 @@ func TestConstructMetadataDBName(t *testing.T) { expectedDBName := truncatedChainName + "(" + hash + ")" + "_" expectedDBNameLength := 117 - constructedDBName := ConstructMetadataDBName(chainName) + constructedDBName := constructMetadataDBName(chainName) assert.Equal(t, expectedDBNameLength, len(constructedDBName)) assert.Equal(t, expectedDBName, constructedDBName) } @@ -163,7 +165,7 @@ func TestConstructedNamespaceDBName(t *testing.T) { expectedDBNameLength := 219 namespace := ns + "$$" + coll - constructedDBName := ConstructNamespaceDBName(chainName, namespace) + constructedDBName := constructNamespaceDBName(chainName, namespace) assert.Equal(t, expectedDBNameLength, len(constructedDBName)) assert.Equal(t, expectedDBName, constructedDBName) @@ -178,7 +180,7 @@ func TestConstructedNamespaceDBName(t *testing.T) { expectedDBNameLength = 167 namespace = ns - constructedDBName = ConstructNamespaceDBName(chainName, namespace) + constructedDBName = constructNamespaceDBName(chainName, namespace) assert.Equal(t, expectedDBNameLength, len(constructedDBName)) assert.Equal(t, expectedDBName, constructedDBName) } diff --git a/core/ledger/kvledger/txmgmt/statedb/statecouchdb/couchdoc_conv.go b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/couchdoc_conv.go index 
9afb3e8fbc3..e428625b6ca 100644 --- a/core/ledger/kvledger/txmgmt/statedb/statecouchdb/couchdoc_conv.go +++ b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/couchdoc_conv.go @@ -14,7 +14,6 @@ import ( "github.com/hyperledger/fabric/core/ledger/internal/version" "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/statedb" - "github.com/hyperledger/fabric/core/ledger/util/couchdb" "github.com/pkg/errors" ) @@ -66,13 +65,13 @@ func (v jsonValue) toBytes() ([]byte, error) { return jsonBytes, err } -func couchDocToKeyValue(doc *couchdb.CouchDoc) (*keyValue, error) { +func couchDocToKeyValue(doc *couchDoc) (*keyValue, error) { // initialize the return value var returnValue []byte var err error // create a generic map unmarshal the json jsonResult := make(map[string]interface{}) - decoder := json.NewDecoder(bytes.NewBuffer(doc.JSONValue)) + decoder := json.NewDecoder(bytes.NewBuffer(doc.jsonValue)) decoder.UseNumber() if err = decoder.Decode(&jsonResult); err != nil { return nil, err @@ -99,9 +98,9 @@ func couchDocToKeyValue(doc *couchdb.CouchDoc) (*keyValue, error) { delete(jsonResult, versionField) // handle binary or json data - if doc.Attachments != nil { // binary attachment + if doc.attachments != nil { // binary attachment // get binary data from attachment - for _, attachment := range doc.Attachments { + for _, attachment := range doc.attachments { if attachment.Name == binaryWrapper { returnValue = attachment.AttachmentBytes } @@ -121,7 +120,7 @@ func couchDocToKeyValue(doc *couchdb.CouchDoc) (*keyValue, error) { }, nil } -func keyValToCouchDoc(kv *keyValue) (*couchdb.CouchDoc, error) { +func keyValToCouchDoc(kv *keyValue) (*couchDoc, error) { type kvType int32 const ( kvTypeDelete = iota @@ -167,14 +166,14 @@ func keyValToCouchDoc(kv *keyValue) (*couchdb.CouchDoc, error) { if err != nil { return nil, err } - couchDoc := &couchdb.CouchDoc{JSONValue: jsonBytes} + couchDoc := &couchDoc{jsonValue: jsonBytes} if kvtype == kvTypeAttachment { - attachment := 
&couchdb.AttachmentInfo{} + attachment := &attachmentInfo{} attachment.AttachmentBytes = value attachment.ContentType = "application/octet-stream" attachment.Name = binaryWrapper - attachments := append([]*couchdb.AttachmentInfo{}, attachment) - couchDoc.Attachments = attachments + attachments := append([]*attachmentInfo{}, attachment) + couchDoc.attachments = attachments } return couchDoc, nil } @@ -185,7 +184,7 @@ type couchSavepointData struct { TxNum uint64 `json:"TxNum"` } -func encodeSavepoint(height *version.Height) (*couchdb.CouchDoc, error) { +func encodeSavepoint(height *version.Height) (*couchDoc, error) { var err error var savepointDoc couchSavepointData // construct savepoint document @@ -197,12 +196,12 @@ func encodeSavepoint(height *version.Height) (*couchdb.CouchDoc, error) { logger.Errorf("%+v", err) return nil, err } - return &couchdb.CouchDoc{JSONValue: savepointDocJSON, Attachments: nil}, nil + return &couchDoc{jsonValue: savepointDocJSON, attachments: nil}, nil } -func decodeSavepoint(couchDoc *couchdb.CouchDoc) (*version.Height, error) { +func decodeSavepoint(couchDoc *couchDoc) (*version.Height, error) { savepointDoc := &couchSavepointData{} - if err := json.Unmarshal(couchDoc.JSONValue, &savepointDoc); err != nil { + if err := json.Unmarshal(couchDoc.jsonValue, &savepointDoc); err != nil { err = errors.Wrap(err, "failed to unmarshal savepoint data") logger.Errorf("%+v", err) return nil, err @@ -214,7 +213,7 @@ type dataformatInfo struct { Version string `json:"Version"` } -func encodeDataformatInfo(dataFormatVersion string) (*couchdb.CouchDoc, error) { +func encodeDataformatInfo(dataFormatVersion string) (*couchDoc, error) { var err error dataformatInfo := &dataformatInfo{ Version: dataFormatVersion, @@ -225,13 +224,13 @@ func encodeDataformatInfo(dataFormatVersion string) (*couchdb.CouchDoc, error) { logger.Errorf("%+v", err) return nil, err } - return &couchdb.CouchDoc{JSONValue: dataformatInfoJSON, Attachments: nil}, nil + return 
&couchDoc{jsonValue: dataformatInfoJSON, attachments: nil}, nil } -func decodeDataformatInfo(couchDoc *couchdb.CouchDoc) (string, error) { +func decodeDataformatInfo(couchDoc *couchDoc) (string, error) { dataformatInfo := &dataformatInfo{} - if err := json.Unmarshal(couchDoc.JSONValue, dataformatInfo); err != nil { - err = errors.Wrapf(err, "failed to unmarshal json [%#v] into dataformatInfo", couchDoc.JSONValue) + if err := json.Unmarshal(couchDoc.jsonValue, dataformatInfo); err != nil { + err = errors.Wrapf(err, "failed to unmarshal json [%#v] into dataformatInfo", couchDoc.jsonValue) logger.Errorf("%+v", err) return "", err } diff --git a/core/ledger/kvledger/txmgmt/statedb/statecouchdb/metadata_retrieval.go b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/metadata_retrieval.go index 3fd12a5296a..6bdd69825b4 100644 --- a/core/ledger/kvledger/txmgmt/statedb/statecouchdb/metadata_retrieval.go +++ b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/metadata_retrieval.go @@ -8,17 +8,15 @@ package statecouchdb import ( "fmt" - - "github.com/hyperledger/fabric/core/ledger/util/couchdb" ) // nsMetadataRetriever implements `batch` interface and wraps the function `retrieveNsMetadata` // for allowing parallel execution of this function for different namespaces type nsMetadataRetriever struct { ns string - db *couchdb.CouchDatabase + db *couchDatabase keys []string - executionResult []*couchdb.DocMetadata + executionResult []*docMetadata } // subNsMetadataRetriever implements `batch` interface and wraps the function @@ -30,7 +28,7 @@ type nsMetadataRetriever struct { type subNsMetadataRetriever nsMetadataRetriever // retrievedMetadata retrieves the metadata for a collection of `namespace-keys` combination -func (vdb *VersionedDB) retrieveMetadata(nsKeysMap map[string][]string) (map[string][]*couchdb.DocMetadata, error) { +func (vdb *VersionedDB) retrieveMetadata(nsKeysMap map[string][]string) (map[string][]*docMetadata, error) { // construct one batch per namespace 
nsMetadataRetrievers := []batch{} for ns, keys := range nsKeysMap { @@ -44,7 +42,7 @@ func (vdb *VersionedDB) retrieveMetadata(nsKeysMap map[string][]string) (map[str return nil, err } // accumulate results from each batch - executionResults := make(map[string][]*couchdb.DocMetadata) + executionResults := make(map[string][]*docMetadata) for _, r := range nsMetadataRetrievers { nsMetadataRetriever := r.(*nsMetadataRetriever) executionResults[nsMetadataRetriever.ns] = nsMetadataRetriever.executionResult @@ -53,9 +51,9 @@ func (vdb *VersionedDB) retrieveMetadata(nsKeysMap map[string][]string) (map[str } // retrieveNsMetadata retrieves metadata for a given namespace -func retrieveNsMetadata(db *couchdb.CouchDatabase, keys []string) ([]*couchdb.DocMetadata, error) { +func retrieveNsMetadata(db *couchDatabase, keys []string) ([]*docMetadata, error) { // construct one batch per group of keys based on maxBatchSize - maxBatchSize := db.CouchInstance.MaxBatchUpdateSize() + maxBatchSize := db.couchInstance.maxBatchUpdateSize() batches := []batch{} remainingKeys := keys for { @@ -71,7 +69,7 @@ func retrieveNsMetadata(db *couchdb.CouchDatabase, keys []string) ([]*couchdb.Do return nil, err } // accumulate results from each batch - var executionResults []*couchdb.DocMetadata + var executionResults []*docMetadata for _, b := range batches { executionResults = append(executionResults, b.(*subNsMetadataRetriever).executionResult...) 
} @@ -92,7 +90,7 @@ func (r *nsMetadataRetriever) String() string { func (b *subNsMetadataRetriever) execute() error { var err error - if b.executionResult, err = b.db.BatchRetrieveDocumentMetadata(b.keys); err != nil { + if b.executionResult, err = b.db.batchRetrieveDocumentMetadata(b.keys); err != nil { return err } return nil diff --git a/core/ledger/util/couchdb/metrics.go b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/metrics.go similarity index 97% rename from core/ledger/util/couchdb/metrics.go rename to core/ledger/kvledger/txmgmt/statedb/statecouchdb/metrics.go index e521aac6624..0746f3ff31e 100644 --- a/core/ledger/util/couchdb/metrics.go +++ b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/metrics.go @@ -4,7 +4,7 @@ Copyright IBM Corp. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ -package couchdb +package statecouchdb import ( "time" diff --git a/core/ledger/util/couchdb/metrics_test.go b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/metrics_test.go similarity index 81% rename from core/ledger/util/couchdb/metrics_test.go rename to core/ledger/kvledger/txmgmt/statedb/statecouchdb/metrics_test.go index 542ca929b62..2f6c0570673 100644 --- a/core/ledger/util/couchdb/metrics_test.go +++ b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/metrics_test.go @@ -4,7 +4,7 @@ Copyright IBM Corp. All Rights Reserved. 
SPDX-License-Identifier: Apache-2.0 */ -package couchdb +package statecouchdb import ( "context" @@ -24,8 +24,12 @@ func TestAPIProcessTimeMetric(t *testing.T) { // create a new couch instance config := testConfig() + couchDBEnv.startCouchDB(t) + config.Address = couchDBEnv.couchAddress + defer couchDBEnv.cleanup(config) + defaultMaxRetries := config.MaxRetries config.MaxRetries = 0 - couchInstance, err := CreateCouchInstance(config, &disabled.Provider{}) + couchInstance, err := createCouchInstance(config, &disabled.Provider{}) gt.Expect(err).NotTo(HaveOccurred(), "Error when trying to create couch instance") couchInstance.stats = &stats{ @@ -43,4 +47,5 @@ func TestAPIProcessTimeMetric(t *testing.T) { "function_name", "function_name", "result", "0", })) + config.MaxRetries = defaultMaxRetries } diff --git a/core/ledger/kvledger/txmgmt/statedb/statecouchdb/redolog_test.go b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/redolog_test.go index 7bc6768f3e3..5ec02464bc2 100644 --- a/core/ledger/kvledger/txmgmt/statedb/statecouchdb/redolog_test.go +++ b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/redolog_test.go @@ -63,14 +63,14 @@ func TestRedoLogger(t *testing.T) { } func TestCouchdbRedoLogger(t *testing.T) { - testEnv.init(t, nil) - defer testEnv.cleanup() + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() // commitToRedologAndRestart - a helper function that commits directly to redologs and restart the statedb commitToRedologAndRestart := func(newVal string, version *version.Height) { batch := statedb.NewUpdateBatch() batch.Put("ns1", "key1", []byte(newVal), version) - db, err := testEnv.DBProvider.GetDBHandle("testcouchdbredologger") + db, err := vdbEnv.DBProvider.GetDBHandle("testcouchdbredologger") assert.NoError(t, err) vdb := db.(*VersionedDB) assert.NoError(t, @@ -81,11 +81,11 @@ func TestCouchdbRedoLogger(t *testing.T) { }, ), ) - testEnv.closeAndReopen() + vdbEnv.closeAndReopen() } // verifyExpectedVal - a helper function that verifies the statedb contents 
verifyExpectedVal := func(expectedVal string, expectedSavepoint *version.Height) { - db, err := testEnv.DBProvider.GetDBHandle("testcouchdbredologger") + db, err := vdbEnv.DBProvider.GetDBHandle("testcouchdbredologger") assert.NoError(t, err) vdb := db.(*VersionedDB) vv, err := vdb.GetState("ns1", "key1") @@ -97,7 +97,7 @@ func TestCouchdbRedoLogger(t *testing.T) { } // initialize statedb with initial set of writes - db, err := testEnv.DBProvider.GetDBHandle("testcouchdbredologger") + db, err := vdbEnv.DBProvider.GetDBHandle("testcouchdbredologger") if err != nil { t.Fatalf("Failed to get database handle: %s", err) } @@ -119,7 +119,7 @@ func TestCouchdbRedoLogger(t *testing.T) { verifyExpectedVal("value2", version.NewHeight(2, 1)) // A nil height should cause skipping the writing of redo-record - db, _ = testEnv.DBProvider.GetDBHandle("testcouchdbredologger") + db, _ = vdbEnv.DBProvider.GetDBHandle("testcouchdbredologger") vdb = db.(*VersionedDB) vdb.ApplyUpdates(batch1, nil) record, err := vdb.redoLogger.load() @@ -128,7 +128,7 @@ func TestCouchdbRedoLogger(t *testing.T) { assert.Equal(t, []byte("value3"), record.UpdateBatch.Get("ns1", "key1").Value) // A batch that does not contain PostOrderWrites should cause skipping the writing of redo-record - db, _ = testEnv.DBProvider.GetDBHandle("testcouchdbredologger") + db, _ = vdbEnv.DBProvider.GetDBHandle("testcouchdbredologger") vdb = db.(*VersionedDB) batchWithNoGeneratedWrites := batch1 batchWithNoGeneratedWrites.ContainsPostOrderWrites = false @@ -139,7 +139,7 @@ func TestCouchdbRedoLogger(t *testing.T) { assert.Equal(t, []byte("value3"), record.UpdateBatch.Get("ns1", "key1").Value) // A batch that contains PostOrderWrites should cause writing of redo-record - db, _ = testEnv.DBProvider.GetDBHandle("testcouchdbredologger") + db, _ = vdbEnv.DBProvider.GetDBHandle("testcouchdbredologger") vdb = db.(*VersionedDB) batchWithGeneratedWrites := batch1 batchWithGeneratedWrites.ContainsPostOrderWrites = true diff --git 
a/core/ledger/kvledger/txmgmt/statedb/statecouchdb/statecouchdb.go b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/statecouchdb.go index 746ad96b0f9..022941fd871 100644 --- a/core/ledger/kvledger/txmgmt/statedb/statecouchdb/statecouchdb.go +++ b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/statecouchdb.go @@ -16,9 +16,9 @@ import ( "github.com/hyperledger/fabric/common/flogging" "github.com/hyperledger/fabric/common/ledger/dataformat" "github.com/hyperledger/fabric/common/metrics" + "github.com/hyperledger/fabric/core/ledger" "github.com/hyperledger/fabric/core/ledger/internal/version" "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/statedb" - "github.com/hyperledger/fabric/core/ledger/util/couchdb" "github.com/pkg/errors" ) @@ -36,7 +36,7 @@ const ( // VersionedDBProvider implements interface VersionedDBProvider type VersionedDBProvider struct { - couchInstance *couchdb.CouchInstance + couchInstance *couchInstance databases map[string]*VersionedDB mux sync.Mutex openCounts uint64 @@ -45,9 +45,9 @@ type VersionedDBProvider struct { } // NewVersionedDBProvider instantiates VersionedDBProvider -func NewVersionedDBProvider(config *couchdb.Config, metricsProvider metrics.Provider, sysNamespaces []string) (*VersionedDBProvider, error) { +func NewVersionedDBProvider(config *ledger.CouchDBConfig, metricsProvider metrics.Provider, sysNamespaces []string) (*VersionedDBProvider, error) { logger.Debugf("constructing CouchDB VersionedDBProvider") - couchInstance, err := couchdb.CreateCouchInstance(config, metricsProvider) + couchInstance, err := createCouchInstance(config, metricsProvider) if err != nil { return nil, err } @@ -71,9 +71,9 @@ func NewVersionedDBProvider(config *couchdb.Config, metricsProvider metrics.Prov nil } -func checkExpectedDataformatVersion(couchInstance *couchdb.CouchInstance) error { +func checkExpectedDataformatVersion(couchInstance *couchInstance) error { databasesToIgnore := []string{fabricInternalDBName} - isEmpty, err := 
couchInstance.IsEmpty(databasesToIgnore) + isEmpty, err := couchInstance.isEmpty(databasesToIgnore) if err != nil { return err } @@ -95,12 +95,12 @@ func checkExpectedDataformatVersion(couchInstance *couchdb.CouchInstance) error return nil } -func readDataformatVersion(couchInstance *couchdb.CouchInstance) (string, error) { - db, err := couchdb.CreateCouchDatabase(couchInstance, fabricInternalDBName) +func readDataformatVersion(couchInstance *couchInstance) (string, error) { + db, err := createCouchDatabase(couchInstance, fabricInternalDBName) if err != nil { return "", err } - doc, _, err := db.ReadDoc(dataformatVersionDocID) + doc, _, err := db.readDoc(dataformatVersionDocID) logger.Debugf("dataformatVersionDoc = %s", doc) if err != nil || doc == nil { return "", err @@ -108,8 +108,8 @@ func readDataformatVersion(couchInstance *couchdb.CouchInstance) (string, error) return decodeDataformatInfo(doc) } -func writeDataFormatVersion(couchInstance *couchdb.CouchInstance, dataformatVersion string) error { - db, err := couchdb.CreateCouchDatabase(couchInstance, fabricInternalDBName) +func writeDataFormatVersion(couchInstance *couchInstance, dataformatVersion string) error { + db, err := createCouchDatabase(couchInstance, fabricInternalDBName) if err != nil { return err } @@ -117,10 +117,10 @@ func writeDataFormatVersion(couchInstance *couchdb.CouchInstance, dataformatVers if err != nil { return err } - if _, err := db.SaveDoc(dataformatVersionDocID, "", doc); err != nil { + if _, err := db.saveDoc(dataformatVersionDocID, "", doc); err != nil { return err } - dbResponse, err := db.EnsureFullCommit() + dbResponse, err := db.ensureFullCommit() if err != nil { return err @@ -161,16 +161,16 @@ func (provider *VersionedDBProvider) Close() { // HealthCheck checks to see if the couch instance of the peer is healthy func (provider *VersionedDBProvider) HealthCheck(ctx context.Context) error { - return provider.couchInstance.HealthCheck(ctx) + return 
provider.couchInstance.healthCheck(ctx) } // VersionedDB implements VersionedDB interface type VersionedDB struct { - couchInstance *couchdb.CouchInstance - metadataDB *couchdb.CouchDatabase // A database per channel to store metadata such as savepoint. - chainName string // The name of the chain/channel. - namespaceDBs map[string]*couchdb.CouchDatabase // One database per deployed chaincode. - committedDataCache *versionsCache // Used as a local cache during bulk processing of a block. + couchInstance *couchInstance + metadataDB *couchDatabase // A database per channel to store metadata such as savepoint. + chainName string // The name of the chain/channel. + namespaceDBs map[string]*couchDatabase // One database per deployed chaincode. + committedDataCache *versionsCache // Used as a local cache during bulk processing of a block. verCacheLock sync.RWMutex mux sync.RWMutex redoLogger *redoLogger @@ -178,16 +178,16 @@ type VersionedDB struct { } // newVersionedDB constructs an instance of VersionedDB -func newVersionedDB(couchInstance *couchdb.CouchInstance, redoLogger *redoLogger, dbName string, cache *cache) (*VersionedDB, error) { +func newVersionedDB(couchInstance *couchInstance, redoLogger *redoLogger, dbName string, cache *cache) (*VersionedDB, error) { // CreateCouchDatabase creates a CouchDB database object, as well as the underlying database if it does not exist chainName := dbName - dbName = couchdb.ConstructMetadataDBName(dbName) + dbName = constructMetadataDBName(dbName) - metadataDB, err := couchdb.CreateCouchDatabase(couchInstance, dbName) + metadataDB, err := createCouchDatabase(couchInstance, dbName) if err != nil { return nil, err } - namespaceDBMap := make(map[string]*couchdb.CouchDatabase) + namespaceDBMap := make(map[string]*couchDatabase) vdb := &VersionedDB{ couchInstance: couchInstance, metadataDB: metadataDB, @@ -228,20 +228,20 @@ func newVersionedDB(couchInstance *couchdb.CouchInstance, redoLogger *redoLogger } // getNamespaceDBHandle gets 
the handle to a named chaincode database -func (vdb *VersionedDB) getNamespaceDBHandle(namespace string) (*couchdb.CouchDatabase, error) { +func (vdb *VersionedDB) getNamespaceDBHandle(namespace string) (*couchDatabase, error) { vdb.mux.RLock() db := vdb.namespaceDBs[namespace] vdb.mux.RUnlock() if db != nil { return db, nil } - namespaceDBName := couchdb.ConstructNamespaceDBName(vdb.chainName, namespace) + namespaceDBName := constructNamespaceDBName(vdb.chainName, namespace) vdb.mux.Lock() defer vdb.mux.Unlock() db = vdb.namespaceDBs[namespace] if db == nil { var err error - db, err = couchdb.CreateCouchDatabase(vdb.couchInstance, namespaceDBName) + db, err = createCouchDatabase(vdb.couchInstance, namespaceDBName) if err != nil { return nil, err } @@ -257,7 +257,7 @@ func (vdb *VersionedDB) ProcessIndexesForChaincodeDeploy(namespace string, index return err } for indexFileName, indexData := range indexFilesData { - _, err = db.CreateIndex(string(indexData)) + _, err = db.createIndex(string(indexData)) if err != nil { return errors.WithMessagef(err, "error creating index from file [%s] for channel [%s]", indexFileName, namespace) } @@ -408,7 +408,7 @@ func (vdb *VersionedDB) readFromDB(namespace, key string) (*keyValue, error) { if err != nil { return nil, err } - couchDoc, _, err := db.ReadDoc(key) + couchDoc, _, err := db.readDoc(key) if err != nil { return nil, err } @@ -452,7 +452,7 @@ const optionLimit = "limit" func (vdb *VersionedDB) GetStateRangeScanIteratorWithMetadata(namespace string, startKey string, endKey string, metadata map[string]interface{}) (statedb.QueryResultsIterator, error) { logger.Debugf("Entering GetStateRangeScanIteratorWithMetadata namespace: %s startKey: %s endKey: %s metadata: %v", namespace, startKey, endKey, metadata) // Get the internalQueryLimit from core.yaml - internalQueryLimit := vdb.couchInstance.InternalQueryLimit() + internalQueryLimit := vdb.couchInstance.internalQueryLimit() requestedLimit := int32(0) // if metadata is 
provided, validate and apply options if metadata != nil { @@ -491,20 +491,20 @@ func (scanner *queryScanner) getNextStateRangeScanResults() error { return nil } -func rangeScanFilterCouchInternalDocs(db *couchdb.CouchDatabase, +func rangeScanFilterCouchInternalDocs(db *couchDatabase, startKey, endKey string, queryLimit int32, -) ([]*couchdb.QueryResult, string, error) { - var finalResults []*couchdb.QueryResult +) ([]*queryResult, string, error) { + var finalResults []*queryResult var finalNextStartKey string for { - results, nextStartKey, err := db.ReadDocRange(startKey, endKey, queryLimit) + results, nextStartKey, err := db.readDocRange(startKey, endKey, queryLimit) if err != nil { logger.Debugf("Error calling ReadDocRange(): %s\n", err.Error()) return nil, "", err } - var filteredResults []*couchdb.QueryResult + var filteredResults []*queryResult for _, doc := range results { - if !isCouchInternalKey(doc.ID) { + if !isCouchInternalKey(doc.id) { filteredResults = append(filteredResults, doc) } } @@ -519,7 +519,7 @@ func rangeScanFilterCouchInternalDocs(db *couchdb.CouchDatabase, } var err error for i := 0; isCouchInternalKey(finalNextStartKey); i++ { - _, finalNextStartKey, err = db.ReadDocRange(finalNextStartKey, endKey, 1) + _, finalNextStartKey, err = db.readDocRange(finalNextStartKey, endKey, 1) logger.Debugf("i=%d, finalNextStartKey=%s", i, finalNextStartKey) if err != nil { return nil, "", err @@ -545,7 +545,7 @@ func (vdb *VersionedDB) ExecuteQuery(namespace, query string) (statedb.ResultsIt func (vdb *VersionedDB) ExecuteQueryWithMetadata(namespace, query string, metadata map[string]interface{}) (statedb.QueryResultsIterator, error) { logger.Debugf("Entering ExecuteQueryWithMetadata namespace: %s, query: %s, metadata: %v", namespace, query, metadata) // Get the querylimit from core.yaml - internalQueryLimit := vdb.couchInstance.InternalQueryLimit() + internalQueryLimit := vdb.couchInstance.internalQueryLimit() bookmark := "" requestedLimit := int32(0) // 
if metadata is provided, then validate and set provided options @@ -588,7 +588,7 @@ func (scanner *queryScanner) executeQueryWithBookmark() error { logger.Debugf("Error calling applyAdditionalQueryOptions(): %s\n", err.Error()) return err } - queryResult, bookmark, err := scanner.db.QueryDocuments(queryString) + queryResult, bookmark, err := scanner.db.queryDocuments(queryString) if err != nil { logger.Debugf("Error calling QueryDocuments(): %s\n", err.Error()) return err @@ -748,7 +748,7 @@ func (vdb *VersionedDB) ensureFullCommitAndRecordSavepoint(height *version.Heigh errsChan <- err return } - _, err = db.EnsureFullCommit() + _, err = db.ensureFullCommit() if err != nil { errsChan <- err return @@ -778,7 +778,7 @@ func (vdb *VersionedDB) ensureFullCommitAndRecordSavepoint(height *version.Heigh if err != nil { return err } - _, err = vdb.metadataDB.SaveDoc(savepointDocID, "", savepointCouchDoc) + _, err = vdb.metadataDB.saveDoc(savepointDocID, "", savepointCouchDoc) if err != nil { logger.Errorf("Failed to save the savepoint to DB %s", err.Error()) return err @@ -793,13 +793,13 @@ func (vdb *VersionedDB) ensureFullCommitAndRecordSavepoint(height *version.Heigh // GetLatestSavePoint implements method in VersionedDB interface func (vdb *VersionedDB) GetLatestSavePoint() (*version.Height, error) { var err error - couchDoc, _, err := vdb.metadataDB.ReadDoc(savepointDocID) + couchDoc, _, err := vdb.metadataDB.readDoc(savepointDocID) if err != nil { logger.Errorf("Failed to read savepoint data %s", err.Error()) return nil, err } // ReadDoc() not found (404) will result in nil response, in these cases return height nil - if couchDoc == nil || couchDoc.JSONValue == nil { + if couchDoc == nil || couchDoc.jsonValue == nil { return nil, nil } return decodeSavepoint(couchDoc) @@ -848,7 +848,7 @@ func applyAdditionalQueryOptions(queryString string, queryLimit int32, queryBook type queryScanner struct { namespace string - db *couchdb.CouchDatabase + db *couchDatabase 
queryDefinition *queryDefinition paginationInfo *paginationInfo resultsInfo *resultsInfo @@ -869,10 +869,10 @@ type paginationInfo struct { type resultsInfo struct { totalRecordsReturned int32 - results []*couchdb.QueryResult + results []*queryResult } -func newQueryScanner(namespace string, db *couchdb.CouchDatabase, query string, internalQueryLimit, +func newQueryScanner(namespace string, db *couchDatabase, query string, internalQueryLimit, limit int32, bookmark, startKey, endKey string) (*queryScanner, error) { scanner := &queryScanner{namespace, db, &queryDefinition{startKey, endKey, query, internalQueryLimit}, &paginationInfo{-1, limit, bookmark}, &resultsInfo{0, nil}} var err error @@ -919,9 +919,9 @@ func (scanner *queryScanner) Next() (statedb.QueryResult, error) { return nil, nil } selectedResultRecord := scanner.resultsInfo.results[scanner.paginationInfo.cursor] - key := selectedResultRecord.ID + key := selectedResultRecord.id // remove the reserved fields from CouchDB JSON and return the value and version - kv, err := couchDocToKeyValue(&couchdb.CouchDoc{JSONValue: selectedResultRecord.Value, Attachments: selectedResultRecord.Attachments}) + kv, err := couchDocToKeyValue(&couchDoc{jsonValue: selectedResultRecord.value, attachments: selectedResultRecord.attachments}) if err != nil { return nil, err } diff --git a/core/ledger/kvledger/txmgmt/statedb/statecouchdb/statecouchdb_test.go b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/statecouchdb_test.go index 0574de9a322..58530ca191c 100644 --- a/core/ledger/kvledger/txmgmt/statedb/statecouchdb/statecouchdb_test.go +++ b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/statecouchdb_test.go @@ -17,43 +17,139 @@ import ( "github.com/hyperledger/fabric/common/flogging" "github.com/hyperledger/fabric/common/ledger/dataformat" "github.com/hyperledger/fabric/common/metrics/disabled" + "github.com/hyperledger/fabric/core/ledger" "github.com/hyperledger/fabric/core/ledger/internal/version" 
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/statedb" "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/statedb/commontests" - "github.com/hyperledger/fabric/core/ledger/util/couchdb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -// couchDB backed versioned DB test environment. -var testEnv = &testVDBEnv{} +// testVDBEnv provides a couch db backed versioned db for testing +type testVDBEnv struct { + t *testing.T + DBProvider statedb.VersionedDBProvider + config *ledger.CouchDBConfig + cache *cache + sysNamespaces []string + couchDBEnv *testCouchDBEnv +} + +func (env *testVDBEnv) init(t *testing.T, sysNamespaces []string) { + t.Logf("Initializing TestVDBEnv") + + if env.couchDBEnv == nil { + couchDBEnv := &testCouchDBEnv{} + couchDBEnv.startCouchDB(t) + env.couchDBEnv = couchDBEnv + } + + redoPath, err := ioutil.TempDir("", "cvdbenv") + if err != nil { + t.Fatalf("Failed to create redo log directory: %s", err) + } + config := &ledger.CouchDBConfig{ + Address: env.couchDBEnv.couchAddress, + Username: "", + Password: "", + InternalQueryLimit: 1000, + MaxBatchUpdateSize: 1000, + MaxRetries: 3, + MaxRetriesOnStartup: 20, + RequestTimeout: 35 * time.Second, + RedoLogPath: redoPath, + UserCacheSizeMBs: 8, + } + + dbProvider, err := NewVersionedDBProvider(config, &disabled.Provider{}, sysNamespaces) + if err != nil { + t.Fatalf("Error creating CouchDB Provider: %s", err) + } + + env.t = t + env.config = config + env.DBProvider = dbProvider + env.config = config + env.cache = dbProvider.cache + env.sysNamespaces = sysNamespaces +} + +func (env *testVDBEnv) closeAndReopen() { + env.DBProvider.Close() + dbProvider, _ := NewVersionedDBProvider(env.config, &disabled.Provider{}, env.sysNamespaces) + env.DBProvider = dbProvider + env.cache = dbProvider.cache +} + +// Cleanup drops the test couch databases and closes the db provider +func (env *testVDBEnv) cleanup() { + env.t.Logf("Cleaningup TestVDBEnv") + if env.DBProvider 
!= nil { + env.DBProvider.Close() + } + env.couchDBEnv.cleanup(env.config) + os.RemoveAll(env.config.RedoLogPath) +} + +// testVDBEnv provides a couch db for testing +type testCouchDBEnv struct { + t *testing.T + couchAddress string + cleanupCouchDB func() +} + +// startCouchDB starts external couchDB resources for testCouchDBEnv. +func (env *testCouchDBEnv) startCouchDB(t *testing.T) { + if env.couchAddress != "" { + return + } + env.t = t + env.couchAddress, env.cleanupCouchDB = StartCouchDB(t, nil) +} + +// stopCouchDB stops external couchDB resources. +func (env *testCouchDBEnv) stopCouchDB() { + if env.couchAddress != "" { + env.cleanupCouchDB() + } +} + +func (env *testCouchDBEnv) cleanup(config *ledger.CouchDBConfig) { + DeleteApplicationDBs(env.t, config) +} + +// we create two CouchDB instances/containers---one is used to test the +// functionality of the versionedDB and another for testing the CouchDB +// util functions. +var vdbEnv = &testVDBEnv{} +var couchDBEnv = &testCouchDBEnv{} func TestMain(m *testing.M) { flogging.ActivateSpec("statecouchdb=debug") rc := m.Run() - testEnv.stopExternalResource() + if vdbEnv.couchDBEnv != nil { + vdbEnv.couchDBEnv.stopCouchDB() + } + couchDBEnv.stopCouchDB() os.Exit(rc) } func TestBasicRW(t *testing.T) { - env := testEnv - env.init(t, nil) - - defer env.cleanup() - commontests.TestBasicRW(t, env.DBProvider) + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() + commontests.TestBasicRW(t, vdbEnv.DBProvider) } // TestGetStateFromCache checks cache hits, cache misses, and cache // updates during GetState call. 
func TestGetStateFromCache(t *testing.T) { - env := testEnv - env.init(t, []string{"lscc", "_lifecycle"}) - defer env.cleanup() + vdbEnv.init(t, []string{"lscc", "_lifecycle"}) + defer vdbEnv.cleanup() chainID := "testgetstatefromcache" - db, err := env.DBProvider.GetDBHandle(chainID) + db, err := vdbEnv.DBProvider.GetDBHandle(chainID) require.NoError(t, err) // scenario 1: get state would receives a @@ -64,7 +160,7 @@ func TestGetStateFromCache(t *testing.T) { vv, err := db.GetState("ns", "key1") require.NoError(t, err) require.Nil(t, vv) - testDoesNotExistInCache(t, env.cache, chainID, "ns", "key1") + testDoesNotExistInCache(t, vdbEnv.cache, chainID, "ns", "key1") // scenario 2: get state would receive a cache hit. // directly store an entry in the cache @@ -74,7 +170,7 @@ func TestGetStateFromCache(t *testing.T) { Version: version.NewHeight(1, 1).ToBytes(), AdditionalInfo: []byte("rev1"), } - require.NoError(t, env.cache.putState(chainID, "ns", "key1", cacheValue)) + require.NoError(t, vdbEnv.cache.putState(chainID, "ns", "key1", cacheValue)) vv, err = db.GetState("ns", "key1") expectedVV, err := constructVersionedValue(cacheValue) @@ -95,7 +191,7 @@ func TestGetStateFromCache(t *testing.T) { db.ApplyUpdates(batch, savePoint) // Note that the ApplyUpdates() updates only the existing entry in the cache. Currently, the // cache has only ns, key1 but we are storing lscc, key1. Hence, no changes would happen in the cache. 
- testDoesNotExistInCache(t, env.cache, chainID, "lscc", "key1") + testDoesNotExistInCache(t, vdbEnv.cache, chainID, "lscc", "key1") // calling GetState() would update the cache vv, err = db.GetState("lscc", "key1") @@ -105,18 +201,17 @@ func TestGetStateFromCache(t *testing.T) { // cache should have been updated with lscc, key1 nsdb, err := db.(*VersionedDB).getNamespaceDBHandle("lscc") require.NoError(t, err) - testExistInCache(t, nsdb, env.cache, chainID, "lscc", "key1", vv2) + testExistInCache(t, nsdb, vdbEnv.cache, chainID, "lscc", "key1", vv2) } // TestGetVersionFromCache checks cache hits, cache misses, and // updates during GetVersion call. func TestGetVersionFromCache(t *testing.T) { - env := testEnv - env.init(t, []string{"lscc", "_lifecycle"}) - defer env.cleanup() + vdbEnv.init(t, []string{"lscc", "_lifecycle"}) + defer vdbEnv.cleanup() chainID := "testgetstatefromcache" - db, err := env.DBProvider.GetDBHandle(chainID) + db, err := vdbEnv.DBProvider.GetDBHandle(chainID) require.NoError(t, err) // scenario 1: get version would receives a @@ -127,7 +222,7 @@ func TestGetVersionFromCache(t *testing.T) { ver, err := db.GetVersion("ns", "key1") require.Nil(t, err) require.Nil(t, ver) - testDoesNotExistInCache(t, env.cache, chainID, "ns", "key1") + testDoesNotExistInCache(t, vdbEnv.cache, chainID, "ns", "key1") // scenario 2: get version would receive a cache hit. 
// directly store an entry in the cache @@ -137,7 +232,7 @@ func TestGetVersionFromCache(t *testing.T) { Version: version.NewHeight(1, 1).ToBytes(), AdditionalInfo: []byte("rev1"), } - require.NoError(t, env.cache.putState(chainID, "ns", "key1", cacheValue)) + require.NoError(t, vdbEnv.cache.putState(chainID, "ns", "key1", cacheValue)) ver, err = db.GetVersion("ns", "key1") expectedVer, _, err := version.NewHeightFromBytes(cacheValue.Version) @@ -158,7 +253,7 @@ func TestGetVersionFromCache(t *testing.T) { db.ApplyUpdates(batch, savePoint) // Note that the ApplyUpdates() updates only the existing entry in the cache. Currently, the // cache has only ns, key1 but we are storing lscc, key1. Hence, no changes would happen in the cache. - testDoesNotExistInCache(t, env.cache, chainID, "lscc", "key1") + testDoesNotExistInCache(t, vdbEnv.cache, chainID, "lscc", "key1") // calling GetVersion() would update the cache ver, err = db.GetVersion("lscc", "key1") @@ -168,18 +263,17 @@ func TestGetVersionFromCache(t *testing.T) { // cache should have been updated with lscc, key1 nsdb, err := db.(*VersionedDB).getNamespaceDBHandle("lscc") require.NoError(t, err) - testExistInCache(t, nsdb, env.cache, chainID, "lscc", "key1", vv2) + testExistInCache(t, nsdb, vdbEnv.cache, chainID, "lscc", "key1", vv2) } // TestGetMultipleStatesFromCache checks cache hits, cache misses, // and updates during GetStateMultipleKeys call. 
func TestGetMultipleStatesFromCache(t *testing.T) { - env := testEnv - env.init(t, []string{"lscc", "_lifecycle"}) - defer env.cleanup() + vdbEnv.init(t, []string{"lscc", "_lifecycle"}) + defer vdbEnv.cleanup() chainID := "testgetmultiplestatesfromcache" - db, err := env.DBProvider.GetDBHandle(chainID) + db, err := vdbEnv.DBProvider.GetDBHandle(chainID) require.NoError(t, err) // scenario: given 5 keys, get multiple states find @@ -195,14 +289,14 @@ func TestGetMultipleStatesFromCache(t *testing.T) { Version: version.NewHeight(1, 1).ToBytes(), AdditionalInfo: []byte("rev1"), } - require.NoError(t, env.cache.putState(chainID, "ns", "key1", cacheValue1)) + require.NoError(t, vdbEnv.cache.putState(chainID, "ns", "key1", cacheValue1)) cacheValue2 := &CacheValue{ Value: []byte("value2"), Metadata: []byte("meta2"), Version: version.NewHeight(1, 1).ToBytes(), AdditionalInfo: []byte("rev2"), } - require.NoError(t, env.cache.putState(chainID, "ns", "key2", cacheValue2)) + require.NoError(t, vdbEnv.cache.putState(chainID, "ns", "key2", cacheValue2)) // key3 and key4 exist only in the db batch := statedb.NewUpdateBatch() @@ -213,8 +307,8 @@ func TestGetMultipleStatesFromCache(t *testing.T) { savePoint := version.NewHeight(1, 2) db.ApplyUpdates(batch, savePoint) - testDoesNotExistInCache(t, env.cache, chainID, "ns", "key3") - testDoesNotExistInCache(t, env.cache, chainID, "ns", "key4") + testDoesNotExistInCache(t, vdbEnv.cache, chainID, "ns", "key3") + testDoesNotExistInCache(t, vdbEnv.cache, chainID, "ns", "key4") // key5 does not exist at all while key3 and key4 does not exist in the cache vvalues, err := db.GetStateMultipleKeys("ns", []string{"key1", "key2", "key3", "key4", "key5"}) @@ -228,19 +322,18 @@ func TestGetMultipleStatesFromCache(t *testing.T) { // cache should have been updated with key3 and key4 nsdb, err := db.(*VersionedDB).getNamespaceDBHandle("ns") require.NoError(t, err) - testExistInCache(t, nsdb, env.cache, chainID, "ns", "key3", vv3) - 
testExistInCache(t, nsdb, env.cache, chainID, "ns", "key4", vv4) + testExistInCache(t, nsdb, vdbEnv.cache, chainID, "ns", "key3", vv3) + testExistInCache(t, nsdb, vdbEnv.cache, chainID, "ns", "key4", vv4) } // TestCacheUpdatesAfterCommit checks whether the cache is updated // after a commit of a update batch. func TestCacheUpdatesAfterCommit(t *testing.T) { - env := testEnv - env.init(t, []string{"lscc", "_lifecycle"}) - defer env.cleanup() + vdbEnv.init(t, []string{"lscc", "_lifecycle"}) + defer vdbEnv.cleanup() chainID := "testcacheupdatesaftercommit" - db, err := env.DBProvider.GetDBHandle(chainID) + db, err := vdbEnv.DBProvider.GetDBHandle(chainID) require.NoError(t, err) // scenario: cache has 4 keys while the commit operation @@ -264,10 +357,10 @@ func TestCacheUpdatesAfterCommit(t *testing.T) { db.ApplyUpdates(batch, savePoint) // key1, key2 in ns1 and ns2 would not be in cache - testDoesNotExistInCache(t, env.cache, chainID, "ns1", "key1") - testDoesNotExistInCache(t, env.cache, chainID, "ns1", "key2") - testDoesNotExistInCache(t, env.cache, chainID, "ns2", "key1") - testDoesNotExistInCache(t, env.cache, chainID, "ns2", "key2") + testDoesNotExistInCache(t, vdbEnv.cache, chainID, "ns1", "key1") + testDoesNotExistInCache(t, vdbEnv.cache, chainID, "ns1", "key2") + testDoesNotExistInCache(t, vdbEnv.cache, chainID, "ns2", "key1") + testDoesNotExistInCache(t, vdbEnv.cache, chainID, "ns2", "key2") // add key1 and key2 from ns1 to the cache _, err = db.GetState("ns1", "key1") @@ -280,11 +373,11 @@ func TestCacheUpdatesAfterCommit(t *testing.T) { _, err = db.GetState("ns2", "key2") require.NoError(t, err) - v, err := env.cache.getState(chainID, "ns1", "key1") + v, err := vdbEnv.cache.getState(chainID, "ns1", "key1") require.NoError(t, err) ns1key1rev := string(v.AdditionalInfo) - v, err = env.cache.getState(chainID, "ns1", "key2") + v, err = vdbEnv.cache.getState(chainID, "ns1", "key2") require.NoError(t, err) ns1key2rev := string(v.AdditionalInfo) @@ -305,123 
+398,111 @@ func TestCacheUpdatesAfterCommit(t *testing.T) { db.ApplyUpdates(batch, savePoint) // cache should have only the update key1 and key2 in ns1 - cacheValue, err := env.cache.getState(chainID, "ns1", "key1") + cacheValue, err := vdbEnv.cache.getState(chainID, "ns1", "key1") require.NoError(t, err) vv, err := constructVersionedValue(cacheValue) require.NoError(t, err) require.Equal(t, vv1Update, vv) require.NotEqual(t, ns1key1rev, string(cacheValue.AdditionalInfo)) - cacheValue, err = env.cache.getState(chainID, "ns1", "key2") + cacheValue, err = vdbEnv.cache.getState(chainID, "ns1", "key2") require.NoError(t, err) vv, err = constructVersionedValue(cacheValue) require.NoError(t, err) require.Equal(t, vv2Update, vv) require.NotEqual(t, ns1key2rev, string(cacheValue.AdditionalInfo)) - testDoesNotExistInCache(t, env.cache, chainID, "ns2", "key1") - testDoesNotExistInCache(t, env.cache, chainID, "ns2", "key2") - testDoesNotExistInCache(t, env.cache, chainID, "ns2", "key3") + testDoesNotExistInCache(t, vdbEnv.cache, chainID, "ns2", "key1") + testDoesNotExistInCache(t, vdbEnv.cache, chainID, "ns2", "key2") + testDoesNotExistInCache(t, vdbEnv.cache, chainID, "ns2", "key3") } func TestMultiDBBasicRW(t *testing.T) { - env := testEnv - env.init(t, nil) - defer env.cleanup() + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() - commontests.TestMultiDBBasicRW(t, env.DBProvider) + commontests.TestMultiDBBasicRW(t, vdbEnv.DBProvider) } func TestDeletes(t *testing.T) { - env := testEnv - env.init(t, nil) - defer env.cleanup() + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() - commontests.TestDeletes(t, env.DBProvider) + commontests.TestDeletes(t, vdbEnv.DBProvider) } func TestIterator(t *testing.T) { - env := testEnv - env.init(t, nil) - defer env.cleanup() + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() - commontests.TestIterator(t, env.DBProvider) + commontests.TestIterator(t, vdbEnv.DBProvider) } // The following tests are unique to couchdb, they are not used in leveldb // 
query test func TestQuery(t *testing.T) { - env := testEnv - env.init(t, nil) - defer env.cleanup() + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() - commontests.TestQuery(t, env.DBProvider) + commontests.TestQuery(t, vdbEnv.DBProvider) } func TestGetStateMultipleKeys(t *testing.T) { - env := testEnv - env.init(t, nil) - defer env.cleanup() + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() - commontests.TestGetStateMultipleKeys(t, env.DBProvider) + commontests.TestGetStateMultipleKeys(t, vdbEnv.DBProvider) } func TestGetVersion(t *testing.T) { - env := testEnv - env.init(t, nil) - defer env.cleanup() + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() - commontests.TestGetVersion(t, env.DBProvider) + commontests.TestGetVersion(t, vdbEnv.DBProvider) } func TestSmallBatchSize(t *testing.T) { - env := testEnv - env.init(t, nil) - defer env.cleanup() + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() - commontests.TestSmallBatchSize(t, env.DBProvider) + commontests.TestSmallBatchSize(t, vdbEnv.DBProvider) } func TestBatchRetry(t *testing.T) { - env := testEnv - env.init(t, nil) - defer env.cleanup() + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() - commontests.TestBatchWithIndividualRetry(t, env.DBProvider) + commontests.TestBatchWithIndividualRetry(t, vdbEnv.DBProvider) } func TestValueAndMetadataWrites(t *testing.T) { - env := testEnv - env.init(t, nil) - defer env.cleanup() + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() - commontests.TestValueAndMetadataWrites(t, env.DBProvider) + commontests.TestValueAndMetadataWrites(t, vdbEnv.DBProvider) } func TestPaginatedRangeQuery(t *testing.T) { - env := testEnv - env.init(t, nil) - defer env.cleanup() + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() - commontests.TestPaginatedRangeQuery(t, env.DBProvider) + commontests.TestPaginatedRangeQuery(t, vdbEnv.DBProvider) } func TestRangeQuerySpecialCharacters(t *testing.T) { - env := testEnv - env.init(t, nil) - defer env.cleanup() + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() - 
commontests.TestRangeQuerySpecialCharacters(t, env.DBProvider) + commontests.TestRangeQuerySpecialCharacters(t, vdbEnv.DBProvider) } // TestUtilityFunctions tests utility functions func TestUtilityFunctions(t *testing.T) { - env := testEnv - env.init(t, nil) - defer env.cleanup() + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() - db, err := env.DBProvider.GetDBHandle("testutilityfunctions") + db, err := vdbEnv.DBProvider.GetDBHandle("testutilityfunctions") assert.NoError(t, err) // BytesKeySupported should be false for CouchDB @@ -468,11 +549,10 @@ func TestUtilityFunctions(t *testing.T) { // TestInvalidJSONFields tests for invalid JSON fields func TestInvalidJSONFields(t *testing.T) { - env := testEnv - env.init(t, nil) - defer env.cleanup() + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() - db, err := env.DBProvider.GetDBHandle("testinvalidfields") + db, err := vdbEnv.DBProvider.GetDBHandle("testinvalidfields") assert.NoError(t, err) db.Open() @@ -512,7 +592,6 @@ func TestInvalidJSONFields(t *testing.T) { } func TestDebugFunctions(t *testing.T) { - //Test printCompositeKeys // initialize a key list loadKeys := []*statedb.CompositeKey{} @@ -526,11 +605,10 @@ func TestDebugFunctions(t *testing.T) { } func TestHandleChaincodeDeploy(t *testing.T) { - env := testEnv - env.init(t, nil) - defer env.cleanup() + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() - db, err := env.DBProvider.GetDBHandle("testinit") + db, err := vdbEnv.DBProvider.GetDBHandle("testinit") assert.NoError(t, err) db.Open() defer db.Close() @@ -594,12 +672,11 @@ func TestHandleChaincodeDeploy(t *testing.T) { assert.Error(t, err, "Error should have been thrown for a missing index") indexCapable, ok := db.(statedb.IndexCapable) - if !ok { t.Fatalf("Couchdb state impl is expected to implement interface `statedb.IndexCapable`") } + assert.NoError(t, indexCapable.ProcessIndexesForChaincodeDeploy("ns1", indexData)) - indexCapable.ProcessIndexesForChaincodeDeploy("ns1", indexData) queryString = 
`{"selector":{"owner":"fred"}, "sort": [{"size": "desc"}]}` queryUsingIndex := func() bool { _, err = db.ExecuteQuery("ns1", queryString) @@ -630,10 +707,9 @@ func TestTryCastingToJSON(t *testing.T) { func TestHandleChaincodeDeployErroneousIndexFile(t *testing.T) { channelName := "ch1" - env := testEnv - env.init(t, nil) - defer env.cleanup() - db, err := env.DBProvider.GetDBHandle(channelName) + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() + db, err := vdbEnv.DBProvider.GetDBHandle(channelName) assert.NoError(t, err) db.Open() defer db.Close() @@ -652,7 +728,7 @@ func TestHandleChaincodeDeployErroneousIndexFile(t *testing.T) { if !ok { t.Fatalf("Couchdb state impl is expected to implement interface `statedb.IndexCapable`") } - indexCapable.ProcessIndexesForChaincodeDeploy("ns1", indexData) + assert.Error(t, indexCapable.ProcessIndexesForChaincodeDeploy("ns1", indexData)) queryString := `{"selector":{"owner":"fred"}, "sort": [{"size": "desc"}]}` queryUsingIndex := func() bool { @@ -674,7 +750,6 @@ func TestIsBulkOptimizable(t *testing.T) { } func printCompositeKeys(keys []*statedb.CompositeKey) string { - compositeKeyString := []string{} for _, key := range keys { compositeKeyString = append(compositeKeyString, "["+key.Namespace+","+key.Key+"]") @@ -684,11 +759,10 @@ func printCompositeKeys(keys []*statedb.CompositeKey) string { // TestPaginatedQuery tests queries with pagination func TestPaginatedQuery(t *testing.T) { - env := testEnv - env.init(t, nil) - defer env.cleanup() + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() - db, err := env.DBProvider.GetDBHandle("testpaginatedquery") + db, err := vdbEnv.DBProvider.GetDBHandle("testpaginatedquery") assert.NoError(t, err) db.Open() defer db.Close() @@ -857,7 +931,6 @@ func TestPaginatedQuery(t *testing.T) { } func executeQuery(t *testing.T, db statedb.VersionedDB, namespace, query, bookmark string, limit int32, returnKeys []string) (string, error) { - var itr statedb.ResultsIterator var err error @@ -894,7 +967,6 @@ 
func executeQuery(t *testing.T, db statedb.VersionedDB, namespace, query, bookma // TestPaginatedQueryValidation tests queries with pagination func TestPaginatedQueryValidation(t *testing.T) { - queryOptions := make(map[string]interface{}) queryOptions["bookmark"] = "Test1" queryOptions["limit"] = int32(10) @@ -938,23 +1010,21 @@ func TestPaginatedQueryValidation(t *testing.T) { } func TestApplyUpdatesWithNilHeight(t *testing.T) { - env := testEnv - env.init(t, nil) - defer env.cleanup() - commontests.TestApplyUpdatesWithNilHeight(t, env.DBProvider) + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() + commontests.TestApplyUpdatesWithNilHeight(t, vdbEnv.DBProvider) } func TestRangeScanWithCouchInternalDocsPresent(t *testing.T) { - env := testEnv - env.init(t, nil) - defer env.cleanup() - db, err := env.DBProvider.GetDBHandle("testrangescanfiltercouchinternaldocs") + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() + db, err := vdbEnv.DBProvider.GetDBHandle("testrangescanfiltercouchinternaldocs") assert.NoError(t, err) couchDatabse, err := db.(*VersionedDB).getNamespaceDBHandle("ns") assert.NoError(t, err) db.Open() defer db.Close() - _, err = couchDatabse.CreateIndex(`{ + _, err = couchDatabse.createIndex(`{ "index" : {"fields" : ["asset_name"]}, "ddoc" : "indexAssetName", "name" : "indexAssetName", @@ -962,7 +1032,7 @@ func TestRangeScanWithCouchInternalDocsPresent(t *testing.T) { }`) assert.NoError(t, err) - _, err = couchDatabse.CreateIndex(`{ + _, err = couchDatabse.createIndex(`{ "index" : {"fields" : ["assetValue"]}, "ddoc" : "indexAssetValue", "name" : "indexAssetValue", @@ -1008,10 +1078,10 @@ func TestRangeScanWithCouchInternalDocsPresent(t *testing.T) { assert.Equal(t, "key-3", s.queryDefinition.startKey) } -func assertQueryResults(t *testing.T, results []*couchdb.QueryResult, expectedIds []string) { +func assertQueryResults(t *testing.T, results []*queryResult, expectedIds []string) { var actualIds []string for _, res := range results { - actualIds = 
append(actualIds, res.ID) + actualIds = append(actualIds, res.id) } assert.Equal(t, expectedIds, actualIds) } @@ -1067,22 +1137,22 @@ func TestFormatCheck(t *testing.T) { }, } - testEnv.init(t, nil) + vdbEnv.init(t, nil) for i, testCase := range testCases { t.Run( fmt.Sprintf("testCase %d", i), func(t *testing.T) { - testFormatCheck(t, testCase.dataFormat, testCase.dataExists, testCase.expectedErr, testCase.expectedFormat, testEnv.couchAddress) + testFormatCheck(t, testCase.dataFormat, testCase.dataExists, testCase.expectedErr, testCase.expectedFormat, vdbEnv) }) } } -func testFormatCheck(t *testing.T, dataFormat string, dataExists bool, expectedErr *dataformat.ErrVersionMismatch, expectedFormat, couchAddress string) { +func testFormatCheck(t *testing.T, dataFormat string, dataExists bool, expectedErr *dataformat.ErrVersionMismatch, expectedFormat string, vdbEnv *testVDBEnv) { redoPath, err := ioutil.TempDir("", "redoPath") require.NoError(t, err) defer os.RemoveAll(redoPath) - config := &couchdb.Config{ - Address: couchAddress, + config := &ledger.CouchDBConfig{ + Address: vdbEnv.couchDBEnv.couchAddress, MaxRetries: 3, MaxRetriesOnStartup: 20, RequestTimeout: 35 * time.Second, @@ -1100,12 +1170,12 @@ func testFormatCheck(t *testing.T, dataFormat string, dataExists bool, expectedE require.NoError(t, db.ApplyUpdates(batch, version.NewHeight(1, 1))) } if dataFormat == "" { - testutilDropDB(t, dbProvider.couchInstance, fabricInternalDBName) + dropDB(t, dbProvider.couchInstance, fabricInternalDBName) } else { require.NoError(t, writeDataFormatVersion(dbProvider.couchInstance, dataFormat)) } dbProvider.Close() - defer cleanupDB(t, dbProvider.couchInstance) + defer DeleteApplicationDBs(t, vdbEnv.config) // close and reopen with preconditions set and check the expected behavior dbProvider, err = NewVersionedDBProvider(config, &disabled.Provider{}, nil) @@ -1130,7 +1200,7 @@ func testDoesNotExistInCache(t *testing.T, cache *cache, chainID, ns, key string require.Nil(t, 
cacheValue) } -func testExistInCache(t *testing.T, db *couchdb.CouchDatabase, cache *cache, chainID, ns, key string, expectedVV *statedb.VersionedValue) { +func testExistInCache(t *testing.T, db *couchDatabase, cache *cache, chainID, ns, key string, expectedVV *statedb.VersionedValue) { cacheValue, err := cache.getState(chainID, ns, key) require.NoError(t, err) vv, err := constructVersionedValue(cacheValue) @@ -1142,12 +1212,11 @@ func testExistInCache(t *testing.T, db *couchdb.CouchDatabase, cache *cache, cha } func TestLoadCommittedVersion(t *testing.T) { - env := testEnv - env.init(t, []string{"lscc", "_lifecycle"}) - defer env.cleanup() + vdbEnv.init(t, []string{"lscc", "_lifecycle"}) + defer vdbEnv.cleanup() chainID := "testloadcommittedversion" - db, err := env.DBProvider.GetDBHandle(chainID) + db, err := vdbEnv.DBProvider.GetDBHandle(chainID) require.NoError(t, err) // scenario: state cache has (ns1, key1), (ns1, key2), @@ -1164,7 +1233,7 @@ func TestLoadCommittedVersion(t *testing.T) { Version: version.NewHeight(1, 1).ToBytes(), AdditionalInfo: []byte("rev1"), } - require.NoError(t, env.cache.putState(chainID, "ns1", "key1", cacheValue)) + require.NoError(t, vdbEnv.cache.putState(chainID, "ns1", "key1", cacheValue)) cacheValue = &CacheValue{ Value: []byte("value2"), @@ -1172,7 +1241,7 @@ func TestLoadCommittedVersion(t *testing.T) { Version: version.NewHeight(1, 2).ToBytes(), AdditionalInfo: []byte("rev2"), } - require.NoError(t, env.cache.putState(chainID, "ns1", "key2", cacheValue)) + require.NoError(t, vdbEnv.cache.putState(chainID, "ns1", "key2", cacheValue)) cacheValue = &CacheValue{ Value: []byte("value3"), @@ -1180,7 +1249,7 @@ func TestLoadCommittedVersion(t *testing.T) { Version: version.NewHeight(1, 3).ToBytes(), AdditionalInfo: []byte("rev3"), } - require.NoError(t, env.cache.putState(chainID, "ns2", "key1", cacheValue)) + require.NoError(t, vdbEnv.cache.putState(chainID, "ns2", "key1", cacheValue)) // store (ns2, key2) in the db batch := 
statedb.NewUpdateBatch() @@ -1239,11 +1308,10 @@ func TestLoadCommittedVersion(t *testing.T) { } func TestMissingRevisionRetrievalFromDB(t *testing.T) { - env := testEnv - env.init(t, nil) - defer env.cleanup() + vdbEnv.init(t, nil) + defer vdbEnv.cleanup() chainID := "testmissingrevisionfromdb" - db, err := env.DBProvider.GetDBHandle(chainID) + db, err := vdbEnv.DBProvider.GetDBHandle(chainID) require.NoError(t, err) // store key1, key2, key3 to the DB @@ -1281,12 +1349,11 @@ func TestMissingRevisionRetrievalFromDB(t *testing.T) { } func TestMissingRevisionRetrievalFromCache(t *testing.T) { - env := testEnv - env.init(t, []string{"lscc", "_lifecycle"}) - defer env.cleanup() + vdbEnv.init(t, []string{"lscc", "_lifecycle"}) + defer vdbEnv.cleanup() chainID := "testmissingrevisionfromcache" - db, err := env.DBProvider.GetDBHandle(chainID) + db, err := vdbEnv.DBProvider.GetDBHandle(chainID) require.NoError(t, err) // scenario 1: missing from cache. @@ -1297,7 +1364,7 @@ func TestMissingRevisionRetrievalFromCache(t *testing.T) { require.Empty(t, revisions) // scenario 2: key1 is available in the cache - require.NoError(t, env.cache.putState(chainID, "ns1", "key1", &CacheValue{AdditionalInfo: []byte("rev1")})) + require.NoError(t, vdbEnv.cache.putState(chainID, "ns1", "key1", &CacheValue{AdditionalInfo: []byte("rev1")})) revisions = make(map[string]string) stillMissingKeys, err = db.(*VersionedDB).addMissingRevisionsFromCache("ns1", []string{"key1", "key2"}, revisions) require.NoError(t, err) @@ -1305,7 +1372,7 @@ func TestMissingRevisionRetrievalFromCache(t *testing.T) { require.Equal(t, "rev1", revisions["key1"]) // scenario 3: both key1 and key2 are available in the cache - require.NoError(t, env.cache.putState(chainID, "ns1", "key2", &CacheValue{AdditionalInfo: []byte("rev2")})) + require.NoError(t, vdbEnv.cache.putState(chainID, "ns1", "key2", &CacheValue{AdditionalInfo: []byte("rev2")})) revisions = make(map[string]string) stillMissingKeys, err = 
db.(*VersionedDB).addMissingRevisionsFromCache("ns1", []string{"key1", "key2"}, revisions) require.NoError(t, err) diff --git a/core/ledger/kvledger/txmgmt/statedb/statecouchdb/statecouchdb_test_export.go b/core/ledger/kvledger/txmgmt/statedb/statecouchdb/statecouchdb_test_export.go deleted file mode 100644 index ad2eb5c487a..00000000000 --- a/core/ledger/kvledger/txmgmt/statedb/statecouchdb/statecouchdb_test_export.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package statecouchdb - -import ( - "io/ioutil" - "os" - "testing" - "time" - - "github.com/hyperledger/fabric/common/metrics/disabled" - "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/statedb" - "github.com/hyperledger/fabric/core/ledger/util/couchdb" - "github.com/hyperledger/fabric/core/ledger/util/couchdbtest" - "github.com/stretchr/testify/require" -) - -// testVDBEnv provides a couch db backed versioned db for testing -type testVDBEnv struct { - t testing.TB - couchAddress string - DBProvider statedb.VersionedDBProvider - config *couchdb.Config - cleanupCouchDB func() - cache *cache - sysNamespaces []string -} - -func (env *testVDBEnv) init(t testing.TB, sysNamespaces []string) { - t.Logf("Initializing TestVDBEnv") - redoPath, err := ioutil.TempDir("", "cvdbenv") - if err != nil { - t.Fatalf("Failed to create redo log directory: %s", err) - } - - env.startExternalResource() - - config := &couchdb.Config{ - Address: env.couchAddress, - Username: "", - Password: "", - InternalQueryLimit: 1000, - MaxBatchUpdateSize: 1000, - MaxRetries: 3, - MaxRetriesOnStartup: 20, - RequestTimeout: 35 * time.Second, - RedoLogPath: redoPath, - UserCacheSizeMBs: 8, - } - dbProvider, err := NewVersionedDBProvider(config, &disabled.Provider{}, sysNamespaces) - if err != nil { - t.Fatalf("Error creating CouchDB Provider: %s", err) - } - - env.t = t - env.DBProvider = dbProvider - env.config = config - env.cache = dbProvider.cache - 
env.sysNamespaces = sysNamespaces -} - -// startExternalResource sstarts external couchDB resources for testVDBEnv. -func (env *testVDBEnv) startExternalResource() { - if env.couchAddress == "" { - env.couchAddress, env.cleanupCouchDB = couchdbtest.CouchDBSetup(nil) - } -} - -// stopExternalResource stops external couchDB resources. -func (env *testVDBEnv) stopExternalResource() { - if env.couchAddress != "" { - env.cleanupCouchDB() - } -} - -func (env *testVDBEnv) closeAndReopen() { - env.DBProvider.Close() - dbProvider, _ := NewVersionedDBProvider(env.config, &disabled.Provider{}, env.sysNamespaces) - env.DBProvider = dbProvider - env.cache = dbProvider.cache -} - -// Cleanup drops the test couch databases and closes the db provider -func (env *testVDBEnv) cleanup() { - env.t.Logf("Cleaningup TestVDBEnv") - cleanupDB(env.t, env.DBProvider.(*VersionedDBProvider).couchInstance) - env.DBProvider.Close() - os.RemoveAll(env.config.RedoLogPath) -} - -// CleanupDB deletes all the databases other than fabric internal database -func CleanupDB(t testing.TB, dbProvider statedb.VersionedDBProvider) { - cleanupDB(t, dbProvider.(*VersionedDBProvider).couchInstance) -} - -func cleanupDB(t testing.TB, couchInstance *couchdb.CouchInstance) { - dbNames, err := couchInstance.RetrieveApplicationDBNames() - require.NoError(t, err) - for _, dbName := range dbNames { - if dbName != fabricInternalDBName { - testutilDropDB(t, couchInstance, dbName) - } - } -} - -func testutilDropDB(t testing.TB, couchInstance *couchdb.CouchInstance, dbName string) { - db := &couchdb.CouchDatabase{ - CouchInstance: couchInstance, - DBName: dbName, - } - response, err := db.DropDatabase() - require.NoError(t, err) - require.True(t, response.Ok) -} diff --git a/core/ledger/ledger_interface.go b/core/ledger/ledger_interface.go index 972a37e2fd2..5fea8914350 100644 --- a/core/ledger/ledger_interface.go +++ b/core/ledger/ledger_interface.go @@ -8,6 +8,7 @@ package ledger import ( "fmt" + "time" 
"github.com/golang/protobuf/proto" "github.com/hyperledger/fabric-lib-go/healthz" @@ -18,7 +19,6 @@ import ( "github.com/hyperledger/fabric/bccsp" commonledger "github.com/hyperledger/fabric/common/ledger" "github.com/hyperledger/fabric/common/metrics" - "github.com/hyperledger/fabric/core/ledger/util/couchdb" ) // Initializer encapsulates dependencies for PeerLedgerProvider @@ -53,7 +53,45 @@ type StateDBConfig struct { StateDatabase string // CouchDB is the configuration for CouchDB. It is used when StateDatabase // is set to "CouchDB". - CouchDB *couchdb.Config + CouchDB *CouchDBConfig +} + +// CouchDBConfig is a structure used to configure a CouchInstance. +type CouchDBConfig struct { + // Address is the hostname:port of the CouchDB database instance. + Address string + // Username is the username used to authenticate with CouchDB. This username + // must have read and write access permissions. + Username string + // Password is the password for Username. + Password string + // MaxRetries is the maximum number of times to retry CouchDB operations on + // failure. + MaxRetries int + // MaxRetriesOnStartup is the maximum number of times to retry CouchDB operations on + // failure when initializing the ledger. + MaxRetriesOnStartup int + // RequestTimeout is the timeout used for CouchDB operations. + RequestTimeout time.Duration + // InternalQueryLimit is the maximum number of records to return internally + // when querying CouchDB. + InternalQueryLimit int + // MaxBatchUpdateSize is the maximum number of records to included in CouchDB + // bulk update operations. + MaxBatchUpdateSize int + // WarmIndexesAfterNBlocks is the number of blocks after which to warm any + // CouchDB indexes. + WarmIndexesAfterNBlocks int + // CreateGlobalChangesDB determines whether or not to create the "_global_changes" + // system database. + CreateGlobalChangesDB bool + // RedoLogPath is the directory where the CouchDB redo log files are stored. 
+ RedoLogPath string + // UserCacheSizeMBs denotes the user specified maximum mega bytes (MB) to be allocated + // for the user state cache (i.e., all chaincodes deployed by the user). Note that + // UserCacheSizeMBs needs to be a multiple of 32 MB. If it is not a multiple of 32 MB, + // the peer would round the size to the next multiple of 32 MB. + UserCacheSizeMBs int } // PrivateDataConfig is a structure used to configure a private data storage provider. diff --git a/core/ledger/util/couchdbtest/couchdb.go b/core/ledger/util/couchdbtest/couchdb.go deleted file mode 100644 index 364b041e39a..00000000000 --- a/core/ledger/util/couchdbtest/couchdb.go +++ /dev/null @@ -1,34 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package couchdbtest - -import ( - "fmt" - "os" - - "github.com/hyperledger/fabric/integration/runner" -) - -// CouchDBSetup setup external couchDB resource. -func CouchDBSetup(binds []string) (addr string, cleanup func()) { - // check if couchDB is being started externally. - externalCouch, set := os.LookupEnv("COUCHDB_ADDR") - if set { - return externalCouch, func() {} - } - - couchDB := &runner.CouchDB{} - couchDB.Binds = binds - - err := couchDB.Start() - if err != nil { - err = fmt.Errorf("failed to start couchDB : %s", err) - panic(err) - } - - return couchDB.Address(), func() { couchDB.Stop() } -} diff --git a/docs/source/metrics_reference.rst b/docs/source/metrics_reference.rst index 1900cb92c94..117e5e7f7bf 100644 --- a/docs/source/metrics_reference.rst +++ b/docs/source/metrics_reference.rst @@ -11,194 +11,188 @@ The following orderer metrics are exported for consumption by Prometheus. 
+----------------------------------------------+-----------+------------------------------------------------------------+--------------------------------------------------------------------------------+ | Name | Type | Description | Labels | -+==============================================+===========+============================================================+===============+================================================================+ -| blockcutter_block_fill_duration | histogram | The time from first transaction enqueing to the block | channel | | -| | | being cut in seconds. | | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| broadcast_enqueue_duration | histogram | The time to enqueue a transaction in seconds. | channel | | -| | | +---------------+----------------------------------------------------------------+ -| | | | type | | -| | | +---------------+----------------------------------------------------------------+ -| | | | status | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| broadcast_processed_count | counter | The number of transactions processed. | channel | | -| | | +---------------+----------------------------------------------------------------+ -| | | | type | | -| | | +---------------+----------------------------------------------------------------+ -| | | | status | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| broadcast_validate_duration | histogram | The time to validate a transaction in seconds. 
| channel | | -| | | +---------------+----------------------------------------------------------------+ -| | | | type | | -| | | +---------------+----------------------------------------------------------------+ -| | | | status | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| cluster_comm_egress_queue_capacity | gauge | Capacity of the egress queue. | host | | -| | | +---------------+----------------------------------------------------------------+ -| | | | msg_type | | -| | | +---------------+----------------------------------------------------------------+ -| | | | channel | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| cluster_comm_egress_queue_length | gauge | Length of the egress queue. | host | | -| | | +---------------+----------------------------------------------------------------+ -| | | | msg_type | | -| | | +---------------+----------------------------------------------------------------+ -| | | | channel | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| cluster_comm_egress_queue_workers | gauge | Count of egress queue workers. | channel | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| cluster_comm_egress_stream_count | gauge | Count of streams to other nodes. 
| channel | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| cluster_comm_egress_tls_connection_count | gauge | Count of TLS connections to other nodes. | | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| cluster_comm_ingress_stream_count | gauge | Count of streams from other nodes. | | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| cluster_comm_msg_dropped_count | counter | Count of messages dropped. | host | | -| | | +---------------+----------------------------------------------------------------+ -| | | | channel | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| cluster_comm_msg_send_time | histogram | The time it takes to send a message in seconds. | host | | -| | | +---------------+----------------------------------------------------------------+ -| | | | channel | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| consensus_etcdraft_active_nodes | gauge | Number of active nodes in this channel. 
| channel | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| consensus_etcdraft_cluster_size | gauge | Number of nodes in this channel. | channel | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| consensus_etcdraft_committed_block_number | gauge | The block number of the latest block committed. | channel | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| consensus_etcdraft_config_proposals_received | counter | The total number of proposals received for config type | channel | | -| | | transactions. | | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| consensus_etcdraft_data_persist_duration | histogram | The time taken for etcd/raft data to be persisted in | channel | | -| | | storage (in seconds). | | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| consensus_etcdraft_is_leader | gauge | The leadership status of the current node: 1 if it is the | channel | | -| | | leader else 0. 
| | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| consensus_etcdraft_leader_changes | counter | The number of leader changes since process start. | channel | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| consensus_etcdraft_normal_proposals_received | counter | The total number of proposals received for normal type | channel | | -| | | transactions. | | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| consensus_etcdraft_proposal_failures | counter | The number of proposal failures. | channel | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| consensus_etcdraft_snapshot_block_number | gauge | The block number of the latest snapshot. | channel | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| consensus_kafka_batch_size | gauge | The mean batch size in bytes sent to topics. | topic | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| consensus_kafka_compression_ratio | gauge | The mean compression ratio (as percentage) for topics. 
| topic | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| consensus_kafka_incoming_byte_rate | gauge | Bytes/second read off brokers. | broker_id | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| consensus_kafka_last_offset_persisted | gauge | The offset specified in the block metadata of the most | channel | | -| | | recently committed block. | | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| consensus_kafka_outgoing_byte_rate | gauge | Bytes/second written to brokers. | broker_id | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| consensus_kafka_record_send_rate | gauge | The number of records per second sent to topics. | topic | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| consensus_kafka_records_per_request | gauge | The mean number of records sent per request to topics. | topic | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| consensus_kafka_request_latency | gauge | The mean request latency in ms to brokers. 
| broker_id | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| consensus_kafka_request_rate | gauge | Requests/second sent to brokers. | broker_id | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| consensus_kafka_request_size | gauge | The mean request size in bytes to brokers. | broker_id | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| consensus_kafka_response_rate | gauge | Requests/second sent to brokers. | broker_id | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| consensus_kafka_response_size | gauge | The mean response size in bytes from brokers. 
| broker_id | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| couchdb_processing_time | histogram | Time taken in seconds for the function to complete request | database | | -| | | to CouchDB +---------------+----------------------------------------------------------------+ -| | | | function_name | | -| | | +---------------+----------------------------------------------------------------+ -| | | | result | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| deliver_blocks_sent | counter | The number of blocks sent by the deliver service. | channel | | -| | | +---------------+----------------------------------------------------------------+ -| | | | filtered | | -| | | +---------------+----------------------------------------------------------------+ -| | | | data_type | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| deliver_requests_completed | counter | The number of deliver requests that have been completed. 
| channel | | -| | | +---------------+----------------------------------------------------------------+ -| | | | filtered | | -| | | +---------------+----------------------------------------------------------------+ -| | | | data_type | | -| | | +---------------+----------------------------------------------------------------+ -| | | | success | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| deliver_requests_received | counter | The number of deliver requests that have been received. | channel | | -| | | +---------------+----------------------------------------------------------------+ -| | | | filtered | | -| | | +---------------+----------------------------------------------------------------+ -| | | | data_type | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| deliver_streams_closed | counter | The number of GRPC streams that have been closed for the | | | -| | | deliver service. | | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| deliver_streams_opened | counter | The number of GRPC streams that have been opened for the | | | -| | | deliver service. | | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| fabric_version | gauge | The active version of Fabric. 
| version | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| grpc_comm_conn_closed | counter | gRPC connections closed. Open minus closed is the active | | | -| | | number of connections. | | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| grpc_comm_conn_opened | counter | gRPC connections opened. Open minus closed is the active | | | -| | | number of connections. | | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| grpc_server_stream_messages_received | counter | The number of stream messages received. | service | | -| | | +---------------+----------------------------------------------------------------+ -| | | | method | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| grpc_server_stream_messages_sent | counter | The number of stream messages sent. | service | | -| | | +---------------+----------------------------------------------------------------+ -| | | | method | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| grpc_server_stream_request_duration | histogram | The time to complete a stream request. 
| service | | -| | | +---------------+----------------------------------------------------------------+ -| | | | method | | -| | | +---------------+----------------------------------------------------------------+ -| | | | code | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| grpc_server_stream_requests_completed | counter | The number of stream requests completed. | service | | -| | | +---------------+----------------------------------------------------------------+ -| | | | method | | -| | | +---------------+----------------------------------------------------------------+ -| | | | code | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| grpc_server_stream_requests_received | counter | The number of stream requests received. | service | | -| | | +---------------+----------------------------------------------------------------+ -| | | | method | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| grpc_server_unary_request_duration | histogram | The time to complete a unary request. 
| service | | -| | | +---------------+----------------------------------------------------------------+ -| | | | method | | -| | | +---------------+----------------------------------------------------------------+ -| | | | code | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| grpc_server_unary_requests_completed | counter | The number of unary requests completed. | service | | -| | | +---------------+----------------------------------------------------------------+ -| | | | method | | -| | | +---------------+----------------------------------------------------------------+ -| | | | code | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| grpc_server_unary_requests_received | counter | The number of unary requests received. | service | | -| | | +---------------+----------------------------------------------------------------+ -| | | | method | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| ledger_blockchain_height | gauge | Height of the chain in blocks. | channel | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| ledger_blockstorage_commit_time | histogram | Time taken in seconds for committing the block to storage. 
| channel | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| logging_entries_checked | counter | Number of log entries checked against the active logging | level | | -| | | level | | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ -| logging_entries_written | counter | Number of log entries that are written | level | | -+----------------------------------------------+-----------+------------------------------------------------------------+---------------+----------------------------------------------------------------+ ++==============================================+===========+============================================================+===========+====================================================================+ +| blockcutter_block_fill_duration | histogram | The time from first transaction enqueing to the block | channel | | +| | | being cut in seconds. | | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| broadcast_enqueue_duration | histogram | The time to enqueue a transaction in seconds. 
| channel | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | type | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | status | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| broadcast_processed_count | counter | The number of transactions processed. | channel | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | type | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | status | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| broadcast_validate_duration | histogram | The time to validate a transaction in seconds. | channel | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | type | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | status | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| cluster_comm_egress_queue_capacity | gauge | Capacity of the egress queue. 
| host | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | msg_type | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | channel | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| cluster_comm_egress_queue_length | gauge | Length of the egress queue. | host | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | msg_type | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | channel | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| cluster_comm_egress_queue_workers | gauge | Count of egress queue workers. | channel | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| cluster_comm_egress_stream_count | gauge | Count of streams to other nodes. | channel | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| cluster_comm_egress_tls_connection_count | gauge | Count of TLS connections to other nodes. | | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| cluster_comm_ingress_stream_count | gauge | Count of streams from other nodes. 
| | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| cluster_comm_msg_dropped_count | counter | Count of messages dropped. | host | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | channel | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| cluster_comm_msg_send_time | histogram | The time it takes to send a message in seconds. | host | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | channel | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_etcdraft_active_nodes | gauge | Number of active nodes in this channel. | channel | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_etcdraft_cluster_size | gauge | Number of nodes in this channel. | channel | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_etcdraft_committed_block_number | gauge | The block number of the latest block committed. 
| channel | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_etcdraft_config_proposals_received | counter | The total number of proposals received for config type | channel | | +| | | transactions. | | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_etcdraft_data_persist_duration | histogram | The time taken for etcd/raft data to be persisted in | channel | | +| | | storage (in seconds). | | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_etcdraft_is_leader | gauge | The leadership status of the current node: 1 if it is the | channel | | +| | | leader else 0. | | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_etcdraft_leader_changes | counter | The number of leader changes since process start. | channel | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_etcdraft_normal_proposals_received | counter | The total number of proposals received for normal type | channel | | +| | | transactions. 
| | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_etcdraft_proposal_failures | counter | The number of proposal failures. | channel | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_etcdraft_snapshot_block_number | gauge | The block number of the latest snapshot. | channel | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_kafka_batch_size | gauge | The mean batch size in bytes sent to topics. | topic | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_kafka_compression_ratio | gauge | The mean compression ratio (as percentage) for topics. | topic | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_kafka_incoming_byte_rate | gauge | Bytes/second read off brokers. | broker_id | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_kafka_last_offset_persisted | gauge | The offset specified in the block metadata of the most | channel | | +| | | recently committed block. 
| | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_kafka_outgoing_byte_rate | gauge | Bytes/second written to brokers. | broker_id | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_kafka_record_send_rate | gauge | The number of records per second sent to topics. | topic | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_kafka_records_per_request | gauge | The mean number of records sent per request to topics. | topic | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_kafka_request_latency | gauge | The mean request latency in ms to brokers. | broker_id | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_kafka_request_rate | gauge | Requests/second sent to brokers. | broker_id | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_kafka_request_size | gauge | The mean request size in bytes to brokers. 
| broker_id | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_kafka_response_rate | gauge | Requests/second sent to brokers. | broker_id | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_kafka_response_size | gauge | The mean response size in bytes from brokers. | broker_id | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| deliver_blocks_sent | counter | The number of blocks sent by the deliver service. | channel | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | filtered | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | data_type | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| deliver_requests_completed | counter | The number of deliver requests that have been completed. 
| channel | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | filtered | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | data_type | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | success | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| deliver_requests_received | counter | The number of deliver requests that have been received. | channel | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | filtered | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | data_type | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| deliver_streams_closed | counter | The number of GRPC streams that have been closed for the | | | +| | | deliver service. | | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| deliver_streams_opened | counter | The number of GRPC streams that have been opened for the | | | +| | | deliver service. | | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| fabric_version | gauge | The active version of Fabric. 
| version | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| grpc_comm_conn_closed | counter | gRPC connections closed. Open minus closed is the active | | | +| | | number of connections. | | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| grpc_comm_conn_opened | counter | gRPC connections opened. Open minus closed is the active | | | +| | | number of connections. | | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| grpc_server_stream_messages_received | counter | The number of stream messages received. | service | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | method | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| grpc_server_stream_messages_sent | counter | The number of stream messages sent. | service | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | method | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| grpc_server_stream_request_duration | histogram | The time to complete a stream request. 
| service | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | method | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | code | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| grpc_server_stream_requests_completed | counter | The number of stream requests completed. | service | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | method | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | code | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| grpc_server_stream_requests_received | counter | The number of stream requests received. | service | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | method | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| grpc_server_unary_request_duration | histogram | The time to complete a unary request. 
| service | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | method | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | code | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| grpc_server_unary_requests_completed | counter | The number of unary requests completed. | service | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | method | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | code | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| grpc_server_unary_requests_received | counter | The number of unary requests received. | service | | +| | | +-----------+--------------------------------------------------------------------+ +| | | | method | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| ledger_blockchain_height | gauge | Height of the chain in blocks. | channel | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| ledger_blockstorage_commit_time | histogram | Time taken in seconds for committing the block to storage. 
| channel | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| logging_entries_checked | counter | Number of log entries checked against the active logging | level | | +| | | level | | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| logging_entries_written | counter | Number of log entries that are written | level | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ StatsD ~~~~~~ @@ -287,9 +281,6 @@ associated with the metric. +---------------------------------------------------------------------------+-----------+------------------------------------------------------------+ | consensus.kafka.response_size.%{broker_id} | gauge | The mean response size in bytes from brokers. | +---------------------------------------------------------------------------+-----------+------------------------------------------------------------+ -| couchdb.processing_time.%{database}.%{function_name}.%{result} | histogram | Time taken in seconds for the function to complete request | -| | | to CouchDB | -+---------------------------------------------------------------------------+-----------+------------------------------------------------------------+ | deliver.blocks_sent.%{channel}.%{filtered}.%{data_type} | counter | The number of blocks sent by the deliver service. 
| +---------------------------------------------------------------------------+-----------+------------------------------------------------------------+ | deliver.requests_completed.%{channel}.%{filtered}.%{data_type}.%{success} | counter | The number of deliver requests that have been completed. | diff --git a/internal/peer/node/config.go b/internal/peer/node/config.go index 753dc91e53e..e82f890ec46 100644 --- a/internal/peer/node/config.go +++ b/internal/peer/node/config.go @@ -11,7 +11,6 @@ import ( coreconfig "github.com/hyperledger/fabric/core/config" "github.com/hyperledger/fabric/core/ledger" - "github.com/hyperledger/fabric/core/ledger/util/couchdb" "github.com/spf13/viper" ) @@ -47,7 +46,7 @@ func ledgerConfig() *ledger.Config { RootFSPath: rootFSPath, StateDBConfig: &ledger.StateDBConfig{ StateDatabase: viper.GetString("ledger.state.stateDatabase"), - CouchDB: &couchdb.Config{}, + CouchDB: &ledger.CouchDBConfig{}, }, PrivateDataConfig: &ledger.PrivateDataConfig{ MaxBatchSize: collElgProcMaxDbBatchSize, @@ -60,7 +59,7 @@ func ledgerConfig() *ledger.Config { } if conf.StateDBConfig.StateDatabase == "CouchDB" { - conf.StateDBConfig.CouchDB = &couchdb.Config{ + conf.StateDBConfig.CouchDB = &ledger.CouchDBConfig{ Address: viper.GetString("ledger.state.couchDBConfig.couchDBAddress"), Username: viper.GetString("ledger.state.couchDBConfig.username"), Password: viper.GetString("ledger.state.couchDBConfig.password"), diff --git a/internal/peer/node/config_test.go b/internal/peer/node/config_test.go index ecd54c7ea1f..c2833eb2224 100644 --- a/internal/peer/node/config_test.go +++ b/internal/peer/node/config_test.go @@ -11,7 +11,6 @@ import ( "time" "github.com/hyperledger/fabric/core/ledger" - "github.com/hyperledger/fabric/core/ledger/util/couchdb" "github.com/spf13/viper" "github.com/stretchr/testify/assert" ) @@ -33,7 +32,7 @@ func TestLedgerConfig(t *testing.T) { RootFSPath: "/peerfs/ledgersData", StateDBConfig: &ledger.StateDBConfig{ StateDatabase: "goleveldb", 
- CouchDB: &couchdb.Config{}, + CouchDB: &ledger.CouchDBConfig{}, }, PrivateDataConfig: &ledger.PrivateDataConfig{ MaxBatchSize: 5000, @@ -63,7 +62,7 @@ func TestLedgerConfig(t *testing.T) { RootFSPath: "/peerfs/ledgersData", StateDBConfig: &ledger.StateDBConfig{ StateDatabase: "CouchDB", - CouchDB: &couchdb.Config{ + CouchDB: &ledger.CouchDBConfig{ Address: "localhost:5984", Username: "username", Password: "password", @@ -113,7 +112,7 @@ func TestLedgerConfig(t *testing.T) { RootFSPath: "/peerfs/ledgersData", StateDBConfig: &ledger.StateDBConfig{ StateDatabase: "CouchDB", - CouchDB: &couchdb.Config{ + CouchDB: &ledger.CouchDBConfig{ Address: "localhost:5984", Username: "username", Password: "password",