Skip to content
This repository has been archived by the owner on Jun 27, 2023. It is now read-only.

Commit

Permalink
minor docs
Browse files Browse the repository at this point in the history
  • Loading branch information
schomatis committed Aug 16, 2021
1 parent 23e23fa commit 599c09e
Showing 1 changed file with 13 additions and 11 deletions.
24 changes: 13 additions & 11 deletions io/directory_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -254,7 +254,7 @@ func TestUpgradeableDirectory(t *testing.T) {
compareDirectoryEntries(t, upgradedDir, dir, []*ipld.Link{missingLink})
}

// Test that we fetch as little nodes needed to reach the HAMTShardingSize
// Test that we fetch as few nodes as needed to reach the HAMTShardingSize
// during the sizeBelowThreshold computation.
// FIXME: This only works for a sequential DAG walk.
// FIXME: Failing in the CI for Ubuntu. This may likely be an indication of race
Expand All @@ -269,7 +269,7 @@ func TestHAMTEnumerationWhenComputingSize(t *testing.T) {
// are the "value" links pointing to anything that is *not* another Shard).
estimatedLinkSize = mockLinkSizeFunc(1)
defer func() { estimatedLinkSize = productionLinkSize }()
// Use an identity hash function to ease the construction of "full" HAMTs
// Use an identity hash function to ease the construction of "complete" HAMTs
// (see CreateCompleteHAMT below for more details). (Ideally this should be
// a parameter we pass and not a global option we modify in the caller.)
oldHashFunc := hamt.HAMTHashFunction
Expand All @@ -280,38 +280,40 @@ func TestHAMTEnumerationWhenComputingSize(t *testing.T) {
//DefaultShardWidth = 8
// FIXME: We should be able to use a smaller DefaultShardWidth to have
// a deeper tree and cheaper tests once the import cycle is resolved
// in hamt.CreateCompleteHAMT.
// in hamt.CreateCompleteHAMT and the DefaultShardWidth value is not
// hardcoded there.

// We create a "complete" HAMT (see CreateCompleteHAMT for more details)
// with a regular structure to be able to predict how many Shard nodes we
// will need to fetch in order to reach the HAMTShardingSize threshold in
// sizeBelowThreshold (assuming a sequential DAG walk function).
oldHamtOption := HAMTShardingSize
defer func() { HAMTShardingSize = oldHamtOption }()
// (Some arbitrary values below that make this test not that expensive.)
treeHeight := 2
thresholdToWidthRatio := 4 // How many leaf shard nodes (with value links,
// directory entries) do we need to reach the threshold.
// i.e., directory entries) do we need to reach the threshold.
HAMTShardingSize = DefaultShardWidth * thresholdToWidthRatio
// With this structure we will then need to fetch the following nodes:
// * `thresholdToWidthRatio` leaf Shards with enough value link to reach
// * `thresholdToWidthRatio` leaf Shards with enough value links to reach
// the HAMTShardingSize threshold.
// * `(treeHeight - 1)` internal nodes to reach those leaf Shard nodes
// (assuming we have thresholdToWidthRatio below the DefaultShardWidth,
// i.e., all leaf nodes come from the same parent).
nodesToFetch := thresholdToWidthRatio + treeHeight - 1
ds := mdtest.Mock()
node, err := hamt.CreateCompleteHAMT(ds, treeHeight)
completeHAMTRoot, err := hamt.CreateCompleteHAMT(ds, treeHeight)
assert.NoError(t, err)

countGetsDS := newCountGetsDS(ds)
hamtDir, err := newHAMTDirectoryFromNode(countGetsDS, node)
hamtDir, err := newHAMTDirectoryFromNode(countGetsDS, completeHAMTRoot)
assert.NoError(t, err)

countGetsDS.resetCounter()
// FIXME: Only works with sequential DAG walk (now hardcoded, needs to be
// added to the internal API) where we can predict the Get requests and
// tree traversal. It would be desirable to have some test for the concurrent
// walk (actually used in production).
// walk (which is the one used in production).
below, err := hamtDir.sizeBelowThreshold(context.TODO(), 0)
assert.NoError(t, err)
assert.False(t, below)
Expand Down Expand Up @@ -491,17 +493,17 @@ func (d *countGetsDS) uniqueCidsFetched() int {
func (d *countGetsDS) Get(ctx context.Context, c cid.Cid) (ipld.Node, error) {
node, err := d.DAGService.Get(ctx, c)
if err != nil {
return node, err
return nil, err
}

d.mapLock.Lock()
d.cidsFetched[c] = struct{}{}
d.mapLock.Unlock()

return node, err
return node, nil
}

// Process sequentially. We don't care about performance here.
// Process sequentially (blocking) calling Get which tracks requests.
func (d *countGetsDS) GetMany(ctx context.Context, cids []cid.Cid) <-chan *ipld.NodeOption {
out := make(chan *ipld.NodeOption, len(cids))
defer close(out)
Expand Down

0 comments on commit 599c09e

Please sign in to comment.