From 85013acf9bfbdea722db6e8f583ab59924dd0577 Mon Sep 17 00:00:00 2001 From: Jeff Meunier Date: Wed, 31 Jul 2019 16:48:17 -0400 Subject: [PATCH 01/22] Added the ability to use an existing LookupMetadata instance to initialize a new (or *many* new) LookupMetadata instances. --- .../com/upserve/uppend/lookup/LookupData.java | 33 ++++++++++++++----- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/src/main/java/com/upserve/uppend/lookup/LookupData.java b/src/main/java/com/upserve/uppend/lookup/LookupData.java index ad05b45a..ba076d24 100644 --- a/src/main/java/com/upserve/uppend/lookup/LookupData.java +++ b/src/main/java/com/upserve/uppend/lookup/LookupData.java @@ -65,7 +65,6 @@ public static LookupData lookupReader(VirtualLongBlobStore keyLongBlobs, Virtual } private LookupData(VirtualLongBlobStore keyLongBlobs, VirtualMutableBlobStore metadataBlobs, int flushThreshold, int reloadInterval, boolean readOnly) { - this.keyLongBlobs = keyLongBlobs; this.metadataBlobs = metadataBlobs; @@ -367,13 +366,17 @@ private Long findValueFor(LookupKey key) { } LookupMetadata loadMetadata() { + return loadMetadata(null); + } + + LookupMetadata loadMetadata(LookupMetadata prev) { if (readOnly) { try { return LookupMetadata.open( getMetadataBlobs(), getMetaDataGeneration(), - getMetaMissCount(), - getMetaHitCount() + getMetaMissCount(prev == null ? 0 : prev.missCount.longValue()), + getMetaHitCount(prev == null ? 0 : prev.hitCount.longValue()) ); } catch (IllegalStateException e) { // Try again and let the exception bubble if it fails @@ -381,8 +384,8 @@ LookupMetadata loadMetadata() { return LookupMetadata.open( getMetadataBlobs(), getMetaDataGeneration(), - getMetaMissCount(), - getMetaHitCount() + getMetaMissCount(prev == null ? 0 : prev.missCount.longValue()), + getMetaHitCount(prev == null ? 
0 : prev.hitCount.longValue()) ); } } else { @@ -419,12 +422,26 @@ private int getMetaDataGeneration() { return metaDataGeneration.get(); } + private static LongAdder longAdderWithInitialValue(long initialValue) { + LongAdder la = new LongAdder(); + la.add(initialValue); + return la; + } + private LongAdder getMetaHitCount() { - return Optional.ofNullable(atomicMetadataRef.get()).map(md -> md.hitCount).orElse(new LongAdder()); + return getMetaHitCount(0l); + } + + private LongAdder getMetaHitCount(long initialValue) { + return Optional.ofNullable(atomicMetadataRef.get()).map(md -> md.hitCount).orElse(longAdderWithInitialValue(initialValue)); } private LongAdder getMetaMissCount() { - return Optional.ofNullable(atomicMetadataRef.get()).map(md -> md.missCount).orElse(new LongAdder()); + return getMetaMissCount(0l); + } + + private LongAdder getMetaMissCount(long initialValue) { + return Optional.ofNullable(atomicMetadataRef.get()).map(md -> md.missCount).orElse(longAdderWithInitialValue(initialValue)); } /** @@ -582,7 +599,7 @@ protected LookupMetadata getMetadata() { boolean luckyMe = reloadStamp.compareAndSet(stamp[0], stamp[0] + reloadInterval); if (luckyMe) { - result = loadMetadata(); + result = loadMetadata(result); timeStampedMetadata.set(result, stamp[0] + reloadInterval); } } From bb2451264921f534014826c2653db38465f79c11 Mon Sep 17 00:00:00 2001 From: Jeff Meunier Date: Thu, 1 Aug 2019 13:18:42 -0400 Subject: [PATCH 02/22] Fixed a bunch of stuff. Passing around more "previous" values now. 
--- .../com/upserve/uppend/lookup/LookupData.java | 84 ++++++------------- 1 file changed, 27 insertions(+), 57 deletions(-) diff --git a/src/main/java/com/upserve/uppend/lookup/LookupData.java b/src/main/java/com/upserve/uppend/lookup/LookupData.java index ba076d24..fdaa3668 100644 --- a/src/main/java/com/upserve/uppend/lookup/LookupData.java +++ b/src/main/java/com/upserve/uppend/lookup/LookupData.java @@ -25,7 +25,7 @@ public class LookupData implements Flushable, Trimmable { private final int flushThreshold; private final int firstFlushThreshold; - private final int reloadInterval; + private final int reloadInterval; // Reload interval is specified in seconds // The container for stuff we need to write - Only new keys can be in the write cache final ConcurrentHashMap writeCache; @@ -366,44 +366,36 @@ private Long findValueFor(LookupKey key) { } LookupMetadata loadMetadata() { - return loadMetadata(null); + return loadMetadata(); } - LookupMetadata loadMetadata(LookupMetadata prev) { - if (readOnly) { - try { - return LookupMetadata.open( - getMetadataBlobs(), - getMetaDataGeneration(), - getMetaMissCount(prev == null ? 0 : prev.missCount.longValue()), - getMetaHitCount(prev == null ? 0 : prev.hitCount.longValue()) - ); - } catch (IllegalStateException e) { - // Try again and let the exception bubble if it fails + LookupMetadata loadMetadata(LongAdder prevMissCount, LongAdder prevHitCount) { + try { + return LookupMetadata.open( + getMetadataBlobs(), + getMetaDataGeneration(), + prevMissCount, + prevHitCount + ); + } catch (IllegalStateException e) { + if (readOnly) { log.warn("getMetaData failed for read only store - attempting to reload!", e); + // Try again and let the exception bubble if it fails return LookupMetadata.open( getMetadataBlobs(), getMetaDataGeneration(), - getMetaMissCount(prev == null ? 0 : prev.missCount.longValue()), - getMetaHitCount(prev == null ? 
0 : prev.hitCount.longValue()) + prevMissCount, + prevHitCount ); } - } else { - try { - return LookupMetadata.open( - getMetadataBlobs(), - getMetaDataGeneration(), - getMetaMissCount(), - getMetaHitCount() - ); - } catch (IllegalStateException e) { + else { log.warn("getMetaData failed for read write store - attempting to repair it!", e); - return repairMetadata(); + return repairMetadata(prevMissCount, prevHitCount); } } } - private synchronized LookupMetadata repairMetadata() { + private synchronized LookupMetadata repairMetadata(LongAdder prevMissCount, LongAdder prevHitCount) { int[] sortedPositions = keyLongBlobs.positionBlobStream() .sorted(Comparator.comparing(entry -> new LookupKey(entry.getValue()))) .mapToInt(entry -> entry.getKey().intValue()) @@ -412,7 +404,7 @@ private synchronized LookupMetadata repairMetadata() { int sortedPositionsSize = sortedPositions.length; LookupKey minKey = sortedPositionsSize > 0 ? readKey((long) sortedPositions[0]) : null; LookupKey maxKey = sortedPositionsSize > 0 ? 
readKey((long) sortedPositions[sortedPositionsSize - 1]) : null; - return LookupMetadata.generateMetadata(minKey, maxKey, sortedPositions, metadataBlobs, metaDataGeneration.incrementAndGet(), getMetaMissCount(), getMetaHitCount()); + return LookupMetadata.generateMetadata(minKey, maxKey, sortedPositions, metadataBlobs, metaDataGeneration.incrementAndGet(), prevMissCount, prevHitCount); } catch (IOException e) { throw new UncheckedIOException("Unable to write repaired metadata!", e); } @@ -422,28 +414,6 @@ private int getMetaDataGeneration() { return metaDataGeneration.get(); } - private static LongAdder longAdderWithInitialValue(long initialValue) { - LongAdder la = new LongAdder(); - la.add(initialValue); - return la; - } - - private LongAdder getMetaHitCount() { - return getMetaHitCount(0l); - } - - private LongAdder getMetaHitCount(long initialValue) { - return Optional.ofNullable(atomicMetadataRef.get()).map(md -> md.hitCount).orElse(longAdderWithInitialValue(initialValue)); - } - - private LongAdder getMetaMissCount() { - return getMetaMissCount(0l); - } - - private LongAdder getMetaMissCount(long initialValue) { - return Optional.ofNullable(atomicMetadataRef.get()).map(md -> md.missCount).orElse(longAdderWithInitialValue(initialValue)); - } - /** * Create a copy of the keys currently in the write cache * @@ -524,7 +494,7 @@ void flushWriteCache(LookupMetadata currentMetadata) { log.debug("flushed keys"); } - void generateMetaData(LookupMetadata currentMetadata) { + void generateMetaData(LookupMetadata currentMetadata, LongAdder prevMissCount, LongAdder prevHitCount) { int[] currentKeySortOrder = currentMetadata.getKeyStorageOrder(); int flushSize = flushCache.size(); @@ -581,8 +551,8 @@ void generateMetaData(LookupMetadata currentMetadata) { newKeySortOrder, metadataBlobs, metaDataGeneration.incrementAndGet(), - getMetaMissCount(), - getMetaHitCount()) + prevMissCount, + prevHitCount) ); } catch (IOException e) { throw new UncheckedIOException("Failed to 
write new metadata!", e); @@ -596,10 +566,10 @@ protected LookupMetadata getMetadata() { // Convert millis to seconds if (((System.currentTimeMillis() - startTime) / 1000) > stamp[0]){ - boolean luckyMe = reloadStamp.compareAndSet(stamp[0], stamp[0] + reloadInterval); - - if (luckyMe) { - result = loadMetadata(result); + // a reloadInterval of 0 prevents reloading of the metadata + boolean reloadMetadata = !reloadStamp.compareAndSet(stamp[0], stamp[0] + reloadInterval); + if (reloadMetadata) { + result = loadMetadata(result.missCount, result.hitCount); timeStampedMetadata.set(result, stamp[0] + reloadInterval); } } @@ -621,7 +591,7 @@ public synchronized void flush() { LookupMetadata md = atomicMetadataRef.get(); flushWriteCache(md); - generateMetaData(md); + generateMetaData(md, new LongAdder(), new LongAdder()); flushCache.clear(); From 569ad506e74e5e4499bc000aa1748f8b12d55684 Mon Sep 17 00:00:00 2001 From: Jeff Meunier Date: Thu, 1 Aug 2019 13:23:47 -0400 Subject: [PATCH 03/22] Removed `else` in unnecessary `else` clause. 
--- src/main/java/com/upserve/uppend/lookup/LookupData.java | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/main/java/com/upserve/uppend/lookup/LookupData.java b/src/main/java/com/upserve/uppend/lookup/LookupData.java index fdaa3668..8701be01 100644 --- a/src/main/java/com/upserve/uppend/lookup/LookupData.java +++ b/src/main/java/com/upserve/uppend/lookup/LookupData.java @@ -388,10 +388,8 @@ LookupMetadata loadMetadata(LongAdder prevMissCount, LongAdder prevHitCount) { prevHitCount ); } - else { - log.warn("getMetaData failed for read write store - attempting to repair it!", e); - return repairMetadata(prevMissCount, prevHitCount); - } + log.warn("getMetaData failed for read write store - attempting to repair it!", e); + return repairMetadata(prevMissCount, prevHitCount); } } From 7f805a347175673e6f06a1aa4bd0f28cf2587252 Mon Sep 17 00:00:00 2001 From: Jeff Meunier Date: Thu, 1 Aug 2019 13:46:22 -0400 Subject: [PATCH 04/22] Added a "default" `generateMetaData` method to keep existsing tests from failing. 
--- src/main/java/com/upserve/uppend/lookup/LookupData.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/main/java/com/upserve/uppend/lookup/LookupData.java b/src/main/java/com/upserve/uppend/lookup/LookupData.java index 8701be01..134d9868 100644 --- a/src/main/java/com/upserve/uppend/lookup/LookupData.java +++ b/src/main/java/com/upserve/uppend/lookup/LookupData.java @@ -492,6 +492,10 @@ void flushWriteCache(LookupMetadata currentMetadata) { log.debug("flushed keys"); } + void generateMetaData(LookupMetadata currentMetadata) { + generateMetaData(currentMetadata, new LongAdder(), new LongAdder()); + } + void generateMetaData(LookupMetadata currentMetadata, LongAdder prevMissCount, LongAdder prevHitCount) { int[] currentKeySortOrder = currentMetadata.getKeyStorageOrder(); @@ -589,7 +593,7 @@ public synchronized void flush() { LookupMetadata md = atomicMetadataRef.get(); flushWriteCache(md); - generateMetaData(md, new LongAdder(), new LongAdder()); + generateMetaData(md); flushCache.clear(); From 535fd3c4c65fa70b36366478a2f690760d9556be Mon Sep 17 00:00:00 2001 From: Jeff Meunier Date: Thu, 1 Aug 2019 13:59:11 -0400 Subject: [PATCH 05/22] Fixed accidental unbounded recursion. 
--- src/main/java/com/upserve/uppend/lookup/LookupData.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/java/com/upserve/uppend/lookup/LookupData.java b/src/main/java/com/upserve/uppend/lookup/LookupData.java index 134d9868..c8cb591a 100644 --- a/src/main/java/com/upserve/uppend/lookup/LookupData.java +++ b/src/main/java/com/upserve/uppend/lookup/LookupData.java @@ -366,7 +366,7 @@ private Long findValueFor(LookupKey key) { } LookupMetadata loadMetadata() { - return loadMetadata(); + return loadMetadata(new LongAdder(), new LongAdder()); } LookupMetadata loadMetadata(LongAdder prevMissCount, LongAdder prevHitCount) { From fd15f7645d2994d26d5d1b065615a7ace77e3824 Mon Sep 17 00:00:00 2001 From: Jeff Meunier Date: Fri, 2 Aug 2019 13:11:38 -0400 Subject: [PATCH 06/22] Wrapped calls to findKey with a timeFindKey method to ensure that all calls to findKey are properly accounted. --- .../com/upserve/uppend/lookup/LookupData.java | 26 +++++++++++++++---- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/src/main/java/com/upserve/uppend/lookup/LookupData.java b/src/main/java/com/upserve/uppend/lookup/LookupData.java index c8cb591a..ae66a3e5 100644 --- a/src/main/java/com/upserve/uppend/lookup/LookupData.java +++ b/src/main/java/com/upserve/uppend/lookup/LookupData.java @@ -345,6 +345,20 @@ public long getFindKeyTimer(){ public int getFlushCount() { return flushCounter; } + // i was going to start counting calls to findKey + // not sure if a LongAdder is needed or if I can use just a long + //public static LongAdder findKeyCalls_la = new LongAdder(); + //public static long findKeyCalls_l = 0l; + + private Long timeFindKey(LookupMetadata md, VirtualLongBlobStore longBlobStore, LookupKey key) { + //findKeyCalls_la.increment(); + //findKeyCalls_l++; // not an atomic operation! 
+ long tic = -System.nanoTime(); + Long val = md.findKey(longBlobStore, key); + findKeyTimer.add(System.nanoTime() + tic); + return val; + } + /** * Load a key from cached pages * @@ -359,12 +373,12 @@ private Long findValueFor(LookupKey key) { } } LookupMetadata md = getMetadata(); - long tic = -System.nanoTime(); - Long val = md.findKey(keyLongBlobs, key); - findKeyTimer.add(System.nanoTime() + tic); - return val; + return timeFindKey(md, keyLongBlobs, key); } + // Allows calling of loadMetaData with default-0 LongAdders. + // This is used in the constructor of this class and also + // in the test class. LookupMetadata loadMetadata() { return loadMetadata(new LongAdder(), new LongAdder()); } @@ -465,7 +479,8 @@ void flushWriteCache(LookupMetadata currentMetadata) { // Check the metadata generation of the LookupKeys if (key.getMetaDataGeneration() != currentMetadataGeneration) { // Update the index of the key for the current metadata generation for so we can insert it correctly - currentMetadata.findKey(keyLongBlobs, key); + //currentMetadata.findKey(keyLongBlobs, key); + timeFindKey(currentMetadata, keyLongBlobs, key); } }) .forEach(key -> { @@ -571,6 +586,7 @@ protected LookupMetadata getMetadata() { // a reloadInterval of 0 prevents reloading of the metadata boolean reloadMetadata = !reloadStamp.compareAndSet(stamp[0], stamp[0] + reloadInterval); if (reloadMetadata) { + log.warn("getMetadata calling loadMetadata"); result = loadMetadata(result.missCount, result.hitCount); timeStampedMetadata.set(result, stamp[0] + reloadInterval); } From dc806b018093f7b3cbc19f1b49b41409fd852e52 Mon Sep 17 00:00:00 2001 From: Jeff Meunier Date: Fri, 2 Aug 2019 13:18:40 -0400 Subject: [PATCH 07/22] Using currentMetadata to get missCount and hitCount. 
--- src/main/java/com/upserve/uppend/lookup/LookupData.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/java/com/upserve/uppend/lookup/LookupData.java b/src/main/java/com/upserve/uppend/lookup/LookupData.java index ae66a3e5..7d399c5a 100644 --- a/src/main/java/com/upserve/uppend/lookup/LookupData.java +++ b/src/main/java/com/upserve/uppend/lookup/LookupData.java @@ -508,7 +508,7 @@ void flushWriteCache(LookupMetadata currentMetadata) { } void generateMetaData(LookupMetadata currentMetadata) { - generateMetaData(currentMetadata, new LongAdder(), new LongAdder()); + generateMetaData(currentMetadata, currentMetadata.missCount, currentMetadata.hitCount); } void generateMetaData(LookupMetadata currentMetadata, LongAdder prevMissCount, LongAdder prevHitCount) { From eb3fbb7b247955461c03e77b66c8e9d8898c90db Mon Sep 17 00:00:00 2001 From: Jeff Meunier Date: Fri, 2 Aug 2019 17:02:46 -0400 Subject: [PATCH 08/22] Removed some debugging code that ended up not showing anything significant. --- src/main/java/com/upserve/uppend/lookup/LookupData.java | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/src/main/java/com/upserve/uppend/lookup/LookupData.java b/src/main/java/com/upserve/uppend/lookup/LookupData.java index 7d399c5a..1a5add2b 100644 --- a/src/main/java/com/upserve/uppend/lookup/LookupData.java +++ b/src/main/java/com/upserve/uppend/lookup/LookupData.java @@ -345,14 +345,7 @@ public long getFindKeyTimer(){ public int getFlushCount() { return flushCounter; } - // i was going to start counting calls to findKey - // not sure if a LongAdder is needed or if I can use just a long - //public static LongAdder findKeyCalls_la = new LongAdder(); - //public static long findKeyCalls_l = 0l; - private Long timeFindKey(LookupMetadata md, VirtualLongBlobStore longBlobStore, LookupKey key) { - //findKeyCalls_la.increment(); - //findKeyCalls_l++; // not an atomic operation! 
long tic = -System.nanoTime(); Long val = md.findKey(longBlobStore, key); findKeyTimer.add(System.nanoTime() + tic); @@ -402,6 +395,7 @@ LookupMetadata loadMetadata(LongAdder prevMissCount, LongAdder prevHitCount) { prevHitCount ); } + // `else` statement not needed because of the return statement above log.warn("getMetaData failed for read write store - attempting to repair it!", e); return repairMetadata(prevMissCount, prevHitCount); } @@ -479,7 +473,6 @@ void flushWriteCache(LookupMetadata currentMetadata) { // Check the metadata generation of the LookupKeys if (key.getMetaDataGeneration() != currentMetadataGeneration) { // Update the index of the key for the current metadata generation for so we can insert it correctly - //currentMetadata.findKey(keyLongBlobs, key); timeFindKey(currentMetadata, keyLongBlobs, key); } }) From 98e09ac02c35e57fa3d2ed0f9707c342be4faaef Mon Sep 17 00:00:00 2001 From: David Stuebe Date: Mon, 5 Aug 2019 20:04:16 -0400 Subject: [PATCH 09/22] Add internal metrics objects and adders --- .../upserve/uppend/AppendStoreMetrics.java | 8 + .../java/com/upserve/uppend/BlockStats.java | 74 ------- .../com/upserve/uppend/KeyStoreMetrics.java | 11 ++ .../com/upserve/uppend/PartitionStats.java | 144 -------------- .../metrics/AppendOnlyStoreWithMetrics.java | 23 ++- .../uppend/metrics/BlobStoreMetrics.java | 123 ++++++++++++ .../uppend/metrics/BlockedLongMetrics.java | 151 ++++++++++++++ .../metrics/CounterStoreWithMetrics.java | 15 ++ .../uppend/metrics/InternalMetrics.java | 65 ++++++ .../uppend/metrics/LongBlobStoreMetrics.java | 186 ++++++++++++++++++ .../uppend/metrics/LookupDataMetrics.java | 143 ++++++++++++++ .../metrics/MutableBlobStoreMetrics.java | 123 ++++++++++++ .../AppendOnlyStoreWithMetricsTest.java | 6 +- 13 files changed, 847 insertions(+), 225 deletions(-) create mode 100644 src/main/java/com/upserve/uppend/AppendStoreMetrics.java delete mode 100644 src/main/java/com/upserve/uppend/BlockStats.java create mode 100644 
src/main/java/com/upserve/uppend/KeyStoreMetrics.java delete mode 100644 src/main/java/com/upserve/uppend/PartitionStats.java create mode 100644 src/main/java/com/upserve/uppend/metrics/BlobStoreMetrics.java create mode 100644 src/main/java/com/upserve/uppend/metrics/BlockedLongMetrics.java create mode 100644 src/main/java/com/upserve/uppend/metrics/InternalMetrics.java create mode 100644 src/main/java/com/upserve/uppend/metrics/LongBlobStoreMetrics.java create mode 100644 src/main/java/com/upserve/uppend/metrics/LookupDataMetrics.java create mode 100644 src/main/java/com/upserve/uppend/metrics/MutableBlobStoreMetrics.java diff --git a/src/main/java/com/upserve/uppend/AppendStoreMetrics.java b/src/main/java/com/upserve/uppend/AppendStoreMetrics.java new file mode 100644 index 00000000..b5b63029 --- /dev/null +++ b/src/main/java/com/upserve/uppend/AppendStoreMetrics.java @@ -0,0 +1,8 @@ +package com.upserve.uppend; + +import com.upserve.uppend.metrics.*; + +public interface AppendStoreMetrics { + BlockedLongMetrics getBlockedLongMetrics(); + BlobStoreMetrics getBlobStoreMetrics(); +} diff --git a/src/main/java/com/upserve/uppend/BlockStats.java b/src/main/java/com/upserve/uppend/BlockStats.java deleted file mode 100644 index 46433d0c..00000000 --- a/src/main/java/com/upserve/uppend/BlockStats.java +++ /dev/null @@ -1,74 +0,0 @@ -package com.upserve.uppend; - -import java.util.Objects; - -public class BlockStats { - - private final int pagesLoaded; - private final long size; - private final long appendCount; - private final long allocCount; - private final long valuesReadCount; - - public static BlockStats ZERO_STATS = new BlockStats(0,0,0,0,0); - public BlockStats(int pagesLoaded, long size, long appendCount, long allocCount, long valuesReadCount) { - this.pagesLoaded = pagesLoaded; - this.size = size; - this.appendCount = appendCount; - this.allocCount = allocCount; - this.valuesReadCount = valuesReadCount; - } - - public int getPagesLoaded() { - return 
pagesLoaded; - } - - public long getSize() { - return size; - } - - public long getAppendCount() { - return appendCount; - } - - public long getAllocCount() { - return allocCount; - } - - public long getValuesReadCount() { - return valuesReadCount; - } - - @Override - public String toString() { - return "BlockStats{" + - "pagesLoaded=" + pagesLoaded + - ", size=" + size + - ", appendCount=" + appendCount + - ", allocCount=" + allocCount + - ", valuesReadCount=" + valuesReadCount + - '}'; - } - - public BlockStats minus(BlockStats other) { - if (Objects.isNull(other)) throw new NullPointerException("BlockStats minus method argument is null"); - return new BlockStats( - pagesLoaded - other.pagesLoaded, - size - other.size, - appendCount - other.appendCount, - allocCount - other.allocCount, - valuesReadCount - other.valuesReadCount - ); - } - - public BlockStats add(BlockStats other) { - if (Objects.isNull(other)) throw new NullPointerException("BlockStats add method argument is null"); - return new BlockStats( - pagesLoaded + other.pagesLoaded, - size + other.size, - appendCount + other.appendCount, - allocCount + other.allocCount, - valuesReadCount + other.valuesReadCount - ); - } -} diff --git a/src/main/java/com/upserve/uppend/KeyStoreMetrics.java b/src/main/java/com/upserve/uppend/KeyStoreMetrics.java new file mode 100644 index 00000000..5828672d --- /dev/null +++ b/src/main/java/com/upserve/uppend/KeyStoreMetrics.java @@ -0,0 +1,11 @@ +package com.upserve.uppend; + +import com.upserve.uppend.metrics.*; + +public interface KeyStoreMetrics { + + LookupDataMetrics getLookupDataMetrics(); + LongBlobStoreMetrics getLongBlobStoreMetrics(); + MutableBlobStoreMetrics getMutableBlobStoreMetrics(); + +} diff --git a/src/main/java/com/upserve/uppend/PartitionStats.java b/src/main/java/com/upserve/uppend/PartitionStats.java deleted file mode 100644 index 03d1326f..00000000 --- a/src/main/java/com/upserve/uppend/PartitionStats.java +++ /dev/null @@ -1,144 +0,0 @@ -package 
com.upserve.uppend; - -import java.util.Objects; - -public class PartitionStats { - - private final int metadataPageCount; - private final int keyPageCount; - private final int blobPageCount; - - private final long metadataLookupMissCount; - private final long metadataLookupHitCount; - - private final long metadataSize; - private final long findKeyTimer; - - private final long flushedKeyCount; - private final long flushCount; - - private final long lookups; - private final long maxLookupSize; - - public PartitionStats(int metadataPageCount, int keyPageCount, int blobPageCount, long metadataLookupMissCount, long metadataLookupHitCount, long metadataSize, long findKeyTimer, long flushedKeyCount, long flushCount, long lookups, long maxLookupSize) { - this.metadataPageCount = metadataPageCount; - this.keyPageCount = keyPageCount; - this.blobPageCount = blobPageCount; - this.metadataLookupMissCount = metadataLookupMissCount; - this.metadataLookupHitCount = metadataLookupHitCount; - this.metadataSize = metadataSize; - this.findKeyTimer = findKeyTimer; - this.flushedKeyCount = flushedKeyCount; - this.flushCount = flushCount; - this.lookups = lookups; - this.maxLookupSize = maxLookupSize; - } - - public static PartitionStats ZERO_STATS = new PartitionStats(0,0,0,0,0, 0, 0, 0 ,0, 0, 0); - - public int getMetadataPageCount() { - return metadataPageCount; - } - - public int getKeyPageCount() { - return keyPageCount; - } - - public int getBlobPageCount() { - return blobPageCount; - } - - public long getMetadataLookupHitCount() { - return metadataLookupHitCount; - } - - public long getMetadataLookupMissCount() { - return metadataLookupMissCount; - } - - public long getMetadataSize() { - return metadataSize; - } - - public long getFindKeyTimer() { - return findKeyTimer; - } - - public long getFlushedKeyCount() { return flushedKeyCount; } - - public long getFlushCount() { return flushCount; } - - public long getLookupCount() { return lookups; } - - public long getMaxLookupSize() 
{ return maxLookupSize; } - - @Override - public String toString() { - return "PartitionStats{" + - "metadataPageCount=" + metadataPageCount + - ", keyPageCount=" + keyPageCount + - ", blobPageCount=" + blobPageCount + - ", metadataLookupMissCount=" + metadataLookupMissCount + - ", metadataLookupHitCount=" + metadataLookupHitCount + - ", metadataSize=" + metadataSize + - ", findKeyTimer=" + findKeyTimer + - ", flushedKeyCount=" + flushedKeyCount + - ", flushCount=" + flushCount + - ", lookups=" + lookups + - ", maxLookupSize=" + maxLookupSize + - '}'; - } - - public String present(PartitionStats previous) { - PartitionStats deltaStats = this.minus(previous); - - long lookupCount = Math.max(1, deltaStats.metadataLookupHitCount + deltaStats.metadataLookupMissCount); - return "PartitionStats{ Deltas: " + - "MDPages=" + deltaStats.metadataPageCount + - ", KeyPages=" + deltaStats.keyPageCount + - ", BlobPages=" + deltaStats.blobPageCount + - ", NewKeys=" + deltaStats.metadataLookupMissCount + - ", ExistingKeys=" + deltaStats.metadataLookupHitCount + - ", MeanLookupTime=" + deltaStats.findKeyTimer / (lookupCount * 1000)+ "us" + - ", flushedKeys=" + deltaStats.flushedKeyCount + - ", flushCount=" + deltaStats.flushCount + - "; Totals:" + - "MeanLookupSize=" + metadataSize / Math.max(lookups, 1) + - ", MaxLookupSize=" + maxLookupSize + - "}"; - } - - public PartitionStats minus(PartitionStats other) { - if (Objects.isNull(other)) throw new NullPointerException("PartitionStats minus method argument is null"); - return new PartitionStats( - metadataPageCount - other.metadataPageCount, - keyPageCount - other.keyPageCount, - blobPageCount - other.blobPageCount, - metadataLookupMissCount - other.metadataLookupMissCount, - metadataLookupHitCount - other.metadataLookupHitCount, - metadataSize - other.metadataSize, - findKeyTimer - other.findKeyTimer, - flushedKeyCount - other.flushedKeyCount, - flushCount - other.flushCount, - lookups - other.lookups, - maxLookupSize - 
other.maxLookupSize - ); - } - - public PartitionStats add(PartitionStats other) { - if (Objects.isNull(other)) throw new NullPointerException("PartitionStats add method argument is null"); - return new PartitionStats( - metadataPageCount + other.metadataPageCount, - keyPageCount + other.keyPageCount, - blobPageCount + other.blobPageCount, - metadataLookupMissCount + other.metadataLookupMissCount, - metadataLookupHitCount + other.metadataLookupHitCount, - metadataSize + other.metadataSize, - findKeyTimer + other.findKeyTimer, - flushedKeyCount + other.flushedKeyCount, - flushCount + other.flushCount, - lookups + other.lookups, - Math.max(maxLookupSize, other.maxLookupSize) - ); - } -} diff --git a/src/main/java/com/upserve/uppend/metrics/AppendOnlyStoreWithMetrics.java b/src/main/java/com/upserve/uppend/metrics/AppendOnlyStoreWithMetrics.java index 1ae5a3d7..592eafab 100644 --- a/src/main/java/com/upserve/uppend/metrics/AppendOnlyStoreWithMetrics.java +++ b/src/main/java/com/upserve/uppend/metrics/AppendOnlyStoreWithMetrics.java @@ -184,13 +184,28 @@ public String getName() { } @Override - public BlockStats getBlockLongStats() { - return store.getBlockLongStats(); + public BlockedLongMetrics getBlockedLongMetrics() { + return store.getBlockedLongMetrics(); } @Override - public PartitionStats getPartitionStats() { - return store.getPartitionStats(); + public BlobStoreMetrics getBlobStoreMetrics() { + return store.getBlobStoreMetrics(); + } + + @Override + public LookupDataMetrics getLookupDataMetrics() { + return store.getLookupDataMetrics(); + } + + @Override + public LongBlobStoreMetrics getLongBlobStoreMetrics() { + return store.getLongBlobStoreMetrics(); + } + + @Override + public MutableBlobStoreMetrics getMutableBlobStoreMetrics() { + return store.getMutableBlobStoreMetrics(); } @Override diff --git a/src/main/java/com/upserve/uppend/metrics/BlobStoreMetrics.java b/src/main/java/com/upserve/uppend/metrics/BlobStoreMetrics.java new file mode 100644 index 
00000000..4d183a01 --- /dev/null +++ b/src/main/java/com/upserve/uppend/metrics/BlobStoreMetrics.java @@ -0,0 +1,123 @@ +package com.upserve.uppend.metrics; + +import java.util.*; +import java.util.concurrent.atomic.LongAdder; + +public class BlobStoreMetrics implements InternalMetrics{ + + // Stats for summed over all AppendOnlyBlobStore operations since the Uppend store was opened + private final long bytesAppended; + private final long appendCounter; + private final long appendTimer; + private final long bytesRead; + private final long readCounter; + private final long readTimer; + + // Partition level stats for the life of the blocked long store (Consistent on open) + private final double avgBlobStoreAllocatedPages; + private final long maxBlobStoreAllocatedPages; + private final long sumBlobStoreAllocatedPages; + + public static class Adders { + public final LongAdder bytesAppended = new LongAdder(); + public final LongAdder appendCounter = new LongAdder(); + public final LongAdder appendTimer = new LongAdder(); + public final LongAdder bytesRead = new LongAdder(); + public final LongAdder readCounter = new LongAdder(); + public final LongAdder readTimer = new LongAdder(); + } + + public BlobStoreMetrics(Adders blobStoreMetricsAdders, LongSummaryStatistics blobStoreAllocatedPagesStatistics) { + this( + blobStoreMetricsAdders.bytesAppended.sum(), + blobStoreMetricsAdders.appendCounter.sum(), + blobStoreMetricsAdders.appendTimer.sum(), + blobStoreMetricsAdders.bytesRead.sum(), + blobStoreMetricsAdders.readCounter.sum(), + blobStoreMetricsAdders.readTimer.sum(), + blobStoreAllocatedPagesStatistics.getAverage(), + blobStoreAllocatedPagesStatistics.getMax(), + blobStoreAllocatedPagesStatistics.getSum() + ); + } + + private BlobStoreMetrics( + long bytesAppended, + long appendCounter, + long appendTimer, + long bytesRead, + long readCounter, + long readTimer, + double avgBlobStoreAllocatedPages, + long maxBlobStoreAllocatedPages, + long sumBlobStoreAllocatedPages + 
+ ) { + this.bytesAppended = bytesAppended; + this.appendCounter = appendCounter; + this.appendTimer = appendTimer; + this.bytesRead = bytesRead; + this.readCounter = readCounter; + this.readTimer = readTimer; + this.avgBlobStoreAllocatedPages = avgBlobStoreAllocatedPages; + this.maxBlobStoreAllocatedPages = maxBlobStoreAllocatedPages; + this.sumBlobStoreAllocatedPages = sumBlobStoreAllocatedPages; + } + + @Override + public String toString() { + return "BlobStoreMetrics{" + + "bytesAppended=" + bytesAppended + + ", appendCounter=" + appendCounter + + ", appendTimer=" + appendTimer + + ", bytesRead=" + bytesRead + + ", readCounter=" + readCounter + + ", readTimer=" + readTimer + + ", avgBlobStoreAllocatedPages=" + avgBlobStoreAllocatedPages + + ", maxBlobStoreAllocatedPages=" + maxBlobStoreAllocatedPages + + ", sumBlobStoreAllocatedPages=" + sumBlobStoreAllocatedPages + + '}'; + } + + @Override + public String present(BlobStoreMetrics previous) { + BlobStoreMetrics delta = this.minus(previous); + + return "BlobStoreMetrics: Deltas{" + + String.format("pagesAllocated(%5d), ", delta.sumBlobStoreAllocatedPages) + + String.format( + "appends(%6.2f us/, %7.2f bytes/, %6d #), ", + Prefix.NANO.toMicro(delta.appendTimer) / Math.max(1, delta.appendCounter), + (double) delta.bytesAppended / Math.max(1, delta.appendCounter), + delta.appendCounter + ) + + String.format( + "reads(%6.2f us/, %7.2f b/, %6d #)", + Prefix.NANO.toMicro(delta.readTimer) / Math.max(1, delta.readCounter), + (double) delta.bytesRead / Math.max(1, delta.readCounter), + delta.readCounter + ) + + "}; Absolute{" + + String.format( + "pageCount(%7.2f avg/, %5d max/, %9d #)", + avgBlobStoreAllocatedPages, maxBlobStoreAllocatedPages, sumBlobStoreAllocatedPages + ) + + "};"; + } + + @Override + public BlobStoreMetrics minus(BlobStoreMetrics other) { + if (Objects.isNull(other)) throw new NullPointerException("BlobStoreMetrics minus method argument is null"); + return new BlobStoreMetrics( + bytesAppended - 
other.bytesAppended, + appendCounter - other.appendCounter, + appendTimer - other.appendTimer, + bytesRead - other.bytesRead, + readCounter - other.readCounter, + readTimer - other.readTimer, + avgBlobStoreAllocatedPages - other.avgBlobStoreAllocatedPages, + maxBlobStoreAllocatedPages - other.maxBlobStoreAllocatedPages, + sumBlobStoreAllocatedPages - other.sumBlobStoreAllocatedPages + ); + } +} diff --git a/src/main/java/com/upserve/uppend/metrics/BlockedLongMetrics.java b/src/main/java/com/upserve/uppend/metrics/BlockedLongMetrics.java new file mode 100644 index 00000000..ee7f3fee --- /dev/null +++ b/src/main/java/com/upserve/uppend/metrics/BlockedLongMetrics.java @@ -0,0 +1,151 @@ +package com.upserve.uppend.metrics; + +import java.util.*; +import java.util.concurrent.atomic.LongAdder; + +public class BlockedLongMetrics implements InternalMetrics { + + // Stats summed over all BlockedLongs stores since the Uppend store was opened + private final long blockAllocationCounter; + private final long appendCounter; + private final long appendTimer; + private final long readCounter; + private final long longsReadCounter; + private final long readTimer; + private final long readLastCounter; + private final long readLastTimer; + + // Partition level stats for the life of the blocked long store (Consistent on open) + private final double avgBlocksAllocated; + private final long maxBlocksAllocated; + private final long sumBlocksAllocated; + // For read only views, AppendCounter numbers are approximate, less than actual + private final double avgAppendCounter; + private final long maxAppendCounter; + private final long sumAppendCounter; + + public static class Adders { + public final LongAdder blockAllocationCounter = new LongAdder(); + public final LongAdder appendCounter = new LongAdder(); + public final LongAdder appendTimer = new LongAdder(); + public final LongAdder readCounter = new LongAdder(); + public final LongAdder longsReadCounter = new LongAdder(); + public 
final LongAdder readTimer = new LongAdder(); + public final LongAdder readLastCounter = new LongAdder(); + public final LongAdder readLastTimer = new LongAdder(); + } + + public BlockedLongMetrics(Adders blockedLongMetricsAdders, LongSummaryStatistics blockedLongAllocatedBlocksStatistics, + LongSummaryStatistics blockedLongAppendCountStatistics) { + this( + blockedLongMetricsAdders.blockAllocationCounter.sum(), + blockedLongMetricsAdders.appendCounter.sum(), + blockedLongMetricsAdders.appendTimer.sum(), + blockedLongMetricsAdders.readCounter.sum(), + blockedLongMetricsAdders.longsReadCounter.sum(), + blockedLongMetricsAdders.readTimer.sum(), + blockedLongMetricsAdders.readLastCounter.sum(), + blockedLongMetricsAdders.readLastTimer.sum(), + + blockedLongAllocatedBlocksStatistics.getAverage(), + blockedLongAllocatedBlocksStatistics.getMax(), + blockedLongAllocatedBlocksStatistics.getSum(), + + blockedLongAppendCountStatistics.getAverage(), + blockedLongAppendCountStatistics.getMax(), + blockedLongAppendCountStatistics.getSum() + ); + } + + private BlockedLongMetrics( + long blockAllocationCounter, + long appendCounter, + long appendTimer, + long readCounter, + long longsReadCounter, + long readTimer, + long readLastCounter, + long readLastTimer, + double avgBlocksAllocated, + long maxBlocksAllocated, + long sumBlocksAllocated, + double avgAppendCounter, + long maxAppendCounter, + long sumAppendCounter + ) { + this.blockAllocationCounter = blockAllocationCounter; + this.appendCounter = appendCounter; + this.appendTimer = appendTimer; + this.readCounter = readCounter; + this.longsReadCounter = longsReadCounter; + this.readTimer = readTimer; + this.readLastCounter = readLastCounter; + this.readLastTimer = readLastTimer; + this.avgBlocksAllocated = avgBlocksAllocated; + this.maxBlocksAllocated = maxBlocksAllocated; + this.sumBlocksAllocated = sumBlocksAllocated; + this.avgAppendCounter = avgAppendCounter; + this.maxAppendCounter = maxAppendCounter; + 
this.sumAppendCounter = sumAppendCounter; + } + + @Override + public String toString() { + return "BlockedLongMetrics{" + + "blockAllocationCounter=" + blockAllocationCounter + + ", appendCounter=" + appendCounter + + ", appendTimer=" + appendTimer + + ", readCounter=" + readCounter + + ", longsReadCounter=" + longsReadCounter + + ", readTimer=" + readTimer + + ", readLastCounter=" + readLastCounter + + ", readLastTimer=" + readLastTimer + + ", avgBlocksAllocated=" + avgBlocksAllocated + + ", maxBlocksAllocated=" + maxBlocksAllocated + + ", sumBlocksAllocated=" + sumBlocksAllocated + + ", avgAppendCounter=" + avgAppendCounter + + ", maxAppendCounter=" + maxAppendCounter + + ", sumAppendCounter=" + sumAppendCounter + + '}'; + } + + public String present(BlockedLongMetrics previous) { + BlockedLongMetrics delta = this.minus(previous); + + return "BlockedLongMetrics: Deltas{" + + String.format("blocks(%5d #), ", delta.blockAllocationCounter) + + String.format("appends(%7.2f us/, %6d #), ", Prefix.NANO.toMicro(delta.appendTimer) / Math.max(1, delta.appendCounter), delta.appendCounter) + + String.format( + "reads(%7.2f us/, %7.2f vals/, %6d #), ", + Prefix.NANO.toMicro(delta.readTimer) / Math.max(1, delta.readCounter), + (double) delta.longsReadCounter / Math.max(1, delta.readCounter), + delta.readCounter + ) + + String.format("readLast(%7.2f us/, %6d #)", Prefix.NANO.toMicro(delta.readLastTimer) / Math.max(1, delta.readLastCounter), delta.readLastCounter) + + "}; Absolute{" + + String.format("blocks(%8.2f avg, %6d max, %8d #), ", avgBlocksAllocated, maxBlocksAllocated, sumBlocksAllocated) + + String.format("appends(%10.2f avg, %10d max, %14d #), ", avgAppendCounter, maxAppendCounter, sumAppendCounter) + + String.format("valsPerBlock(%8.2f avg)", (double) sumAppendCounter / Math.max(1, sumBlocksAllocated)) + + "};"; + } + + public BlockedLongMetrics minus(BlockedLongMetrics other) { + if (Objects.isNull(other)) throw new NullPointerException("BlockedLongMetrics minus 
method argument is null"); + return new BlockedLongMetrics( + blockAllocationCounter - other.blockAllocationCounter, + appendCounter - other.appendCounter, + appendTimer - other.appendTimer, + readCounter - other.readCounter, + longsReadCounter - other.longsReadCounter, + readTimer - other.readTimer, + readLastCounter - other.readLastCounter, + readLastTimer - other.readLastTimer, + avgBlocksAllocated - other.avgBlocksAllocated, + maxBlocksAllocated - other.maxBlocksAllocated, + sumBlocksAllocated - other.sumBlocksAllocated, + avgAppendCounter - other.avgAppendCounter, + maxAppendCounter - other.maxAppendCounter, + sumAppendCounter - other.sumAppendCounter + ); + } +} diff --git a/src/main/java/com/upserve/uppend/metrics/CounterStoreWithMetrics.java b/src/main/java/com/upserve/uppend/metrics/CounterStoreWithMetrics.java index 506cbc9d..cf7619e3 100644 --- a/src/main/java/com/upserve/uppend/metrics/CounterStoreWithMetrics.java +++ b/src/main/java/com/upserve/uppend/metrics/CounterStoreWithMetrics.java @@ -167,4 +167,19 @@ public void close() throws Exception { context.stop(); } } + + @Override + public LookupDataMetrics getLookupDataMetrics() { + return store.getLookupDataMetrics(); + } + + @Override + public LongBlobStoreMetrics getLongBlobStoreMetrics() { + return store.getLongBlobStoreMetrics(); + } + + @Override + public MutableBlobStoreMetrics getMutableBlobStoreMetrics() { + return store.getMutableBlobStoreMetrics(); + } } diff --git a/src/main/java/com/upserve/uppend/metrics/InternalMetrics.java b/src/main/java/com/upserve/uppend/metrics/InternalMetrics.java new file mode 100644 index 00000000..2e999cd9 --- /dev/null +++ b/src/main/java/com/upserve/uppend/metrics/InternalMetrics.java @@ -0,0 +1,65 @@ +package com.upserve.uppend.metrics; + +public interface InternalMetrics> { + String toString(); + String present(T previous); + T minus(T other); + + public enum Prefix{ + NANO(1e-9d), + MICRO(1e-6d), + MILLI(1e-3d), + NONE(1d), + KILO(1e3d), + MEGA(1e6d), + 
GIGA(1e9d); + + private double value; + + Prefix(double value){ + this.value = value; + } + + public double getValue(){ + return value; + } + + public double toNano(double convert){ + return value * convert / NANO.value; + } + + public double toMicro(double convert){ + return value * convert / MICRO.value; + } + + public double toMilli(double convert){ + return value * convert / MILLI.value; + } + + public double toNone(double convert){ + return value * convert / NONE.value; + } + + public double toKilo(double convert){ + return value * convert / KILO.value; + } + + public double toMega(double convert){ + return value * convert / MEGA.value; + } + + public double toGiga(double convert){ + return value * convert / GIGA.value; + } + } + + default String divFormat(double numerator, double denominator, int width){ + return divFormat(numerator, denominator, width, 2); + } + + default String divFormat(double numerator, double denominator, int width, int precision){ + return String.format("%" + width + '.' 
+ precision + 'f', numerator / denominator); + } + + +} diff --git a/src/main/java/com/upserve/uppend/metrics/LongBlobStoreMetrics.java b/src/main/java/com/upserve/uppend/metrics/LongBlobStoreMetrics.java new file mode 100644 index 00000000..35cb9d7a --- /dev/null +++ b/src/main/java/com/upserve/uppend/metrics/LongBlobStoreMetrics.java @@ -0,0 +1,186 @@ +package com.upserve.uppend.metrics; + +import java.util.*; +import java.util.concurrent.atomic.LongAdder; + +public class LongBlobStoreMetrics implements InternalMetrics{ + // Stats summed over all LongBlobStore operations since the Uppend store was opened + private final long bytesAppended; + private final long appendCounter; + private final long appendTimer; + + private final long bytesRead; + private final long readCounter; + private final long readTimer; + + private final long longWrites; + private final long longWritesTimer; + + private final long longReads; + private final long longReadsTimer; + + // Partition level stats for the life of the blocked long store (Consistent on open) + private final double avgLongBlobStoreAllocatedPages; + private final long maxLongBlobStoreAllocatedPages; + private final long sumLongBlobStoreAllocatedPages; + + public static class Adders { + public final LongAdder bytesAppended = new LongAdder(); + public final LongAdder appendCounter = new LongAdder(); + public final LongAdder appendTimer = new LongAdder(); + + public final LongAdder bytesRead = new LongAdder(); + public final LongAdder readCounter = new LongAdder(); + public final LongAdder readTimer = new LongAdder(); + + public final LongAdder longWrites = new LongAdder(); + public final LongAdder longWriteTimer = new LongAdder(); + + public final LongAdder longReads = new LongAdder(); + public final LongAdder longReadTimer = new LongAdder(); + } + + public LongBlobStoreMetrics(Adders blobStoreMetricsAdders, LongSummaryStatistics longblobStoreAllocatedPagesStatistics) { + this( + blobStoreMetricsAdders.bytesAppended.sum(), 
+ blobStoreMetricsAdders.appendCounter.sum(), + blobStoreMetricsAdders.appendTimer.sum(), + + blobStoreMetricsAdders.bytesRead.sum(), + blobStoreMetricsAdders.readCounter.sum(), + blobStoreMetricsAdders.readTimer.sum(), + + blobStoreMetricsAdders.longWrites.sum(), + blobStoreMetricsAdders.longWriteTimer.sum(), + + blobStoreMetricsAdders.longReads.sum(), + blobStoreMetricsAdders.longReadTimer.sum(), + + longblobStoreAllocatedPagesStatistics.getAverage(), + longblobStoreAllocatedPagesStatistics.getMax(), + longblobStoreAllocatedPagesStatistics.getSum() + ); + } + + private LongBlobStoreMetrics( + long bytesAppended, + long appendCounter, + long appendTimer, + + long bytesRead, + long readCounter, + long readTimer, + + long longWrites, + long longWritesTimer, + + long longReads, + long longReadsTimer, + + double avgLongBlobStoreAllocatedPages, + long maxLongBlobStoreAllocatedPages, + long sumLongBlobStoreAllocatedPages + + ) { + this.bytesAppended = bytesAppended; + this.appendCounter = appendCounter; + this.appendTimer = appendTimer; + + this.bytesRead = bytesRead; + this.readCounter = readCounter; + this.readTimer = readTimer; + + this.longWrites = longWrites; + this.longWritesTimer = longWritesTimer; + + this.longReads = longReads; + this.longReadsTimer = longReadsTimer; + + this.avgLongBlobStoreAllocatedPages = avgLongBlobStoreAllocatedPages; + this.maxLongBlobStoreAllocatedPages = maxLongBlobStoreAllocatedPages; + this.sumLongBlobStoreAllocatedPages = sumLongBlobStoreAllocatedPages; + } + + @Override + public String toString() { + return "LongBlobStoreMetrics{" + + "bytesAppended=" + bytesAppended + + ", appendCounter=" + appendCounter + + ", appendTimer=" + appendTimer + + ", bytesRead=" + bytesRead + + ", readCounter=" + readCounter + + ", readTimer=" + readTimer + + ", longWrites=" + longWrites + + ", longWritesTimer=" + longWritesTimer + + ", longReads=" + longReads + + ", longReadsTimer=" + longReadsTimer + + ", avgLongBlobStoreAllocatedPages=" + 
avgLongBlobStoreAllocatedPages + + ", maxLongBlobStoreAllocatedPages=" + maxLongBlobStoreAllocatedPages + + ", sumLongBlobStoreAllocatedPages=" + sumLongBlobStoreAllocatedPages + + '}'; + } + + @Override + public String present(LongBlobStoreMetrics previous) { + LongBlobStoreMetrics delta = this.minus(previous); + + return "LongBlobStoreMetrics: Deltas{" + + String.format("pagesAllocated(%5d), ", delta.sumLongBlobStoreAllocatedPages) + + String.format( + "appends(%6.2f us/, %7.2f bytes/, %6d #), ", + Prefix.NANO.toMicro(delta.appendTimer) / Math.max(1, delta.appendCounter), + (double) delta.bytesAppended / Math.max(1, delta.appendCounter), + delta.appendCounter + ) + + String.format( + "reads(%6.2f us/, %7.2f b/, %6d #), ", + Prefix.NANO.toMicro(delta.readTimer) / Math.max(1, delta.readCounter), + (double) delta.bytesRead / Math.max(1, delta.readCounter), + delta.readCounter + ) + + + String.format( + "writeLongs(%6.2f us/, %6d #), ", + Prefix.NANO.toMicro(delta.longWritesTimer) / Math.max(1, delta.longWrites), + delta.longWrites + ) + + + String.format( + "readLongs(%6.2f us/, %6d #)", + Prefix.NANO.toMicro(delta.longReadsTimer) / Math.max(1, delta.longReads), + delta.longReads + ) + + + "}; Absolute{" + + String.format( + "pageCount(%7.2f avg/, %5d max/, %9d #)", + avgLongBlobStoreAllocatedPages, maxLongBlobStoreAllocatedPages, sumLongBlobStoreAllocatedPages + ) + + "};"; + } + + @Override + public LongBlobStoreMetrics minus(LongBlobStoreMetrics other) { + if (Objects.isNull(other)) throw new NullPointerException("LongBlobStoreMetrics minus method argument is null"); + return new LongBlobStoreMetrics( + bytesAppended - other.bytesAppended, + appendCounter - other.appendCounter, + appendTimer - other.appendTimer, + + bytesRead - other.bytesRead, + readCounter - other.readCounter, + readTimer - other.readTimer, + + longWrites - other.longWrites, + longWritesTimer - other.longWritesTimer, + + longReads - other.longReads, + longReadsTimer - other.longReadsTimer, + + 
avgLongBlobStoreAllocatedPages - other.avgLongBlobStoreAllocatedPages, + maxLongBlobStoreAllocatedPages - other.maxLongBlobStoreAllocatedPages, + sumLongBlobStoreAllocatedPages - other.sumLongBlobStoreAllocatedPages + ); + } +} + diff --git a/src/main/java/com/upserve/uppend/metrics/LookupDataMetrics.java b/src/main/java/com/upserve/uppend/metrics/LookupDataMetrics.java new file mode 100644 index 00000000..21d7c430 --- /dev/null +++ b/src/main/java/com/upserve/uppend/metrics/LookupDataMetrics.java @@ -0,0 +1,143 @@ +package com.upserve.uppend.metrics; + +import java.util.*; +import java.util.concurrent.atomic.LongAdder; + +public class LookupDataMetrics implements InternalMetrics { + + private final long flushedKeyCount; + private final long flushCount; + private final long flushTimer; + + private final long lookupMissCount; + private final long lookupHitCount; + + private final long cacheMissCount; + private final long cacheHitCount; + + private final long findKeyTimer; + + private final double avgLookupDataSize; + private final long maxLookupDataSize; + private final long sumLookupDataSize; + + + public static class Adders { + public final LongAdder flushCounter = new LongAdder(); + public final LongAdder flushedKeyCounter = new LongAdder(); + public final LongAdder flushTimer = new LongAdder(); + + public final LongAdder lookupHitCount = new LongAdder(); + public final LongAdder lookupMissCount = new LongAdder(); + public final LongAdder cacheHitCount = new LongAdder(); + public final LongAdder cacheMissCount = new LongAdder(); + public final LongAdder findKeyTimer = new LongAdder(); + } + + public LookupDataMetrics(Adders lookupDataMetricsAdders, LongSummaryStatistics lookupDataSizeStatistics) { + this( + lookupDataMetricsAdders.flushedKeyCounter.sum(), + lookupDataMetricsAdders.flushCounter.sum(), + lookupDataMetricsAdders.flushTimer.sum(), + lookupDataMetricsAdders.lookupMissCount.sum(), + lookupDataMetricsAdders.lookupHitCount.sum(), + 
lookupDataMetricsAdders.cacheMissCount.sum(), + lookupDataMetricsAdders.cacheHitCount.sum(), + lookupDataMetricsAdders.findKeyTimer.sum(), + lookupDataSizeStatistics.getAverage(), + lookupDataSizeStatistics.getMax(), + lookupDataSizeStatistics.getSum() + ); + } + + private LookupDataMetrics( + long flushedKeyCount, + long flushCount, + long flushTimer, + long lookupMissCount, + long lookupHitCount, + long cacheMissCount, + long cacheHitCount, + long findKeyTimer, + double avgLookupDataSize, + long maxLookupDataSize, + long sumLookupDataSize + ) { + this.flushedKeyCount = flushedKeyCount; + this.flushCount = flushCount; + this.flushTimer = flushTimer; + this.lookupMissCount = lookupMissCount; + this.lookupHitCount = lookupHitCount; + this.cacheMissCount = cacheMissCount; + this.cacheHitCount = cacheHitCount; + this.findKeyTimer = findKeyTimer; + this.avgLookupDataSize = avgLookupDataSize; + this.maxLookupDataSize = maxLookupDataSize; + this.sumLookupDataSize = sumLookupDataSize; + } + + @Override + public String toString() { + return "LookupDataMetrics{" + + "flushedKeyCount=" + flushedKeyCount + + ", flushCount=" + flushCount + + ", flushTimer=" + flushTimer + + ", lookupMissCount=" + lookupMissCount + + ", lookupHitCount=" + lookupHitCount + + ", cacheMissCount=" + cacheMissCount + + ", cacheHitCount=" + cacheHitCount + + ", findKeyTimer=" + findKeyTimer + + ", avgLookupDataSize=" + avgLookupDataSize + + ", maxLookupDataSize=" + maxLookupDataSize + + ", sumLookupDataSize=" + sumLookupDataSize + + '}'; + } + + public String present(LookupDataMetrics previous) { + LookupDataMetrics delta = this.minus(previous); + + return "LookupDataMetrics: Deltas{" + + String.format( + "flush(%7.2f ms/, %6.2f keys/, %5d #), ", + Prefix.NANO.toMilli(delta.flushTimer) / Math.max(1, delta.flushCount), + (double) delta.flushedKeyCount / Math.max(1, delta.flushCount), + delta.flushCount + ) + + String.format("keyLookups(%5.3f%%new, %6d #exist, %6d #new), ", + (double) 
delta.lookupMissCount / Math.max(1, delta.lookupHitCount + delta.lookupMissCount) * 100, + delta.lookupHitCount, delta.lookupMissCount + ) + + String.format("searchCache(%5.3f%%hit, %6d #), ", + (double) delta.cacheHitCount / Math.max(1, delta.cacheHitCount + delta.cacheMissCount) * 100, + delta.cacheHitCount + delta.cacheMissCount + ) + + String.format( + " findKey(%7.2f us/, %5d #), ", + Prefix.NANO.toMicro(delta.findKeyTimer) / Math.max(1, delta.lookupHitCount + delta.lookupMissCount), + delta.lookupHitCount + delta.lookupMissCount + ) + + "}; Absolute{" + + String.format( + "lookupKeys(%7.2f avg keys/, %5d max keys/, %12d #)", + avgLookupDataSize, maxLookupDataSize, sumLookupDataSize + ) + + "};"; + } + + public LookupDataMetrics minus(LookupDataMetrics other) { + if (Objects.isNull(other)) throw new NullPointerException("LookupDataMetrics minus method argument is null"); + return new LookupDataMetrics( + flushedKeyCount - other.flushedKeyCount, + flushCount - other.flushCount, + flushTimer - other.flushTimer, + lookupMissCount - other.lookupMissCount, + lookupHitCount - other.lookupHitCount, + cacheMissCount - other.cacheMissCount, + cacheHitCount - other.cacheHitCount, + findKeyTimer - other.findKeyTimer, + avgLookupDataSize - other.avgLookupDataSize, + maxLookupDataSize - other.maxLookupDataSize, + sumLookupDataSize - other.sumLookupDataSize + ); + } +} diff --git a/src/main/java/com/upserve/uppend/metrics/MutableBlobStoreMetrics.java b/src/main/java/com/upserve/uppend/metrics/MutableBlobStoreMetrics.java new file mode 100644 index 00000000..9b66d8d6 --- /dev/null +++ b/src/main/java/com/upserve/uppend/metrics/MutableBlobStoreMetrics.java @@ -0,0 +1,123 @@ +package com.upserve.uppend.metrics; + +import java.util.*; +import java.util.concurrent.atomic.LongAdder; + +public class MutableBlobStoreMetrics implements InternalMetrics{ + // Stats for summed over all MutableBlobStoreMetrics operations since the Uppend store was opened + private final long 
bytesWritten; + private final long writeCounter; + private final long writeTimer; + private final long bytesRead; + private final long readCounter; + private final long readTimer; + + // Partition level stats for the life of the blocked long store (Consistent on open) + private final double avgMutableBlobStoreAllocatedPages; + private final long maxMutableBlobStoreAllocatedPages; + private final long sumMutableBlobStoreAllocatedPages; + + public static class Adders { + public final LongAdder bytesWritten = new LongAdder(); + public final LongAdder writeCounter = new LongAdder(); + public final LongAdder writeTimer = new LongAdder(); + public final LongAdder bytesRead = new LongAdder(); + public final LongAdder readCounter = new LongAdder(); + public final LongAdder readTimer = new LongAdder(); + } + + + public MutableBlobStoreMetrics(Adders mutableBlobStoreMetricsAdders, LongSummaryStatistics mutableStoreAllocatedPagesStatistics) { + this( + mutableBlobStoreMetricsAdders.bytesWritten.sum(), + mutableBlobStoreMetricsAdders.writeCounter.sum(), + mutableBlobStoreMetricsAdders.writeTimer.sum(), + mutableBlobStoreMetricsAdders.bytesRead.sum(), + mutableBlobStoreMetricsAdders.readCounter.sum(), + mutableBlobStoreMetricsAdders.readTimer.sum(), + mutableStoreAllocatedPagesStatistics.getAverage(), + mutableStoreAllocatedPagesStatistics.getMax(), + mutableStoreAllocatedPagesStatistics.getSum() + ); + } + + private MutableBlobStoreMetrics( + long bytesWritten, + long writeCounter, + long writeTimer, + long bytesRead, + long readCounter, + long readTimer, + double avgMutableBlobStoreAllocatedPages, + long maxMutableBlobStoreAllocatedPages, + long sumMutableBlobStoreAllocatedPages + + ) { + this.bytesWritten = bytesWritten; + this.writeCounter = writeCounter; + this.writeTimer = writeTimer; + this.bytesRead = bytesRead; + this.readCounter = readCounter; + this.readTimer = readTimer; + this.avgMutableBlobStoreAllocatedPages = avgMutableBlobStoreAllocatedPages; + 
this.maxMutableBlobStoreAllocatedPages = maxMutableBlobStoreAllocatedPages; + this.sumMutableBlobStoreAllocatedPages = sumMutableBlobStoreAllocatedPages; + } + + @Override + public String toString() { + return "MutableBlobStoreMetrics{" + + "bytesWritten=" + bytesWritten + + ", writeCounter=" + writeCounter + + ", writeTimer=" + writeTimer + + ", bytesRead=" + bytesRead + + ", readCounter=" + readCounter + + ", readTimer=" + readTimer + + ", avgMutableBlobStoreAllocatedPages=" + avgMutableBlobStoreAllocatedPages + + ", maxMutableBlobStoreAllocatedPages=" + maxMutableBlobStoreAllocatedPages + + ", sumMutableBlobStoreAllocatedPages=" + sumMutableBlobStoreAllocatedPages + + '}'; + } + + @Override + public String present(MutableBlobStoreMetrics previous) { + MutableBlobStoreMetrics delta = this.minus(previous); + + return "MutableBlobStoreMetrics: Deltas{" + + String.format("pagesAllocated(%5d), ", delta.sumMutableBlobStoreAllocatedPages) + + String.format( + "writes(%6.2f us/, %7.2f bytes/, %6d #), ", + InternalMetrics.Prefix.NANO.toMicro(delta.writeTimer) / Math.max(1, delta.writeCounter), + (double) delta.bytesWritten / Math.max(1, delta.writeCounter), + delta.writeCounter + ) + + String.format( + "reads(%6.2f us/, %7.2f b/, %6d #)", + InternalMetrics.Prefix.NANO.toMicro(delta.readTimer) / Math.max(1, delta.readCounter), + (double) delta.bytesRead / Math.max(1, delta.readCounter), + delta.readCounter + ) + + "}; Absolute{" + + String.format( + "pageCount(%7.2f avg/, %5d max/, %9d #)", + avgMutableBlobStoreAllocatedPages, maxMutableBlobStoreAllocatedPages, sumMutableBlobStoreAllocatedPages + ) + + "};"; + } + + @Override + public MutableBlobStoreMetrics minus(MutableBlobStoreMetrics other) { + if (Objects.isNull(other)) throw new NullPointerException("MutableBlobStoreMetrics minus method argument is null"); + return new MutableBlobStoreMetrics( + bytesWritten - other.bytesWritten, + writeCounter - other.writeCounter, + writeTimer - other.writeTimer, + bytesRead - 
other.bytesRead, + readCounter - other.readCounter, + readTimer - other.readTimer, + avgMutableBlobStoreAllocatedPages - other.avgMutableBlobStoreAllocatedPages, + maxMutableBlobStoreAllocatedPages - other.maxMutableBlobStoreAllocatedPages, + sumMutableBlobStoreAllocatedPages - other.sumMutableBlobStoreAllocatedPages + ); + } +} \ No newline at end of file diff --git a/src/test/java/com/upserve/uppend/metrics/AppendOnlyStoreWithMetricsTest.java b/src/test/java/com/upserve/uppend/metrics/AppendOnlyStoreWithMetricsTest.java index a49e5b46..f0d9bb2b 100644 --- a/src/test/java/com/upserve/uppend/metrics/AppendOnlyStoreWithMetricsTest.java +++ b/src/test/java/com/upserve/uppend/metrics/AppendOnlyStoreWithMetricsTest.java @@ -169,9 +169,9 @@ public void testDeregister() { @Test public void testGetBlockLongStats() { - BlockStats v = new BlockStats(0, 0, 0, 0, 0); - when(store.getBlockLongStats()).thenReturn(v); - assertEquals(v, instance.getBlockLongStats()); + BlockedLongMetrics v = new BlockedLongMetrics(new BlockedLongMetrics.Adders(), new LongSummaryStatistics(), new LongSummaryStatistics()); + when(store.getBlockedLongMetrics()).thenReturn(v); + assertEquals(v, instance.getBlockedLongMetrics()); } @Test From 8dc0ee9a4aa67d2049995cf69e4ab6c58550db53 Mon Sep 17 00:00:00 2001 From: David Stuebe Date: Mon, 5 Aug 2019 20:06:27 -0400 Subject: [PATCH 10/22] Use Builder objects to pass metrics adders --- .../uppend/AppendOnlyStoreBuilder.java | 9 +- .../upserve/uppend/AppendStorePartition.java | 139 ++++++++++++------ .../upserve/uppend/CounterStorePartition.java | 63 ++++++-- .../upserve/uppend/FileAppendOnlyStore.java | 64 ++++++-- .../com/upserve/uppend/FileCounterStore.java | 22 ++- .../java/com/upserve/uppend/FileStore.java | 50 ++++++- .../com/upserve/uppend/FileStoreBuilder.java | 13 ++ .../java/com/upserve/uppend/Partition.java | 29 ++-- .../uppend/ReadOnlyAppendOnlyStore.java | 8 +- .../upserve/uppend/ReadOnlyCounterStore.java | 2 +- 
.../com/upserve/uppend/FileStoreTest.java | 3 + 11 files changed, 304 insertions(+), 98 deletions(-) diff --git a/src/main/java/com/upserve/uppend/AppendOnlyStoreBuilder.java b/src/main/java/com/upserve/uppend/AppendOnlyStoreBuilder.java index 2e7a78ee..c3aa32c8 100644 --- a/src/main/java/com/upserve/uppend/AppendOnlyStoreBuilder.java +++ b/src/main/java/com/upserve/uppend/AppendOnlyStoreBuilder.java @@ -1,6 +1,6 @@ package com.upserve.uppend; -import com.upserve.uppend.metrics.AppendOnlyStoreWithMetrics; +import com.upserve.uppend.metrics.*; public class AppendOnlyStoreBuilder extends FileStoreBuilder { // Blocked Longs Config Options @@ -13,6 +13,9 @@ public class AppendOnlyStoreBuilder extends FileStoreBuilder val > 0) - .summaryStatistics(); - - return new PartitionStats(metadataBlobFile.getAllocatedPageCount(), - longKeyFile.getAllocatedPageCount(), - blobFile.getAllocatedPageCount(), - Arrays.stream(lookups).mapToLong(LookupData::getMetadataLookupMissCount).sum(), - Arrays.stream(lookups).mapToLong(LookupData::getMetadataLookupHitCount).sum(), - metadataStats.getSum(), - Arrays.stream(lookups).mapToLong(LookupData::getFindKeyTimer).sum(), - Arrays.stream(lookups).mapToLong(LookupData::getFlushedKeyCount).sum(), - Arrays.stream(lookups).mapToLong(LookupData::getFlushCount).sum(), - metadataStats.getCount(), - metadataStats.getMax() - ); - } - - private AppendStorePartition(VirtualPageFile longKeyFile, VirtualPageFile metadataBlobFile, VirtualPageFile blobsFile, BlockedLongs blocks, int hashCount, int flushThreshold, int reloadInterval, boolean readOnly) { - super(longKeyFile, metadataBlobFile, hashCount, flushThreshold, reloadInterval, readOnly); + BlockedLongs blocks = new BlockedLongs( + blocksFile(partitionDir), + builder.getBlobsPerBlock(), + readOnly, + builder.getBlockedLongMetricsAdders() + ); + + VirtualPageFile blobs = new VirtualPageFile( + blobsFile(partitionDir), + builder.getLookupHashCount(), + builder.getBlobPageSize(), + 
builder.getTargetBufferSize(), + readOnly + ); + VirtualPageFile metadata = new VirtualPageFile( + metadataPath(partitionDir), + builder.getLookupHashCount(), + builder.getMetadataPageSize(), + adjustedTargetBufferSize( + builder.getMetadataPageSize(), + builder.getLookupHashCount(), + builder.getTargetBufferSize() + ), + readOnly + ); + VirtualPageFile keys = new VirtualPageFile( + keysPath(partitionDir), + builder.getLookupHashCount(), + builder.getLookupPageSize(), + adjustedTargetBufferSize( + builder.getLookupPageSize(), + builder.getLookupHashCount(), + builder.getTargetBufferSize() + ), + readOnly + ); + + return new AppendStorePartition(keys, metadata, blobs, blocks, readOnly, builder); + } + + private AppendStorePartition( + VirtualPageFile longKeyFile, VirtualPageFile metadataBlobFile, VirtualPageFile blobsFile, + BlockedLongs blocks, boolean readOnly, AppendOnlyStoreBuilder builder) { + super(longKeyFile, metadataBlobFile, readOnly, builder); this.blocks = blocks; this.blobFile = blobsFile; + blobs = IntStream.range(0, hashCount) - .mapToObj(virtualFileNumber -> new VirtualAppendOnlyBlobStore(virtualFileNumber, blobsFile)) + .mapToObj(virtualFileNumber -> new VirtualAppendOnlyBlobStore( + virtualFileNumber, blobsFile, builder.getBlobStoreMetricsAdders()) + ) .toArray(VirtualAppendOnlyBlobStore[]::new); } @@ -147,10 +194,6 @@ Stream keys() { .flatMap(virtualFileNumber -> lookups[virtualFileNumber].keys().map(LookupKey::string)); } - BlockStats blockedLongStats() { - return blocks.stats(); - } - void clear() throws IOException { longKeyFile.close(); metadataBlobFile.close(); diff --git a/src/main/java/com/upserve/uppend/CounterStorePartition.java b/src/main/java/com/upserve/uppend/CounterStorePartition.java index f7214306..4b0b374a 100644 --- a/src/main/java/com/upserve/uppend/CounterStorePartition.java +++ b/src/main/java/com/upserve/uppend/CounterStorePartition.java @@ -16,29 +16,68 @@ public class CounterStorePartition extends Partition { private static 
final Logger log = org.slf4j.LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); - public static CounterStorePartition createPartition(Path parentDir, String partition, int hashCount, int targetBufferSize, int flushThreshold, int reloadInterval, int metadataPageSize, int keyPageSize) { + public static CounterStorePartition createPartition(Path parentDir, String partition, CounterStoreBuilder builder) { Path partitionDir = validatePartition(parentDir, partition); - VirtualPageFile metadata = new VirtualPageFile(metadataPath(partitionDir), hashCount, metadataPageSize, adjustedTargetBufferSize(metadataPageSize, hashCount, targetBufferSize), false); - VirtualPageFile keys = new VirtualPageFile(keysPath(partitionDir), hashCount, keyPageSize, adjustedTargetBufferSize(keyPageSize, hashCount, targetBufferSize), false); - - return new CounterStorePartition(keys, metadata, hashCount, flushThreshold, reloadInterval, false); + VirtualPageFile metadata = new VirtualPageFile( + metadataPath(partitionDir), + builder.getLookupHashCount(), + builder.getMetadataPageSize(), + adjustedTargetBufferSize( + builder.getMetadataPageSize(), + builder.getLookupHashCount(), + builder.getTargetBufferSize() + ), + false + ); + VirtualPageFile keys = new VirtualPageFile( + keysPath(partitionDir), + builder.getLookupHashCount(), + builder.getLookupPageSize(), + adjustedTargetBufferSize( + builder.getLookupPageSize(), + builder.getLookupHashCount(), + builder.getTargetBufferSize() + ), + false + ); + + return new CounterStorePartition(keys, metadata, false, builder); } - public static CounterStorePartition openPartition(Path partentDir, String partition, int hashCount, int targetBufferSize, int flushThreshold, int reloadInterval, int metadataPageSize, int keyPageSize, boolean readOnly) { + public static CounterStorePartition openPartition(Path partentDir, String partition, boolean readOnly, CounterStoreBuilder builder) { validatePartition(partition); Path partitiondDir = 
partentDir.resolve(partition); if (!(Files.exists(metadataPath(partitiondDir)) && Files.exists(keysPath(partitiondDir)))) return null; - VirtualPageFile metadata = new VirtualPageFile(metadataPath(partitiondDir), hashCount, metadataPageSize, adjustedTargetBufferSize(metadataPageSize, hashCount, targetBufferSize), readOnly); - VirtualPageFile keys = new VirtualPageFile(keysPath(partitiondDir), hashCount, keyPageSize, targetBufferSize, readOnly); - - return new CounterStorePartition(keys, metadata, hashCount, flushThreshold, reloadInterval, false); + VirtualPageFile metadata = new VirtualPageFile( + metadataPath(partitiondDir), + builder.getLookupHashCount(), + builder.getMetadataPageSize(), + adjustedTargetBufferSize( + builder.getMetadataPageSize(), + builder.getLookupHashCount(), + builder.getTargetBufferSize() + ), + readOnly + ); + VirtualPageFile keys = new VirtualPageFile( + keysPath(partitiondDir), + builder.getLookupHashCount(), + builder.getLookupPageSize(), + adjustedTargetBufferSize( + builder.getLookupPageSize(), + builder.getLookupHashCount(), + builder.getTargetBufferSize() + ), + readOnly); + + return new CounterStorePartition(keys, metadata, readOnly, builder); } - private CounterStorePartition(VirtualPageFile longKeyFile, VirtualPageFile metadataBlobFile, int hashCount, int flushThreshold, int reloadInterval, boolean readOnly) { - super(longKeyFile, metadataBlobFile, hashCount, flushThreshold, reloadInterval, readOnly); + private CounterStorePartition(VirtualPageFile longKeyFile, VirtualPageFile metadataBlobFile, boolean readOnly, CounterStoreBuilder builder) { + super(longKeyFile, metadataBlobFile, readOnly, builder); } public Long set(String key, long value) { diff --git a/src/main/java/com/upserve/uppend/FileAppendOnlyStore.java b/src/main/java/com/upserve/uppend/FileAppendOnlyStore.java index cba30293..86f72699 100644 --- a/src/main/java/com/upserve/uppend/FileAppendOnlyStore.java +++ b/src/main/java/com/upserve/uppend/FileAppendOnlyStore.java 
@@ -1,5 +1,8 @@ package com.upserve.uppend; +import com.upserve.uppend.lookup.LookupData; +import com.upserve.uppend.metrics.*; +import com.upserve.uppend.metrics.LookupDataMetrics; import org.slf4j.Logger; import java.lang.invoke.MethodHandles; @@ -13,12 +16,18 @@ public class FileAppendOnlyStore extends FileStore impleme private final Function openPartitionFunction; private final Function createPartitionFunction; + final BlobStoreMetrics.Adders blobStoreMetricsAdders; + final BlockedLongMetrics.Adders blockedLongMetricsAdders; + FileAppendOnlyStore(boolean readOnly, AppendOnlyStoreBuilder builder) { - super(builder.getDir(), builder.getFlushDelaySeconds(), builder.getPartitionCount(), readOnly, builder.getStoreName()); + super(readOnly, builder); + + openPartitionFunction = partitionKey -> AppendStorePartition.openPartition(partitionsDir, partitionKey, readOnly, builder); - openPartitionFunction = partitionKey -> AppendStorePartition.openPartition(partitionsDir, partitionKey, builder.getLookupHashCount(), builder.getTargetBufferSize(), builder.getFlushThreshold(), builder.getMetadataTTL(), builder.getMetadataPageSize(), builder.getBlobsPerBlock(), builder.getBlobPageSize(), builder.getLookupPageSize(), readOnly); + createPartitionFunction = partitionKey -> AppendStorePartition.createPartition(partitionsDir, partitionKey, builder); - createPartitionFunction = partitionKey -> AppendStorePartition.createPartition(partitionsDir, partitionKey, builder.getLookupHashCount(), builder.getTargetBufferSize(), builder.getFlushThreshold(), builder.getMetadataTTL(), builder.getMetadataPageSize(), builder.getBlobsPerBlock(), builder.getBlobPageSize(), builder.getLookupPageSize()); + blobStoreMetricsAdders = builder.getBlobStoreMetricsAdders(); + blockedLongMetricsAdders = builder.getBlockedLongMetricsAdders(); } @Override @@ -26,16 +35,6 @@ public String getName() { return name; } - @Override - public BlockStats getBlockLongStats() { - return 
partitionMap.values().parallelStream().map(AppendStorePartition::blockedLongStats).reduce(BlockStats.ZERO_STATS, BlockStats::add); - } - - @Override - public PartitionStats getPartitionStats(){ - return partitionMap.values().parallelStream().map(AppendStorePartition::getPartitionStats).reduce(PartitionStats.ZERO_STATS, PartitionStats::add); - } - @Override public long keyCount() { return streamPartitions() @@ -102,4 +101,43 @@ Function getOpenPartitionFunction() { Function getCreatePartitionFunction() { return createPartitionFunction; } + + @Override + public BlockedLongMetrics getBlockedLongMetrics() { + LongSummaryStatistics blockedLongAllocatedBlocksStatistics = streamPartitions() + .mapToLong(partition -> partition.blocks.getBlockCount()) + .summaryStatistics(); + + LongSummaryStatistics blockedLongAppendCountStatistics = streamPartitions() + .mapToLong(partition -> partition.blocks.getCount()) + .summaryStatistics(); + + return new BlockedLongMetrics( + blockedLongMetricsAdders, blockedLongAllocatedBlocksStatistics, blockedLongAppendCountStatistics + ); + } + + @Override + public BlobStoreMetrics getBlobStoreMetrics() { + LongSummaryStatistics blobStoreAllocatedPagesStatistics = streamPartitions() + .mapToLong(partition -> partition.blobFile.getAllocatedPageCount()) + .summaryStatistics(); + + return new BlobStoreMetrics(blobStoreMetricsAdders, blobStoreAllocatedPagesStatistics); + } + + @Override + public LookupDataMetrics getLookupDataMetrics() { + return super.getLookupDataMetrics(); + } + + @Override + public MutableBlobStoreMetrics getMutableBlobStoreMetrics() { + return super.getMutableBlobStoreMetrics(); + } + + @Override + public LongBlobStoreMetrics getLongBlobStoreMetrics() { + return super.getLongBlobStoreMetrics(); + } } diff --git a/src/main/java/com/upserve/uppend/FileCounterStore.java b/src/main/java/com/upserve/uppend/FileCounterStore.java index d32bf9e7..fe6d195e 100644 --- a/src/main/java/com/upserve/uppend/FileCounterStore.java +++ 
b/src/main/java/com/upserve/uppend/FileCounterStore.java @@ -1,5 +1,6 @@ package com.upserve.uppend; +import com.upserve.uppend.metrics.*; import org.slf4j.Logger; import java.lang.invoke.MethodHandles; @@ -14,10 +15,10 @@ public class FileCounterStore extends FileStore implement private final Function createPartitionFunction; FileCounterStore(boolean readOnly, CounterStoreBuilder builder) { - super(builder.getDir(), builder.getFlushDelaySeconds(), builder.getPartitionCount(), readOnly, builder.getStoreName()); + super(readOnly, builder); - openPartitionFunction = partitionKey -> CounterStorePartition.openPartition(partitionsDir, partitionKey, builder.getLookupHashCount(), builder.getTargetBufferSize(), builder.getFlushThreshold(), builder.getMetadataTTL(), builder.getMetadataPageSize(), builder.getLookupPageSize(), readOnly); - createPartitionFunction = partitionKey -> CounterStorePartition.createPartition(partitionsDir, partitionKey, builder.getLookupHashCount(), builder.getTargetBufferSize(), builder.getFlushThreshold(), builder.getMetadataTTL(), builder.getMetadataPageSize(), builder.getLookupPageSize()); + openPartitionFunction = partitionKey -> CounterStorePartition.openPartition(partitionsDir, partitionKey, readOnly, builder); + createPartitionFunction = partitionKey -> CounterStorePartition.createPartition(partitionsDir, partitionKey, builder); } @Override @@ -82,4 +83,19 @@ Function getCreatePartitionFunction() { return createPartitionFunction; } + + @Override + public LookupDataMetrics getLookupDataMetrics(){ + return super.getLookupDataMetrics(); + } + + @Override + public LongBlobStoreMetrics getLongBlobStoreMetrics(){ + return super.getLongBlobStoreMetrics(); + } + + @Override + public MutableBlobStoreMetrics getMutableBlobStoreMetrics(){ + return super.getMutableBlobStoreMetrics(); + } } diff --git a/src/main/java/com/upserve/uppend/FileStore.java b/src/main/java/com/upserve/uppend/FileStore.java index 8bb455b6..095bc302 100644 --- 
a/src/main/java/com/upserve/uppend/FileStore.java +++ b/src/main/java/com/upserve/uppend/FileStore.java @@ -1,6 +1,8 @@ package com.upserve.uppend; import com.google.common.hash.*; +import com.upserve.uppend.lookup.LookupData; +import com.upserve.uppend.metrics.*; import com.upserve.uppend.util.SafeDeleting; import org.slf4j.Logger; @@ -34,22 +36,29 @@ abstract class FileStore implements AutoCloseable, Register private final int partitionCount; private final boolean doHashPartitionValues; + final LookupDataMetrics.Adders lookupDataMetricsAdders; + final LongBlobStoreMetrics.Adders longBlobStoreMetricsAdders; + final MutableBlobStoreMetrics.Adders mutableBlobStoreMetricsAdders; + final AtomicBoolean isClosed; private static final int PARTITION_HASH_SEED = 626433832; private final HashFunction hashFunction = Hashing.murmur3_32(PARTITION_HASH_SEED); - FileStore(Path dir, int flushDelaySeconds, int partitionCount, boolean readOnly, String name) { + FileStore(boolean readOnly, FileStoreBuilder builder) { + + dir = builder.getDir(); if (dir == null) { throw new NullPointerException("null dir"); } - this.dir = dir; try { Files.createDirectories((Files.isSymbolicLink(dir) ? 
Files.readSymbolicLink(dir).toRealPath() : dir)); } catch (IOException e) { throw new UncheckedIOException("unable to mkdirs: " + dir, e); } partitionsDir = dir.resolve("partitions"); + + int partitionCount = builder.getPartitionCount(); if (partitionCount > MAX_NUM_PARTITIONS) { throw new IllegalArgumentException("bad partition count: greater than max (" + MAX_NUM_PARTITIONS + "): " + partitionCount); } @@ -64,9 +73,9 @@ abstract class FileStore implements AutoCloseable, Register partitionMap = new ConcurrentHashMap<>(partitionCount); doHashPartitionValues = true; } - this.name = name; + this.name = builder.getStoreName(); - this.flushDelaySeconds = flushDelaySeconds; + flushDelaySeconds = builder.getFlushDelaySeconds(); if (!readOnly && flushDelaySeconds > 0) register(flushDelaySeconds); this.readOnly = readOnly; @@ -82,6 +91,10 @@ abstract class FileStore implements AutoCloseable, Register } isClosed = new AtomicBoolean(false); + + this.lookupDataMetricsAdders = builder.getLookupDataMetricsAdders(); + this.longBlobStoreMetricsAdders = builder.getLongBlobStoreMetricsAdders(); + this.mutableBlobStoreMetricsAdders = builder.getMutableBlobStoreMetricsAdders(); } String partitionHash(String partition) { @@ -240,4 +253,33 @@ private void closePartitions(){ } partitionMap.clear(); } + + public LookupDataMetrics getLookupDataMetrics(){ + LongSummaryStatistics metaDataSizeStats = streamPartitions() + .flatMapToLong( + partition -> Arrays.stream(partition.lookups) + .mapToLong(LookupData::getMetadataSize) + ) + .summaryStatistics(); + + return new LookupDataMetrics(lookupDataMetricsAdders, metaDataSizeStats); + } + + public MutableBlobStoreMetrics getMutableBlobStoreMetrics() { + LongSummaryStatistics mutableBlobStoreAllocatedPagesStatistics = streamPartitions() + .mapToLong(partition -> partition.metadataBlobFile.getAllocatedPageCount()) + .summaryStatistics(); + + return new MutableBlobStoreMetrics(mutableBlobStoreMetricsAdders, 
mutableBlobStoreAllocatedPagesStatistics); + } + + public LongBlobStoreMetrics getLongBlobStoreMetrics() { + LongSummaryStatistics longBlobStoreAllocatedPagesStatistics = streamPartitions() + .mapToLong(partition -> partition.longKeyFile.getAllocatedPageCount()) + .summaryStatistics(); + + return new LongBlobStoreMetrics(longBlobStoreMetricsAdders, longBlobStoreAllocatedPagesStatistics); + } + + } diff --git a/src/main/java/com/upserve/uppend/FileStoreBuilder.java b/src/main/java/com/upserve/uppend/FileStoreBuilder.java index 4177cdf1..a40c1835 100644 --- a/src/main/java/com/upserve/uppend/FileStoreBuilder.java +++ b/src/main/java/com/upserve/uppend/FileStoreBuilder.java @@ -1,6 +1,8 @@ package com.upserve.uppend; import com.codahale.metrics.MetricRegistry; +import com.upserve.uppend.metrics.*; + import java.nio.file.Path; public class FileStoreBuilder> { @@ -38,6 +40,11 @@ public class FileStoreBuilder> { private MetricRegistry cacheMetricsRegistry = null; private boolean cacheMetrics = false; + private final LookupDataMetrics.Adders lookupDataMetricsAdders = new LookupDataMetrics.Adders(); + private final MutableBlobStoreMetrics.Adders mutableBlobStoreMetricsAdders = new MutableBlobStoreMetrics.Adders(); + private final LongBlobStoreMetrics.Adders longBlobStoreMetricsAdders = new LongBlobStoreMetrics.Adders(); + + // Long lookup Cache Options @SuppressWarnings("unchecked") public T withLongLookupHashCount(int longLookupHashCount) { @@ -172,6 +179,12 @@ public String getStoreName() { public String getMetricsRootName(){ return metricsRootName; } + public LookupDataMetrics.Adders getLookupDataMetricsAdders(){ return lookupDataMetricsAdders; } + + public MutableBlobStoreMetrics.Adders getMutableBlobStoreMetricsAdders() { return mutableBlobStoreMetricsAdders; } + + public LongBlobStoreMetrics.Adders getLongBlobStoreMetricsAdders() { return longBlobStoreMetricsAdders; } + @Override public String toString() { return "FileStoreBuilder{" + diff --git 
a/src/main/java/com/upserve/uppend/Partition.java b/src/main/java/com/upserve/uppend/Partition.java index 80c5cdea..856679c1 100644 --- a/src/main/java/com/upserve/uppend/Partition.java +++ b/src/main/java/com/upserve/uppend/Partition.java @@ -26,11 +26,12 @@ public abstract class Partition implements Flushable, Closeable, Trimmable { final LookupData[] lookups; - Partition(VirtualPageFile longKeyFile, VirtualPageFile metadataBlobFile, int hashCount, int flushThreshold, int reloadInterval, boolean readOnly) { + Partition(VirtualPageFile longKeyFile, VirtualPageFile metadataBlobFile, boolean readOnly, FileStoreBuilder builder) { + this.longKeyFile = longKeyFile; this.metadataBlobFile = metadataBlobFile; - this.hashCount = hashCount; + this.hashCount = builder.getLookupHashCount(); this.readOnly = readOnly; if (hashCount < 1) { @@ -46,10 +47,10 @@ public abstract class Partition implements Flushable, Closeable, Trimmable { hashFunction = Hashing.murmur3_32(HASH_SEED); } - IntFunction constructorFuntion = lookupDataFunction(readOnly, flushThreshold, reloadInterval); + IntFunction constructorFunction = lookupDataFunction(readOnly, builder.getFlushThreshold(), builder.getMetadataTTL(), builder); lookups = IntStream.range(0, hashCount) - .mapToObj(constructorFuntion) + .mapToObj(constructorFunction) .toArray(LookupData[]::new); } @@ -64,18 +65,20 @@ static Path validatePartition(Path parentDir, String partition) { return partitionDir; } - private IntFunction lookupDataFunction(boolean readOnly, int flushThreshold, int relaodInterval) { + private IntFunction lookupDataFunction(boolean readOnly, int flushThreshold, int relaodInterval, FileStoreBuilder builder) { if (readOnly) { return virtualFileNumber -> LookupData.lookupReader( - new VirtualLongBlobStore(virtualFileNumber, longKeyFile), - new VirtualMutableBlobStore(virtualFileNumber, metadataBlobFile), - relaodInterval + new VirtualLongBlobStore(virtualFileNumber, longKeyFile, builder.getLongBlobStoreMetricsAdders()), + 
new VirtualMutableBlobStore(virtualFileNumber, metadataBlobFile, builder.getMutableBlobStoreMetricsAdders()), + relaodInterval, + builder.getLookupDataMetricsAdders() ); } else { return virtualFileNumber -> LookupData.lookupWriter( - new VirtualLongBlobStore(virtualFileNumber, longKeyFile), - new VirtualMutableBlobStore(virtualFileNumber, metadataBlobFile), - flushThreshold + new VirtualLongBlobStore(virtualFileNumber, longKeyFile, builder.getLongBlobStoreMetricsAdders()), + new VirtualMutableBlobStore(virtualFileNumber, metadataBlobFile, builder.getMutableBlobStoreMetricsAdders()), + flushThreshold, + builder.getLookupDataMetricsAdders() ); } } @@ -145,6 +148,10 @@ public void close() throws IOException { metadataBlobFile.close(); } + public VirtualPageFile getLongKeyFile() { return longKeyFile; } + + public VirtualPageFile getMetadataBlobFile() {return metadataBlobFile; } + private static boolean isValidPartitionCharStart(char c) { return Character.isJavaIdentifierPart(c); } diff --git a/src/main/java/com/upserve/uppend/ReadOnlyAppendOnlyStore.java b/src/main/java/com/upserve/uppend/ReadOnlyAppendOnlyStore.java index 660e346c..17fdb224 100644 --- a/src/main/java/com/upserve/uppend/ReadOnlyAppendOnlyStore.java +++ b/src/main/java/com/upserve/uppend/ReadOnlyAppendOnlyStore.java @@ -1,5 +1,7 @@ package com.upserve.uppend; +import com.upserve.uppend.metrics.*; + import java.util.Map; import java.util.function.BiConsumer; import java.util.stream.Stream; @@ -7,7 +9,7 @@ /** * Reader interface to an append-only store */ -public interface ReadOnlyAppendOnlyStore extends Trimmable, AutoCloseable { +public interface ReadOnlyAppendOnlyStore extends Trimmable, AutoCloseable, KeyStoreMetrics, AppendStoreMetrics { /** * Read byte arrays that have been stored under a given partition and key in * parallel @@ -66,9 +68,5 @@ public interface ReadOnlyAppendOnlyStore extends Trimmable, AutoCloseable { */ void scan(BiConsumer> callback); - BlockStats getBlockLongStats(); - - 
PartitionStats getPartitionStats(); - long keyCount(); } diff --git a/src/main/java/com/upserve/uppend/ReadOnlyCounterStore.java b/src/main/java/com/upserve/uppend/ReadOnlyCounterStore.java index 56ee645a..cffeebd4 100644 --- a/src/main/java/com/upserve/uppend/ReadOnlyCounterStore.java +++ b/src/main/java/com/upserve/uppend/ReadOnlyCounterStore.java @@ -4,7 +4,7 @@ import java.util.function.ObjLongConsumer; import java.util.stream.Stream; -public interface ReadOnlyCounterStore extends Trimmable, AutoCloseable { +public interface ReadOnlyCounterStore extends Trimmable, AutoCloseable, KeyStoreMetrics { /** * Get the value for a given partition and key * diff --git a/src/test/java/com/upserve/uppend/FileStoreTest.java b/src/test/java/com/upserve/uppend/FileStoreTest.java index 524b955c..2a88b8a3 100644 --- a/src/test/java/com/upserve/uppend/FileStoreTest.java +++ b/src/test/java/com/upserve/uppend/FileStoreTest.java @@ -208,6 +208,7 @@ public void testReaderWriter() throws InterruptedException { writer.append("foo", "bar", "abc".getBytes()); writer.flush(); + // The first time we load a partition it will get the latest flush keys. assertEquals( Collections.singletonList(ByteBuffer.wrap("abc".getBytes())), reader.read("foo","bar").map(ByteBuffer::wrap).collect(Collectors.toList()) @@ -220,12 +221,14 @@ public void testReaderWriter() throws InterruptedException { writer.append("foo", "baz", "def".getBytes()); writer.flush(); + // The second time we read a partition, it will not see new keys till the metadataTTL expires assertEquals(0, reader.read("foo","baz").count()); assertEquals( Collections.singletonList(ByteBuffer.wrap("abc".getBytes())), reader.scan().flatMap(Map.Entry::getValue).map(ByteBuffer::wrap).collect(Collectors.toList()) ); + // Or we force the metadata to reload (check LookupData::trim!)! 
reader.trim(); assertEquals( From a04c3b890f4eac5a4fc5d181b4704939c32eac46 Mon Sep 17 00:00:00 2001 From: David Stuebe Date: Mon, 5 Aug 2019 20:07:05 -0400 Subject: [PATCH 11/22] Add metrics to blob stores --- .../blobs/VirtualAppendOnlyBlobStore.java | 22 +++++++++++-- .../uppend/blobs/VirtualLongBlobStore.java | 33 ++++++++++++++++--- .../uppend/blobs/VirtualMutableBlobStore.java | 29 +++++++++++++++- 3 files changed, 77 insertions(+), 7 deletions(-) diff --git a/src/main/java/com/upserve/uppend/blobs/VirtualAppendOnlyBlobStore.java b/src/main/java/com/upserve/uppend/blobs/VirtualAppendOnlyBlobStore.java index c29b9d22..30160236 100644 --- a/src/main/java/com/upserve/uppend/blobs/VirtualAppendOnlyBlobStore.java +++ b/src/main/java/com/upserve/uppend/blobs/VirtualAppendOnlyBlobStore.java @@ -1,5 +1,6 @@ package com.upserve.uppend.blobs; +import com.upserve.uppend.metrics.BlobStoreMetrics; import org.slf4j.Logger; import java.lang.invoke.MethodHandles; @@ -7,14 +8,27 @@ public class VirtualAppendOnlyBlobStore extends VirtualPageFileIO { private static final Logger log = org.slf4j.LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); + final BlobStoreMetrics.Adders blobStoreMetricsAdders; + + public VirtualAppendOnlyBlobStore(int virtualFileNumber, VirtualPageFile virtualPageFile) { + this(virtualFileNumber, virtualPageFile, new BlobStoreMetrics.Adders()); + } + + public VirtualAppendOnlyBlobStore(int virtualFileNumber, VirtualPageFile virtualPageFile, BlobStoreMetrics.Adders blobStoreMetricsAdders) { super(virtualFileNumber, virtualPageFile); + this.blobStoreMetricsAdders = blobStoreMetricsAdders; } public long append(byte[] bytes) { - final long pos = appendPosition(recordSize(bytes)); + final long tic = System.nanoTime(); + final int size = recordSize(bytes); + final long pos = appendPosition(size); write(pos, byteRecord(bytes)); if (log.isTraceEnabled()) log.trace("appended {} bytes to {} at pos {}", bytes.length, virtualFileNumber, pos); + 
blobStoreMetricsAdders.appendCounter.increment(); + blobStoreMetricsAdders.bytesAppended.add(size); + blobStoreMetricsAdders.appendTimer.add(System.nanoTime() - tic); return pos; } @@ -31,12 +45,16 @@ public long getPosition() { * @return the byte array blob */ public byte[] read(long pos) { + final long tic = System.nanoTime(); if (log.isTraceEnabled()) log.trace("read mapped from {} @ {}", virtualFileNumber, pos); int size = readInt(pos); byte[] buf = new byte[size]; super.read(pos + 4, buf); - if (log.isTraceEnabled()) log.trace("read mapped {} bytes from {} @ {}", size, virtualFileNumber, pos); + + blobStoreMetricsAdders.readCounter.increment(); + blobStoreMetricsAdders.bytesRead.add(recordSize(buf)); + blobStoreMetricsAdders.readTimer.add(System.nanoTime() - tic); return buf; } diff --git a/src/main/java/com/upserve/uppend/blobs/VirtualLongBlobStore.java b/src/main/java/com/upserve/uppend/blobs/VirtualLongBlobStore.java index 600142cf..e0f98aba 100644 --- a/src/main/java/com/upserve/uppend/blobs/VirtualLongBlobStore.java +++ b/src/main/java/com/upserve/uppend/blobs/VirtualLongBlobStore.java @@ -1,6 +1,7 @@ package com.upserve.uppend.blobs; import com.google.common.collect.Maps; +import com.upserve.uppend.metrics.LongBlobStoreMetrics; import org.slf4j.Logger; import java.lang.invoke.MethodHandles; @@ -14,15 +15,28 @@ public class VirtualLongBlobStore extends VirtualPageFileIO { private static final Logger log = org.slf4j.LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); - public VirtualLongBlobStore(int virtualFileNumber, VirtualPageFile virtualPageFile) { + final LongBlobStoreMetrics.Adders longBlobStoreMetricsAdders; + + + public VirtualLongBlobStore(int virtualFileNumber, VirtualPageFile virtualPageFile, LongBlobStoreMetrics.Adders longBlobStoreMetricsAdders) { super(virtualFileNumber, virtualPageFile); + this.longBlobStoreMetricsAdders = longBlobStoreMetricsAdders; + } + + public VirtualLongBlobStore(int virtualFileNumber, VirtualPageFile 
virtualPageFile) { + this(virtualFileNumber, virtualPageFile, new LongBlobStoreMetrics.Adders()); } public long append(long val, byte[] bytes) { + final long tic = System.nanoTime(); + final int size = recordSize(bytes); // Ensures that the long value is aligned with a single page. - final long pos = appendPageAlignedPosition(recordSize(bytes), 4, 12); - + final long pos = appendPageAlignedPosition(size, 4, 12); write(pos, byteRecord(val, bytes)); + + longBlobStoreMetricsAdders.appendCounter.increment(); + longBlobStoreMetricsAdders.bytesAppended.add(size); + longBlobStoreMetricsAdders.appendTimer.add(System.nanoTime() - tic); return pos; } @@ -31,18 +45,29 @@ public long getPosition() { } public void writeLong(long pos, long val) { + final long tic = System.nanoTime(); super.writeLong(pos + 4, val); + longBlobStoreMetricsAdders.longWrites.increment(); + longBlobStoreMetricsAdders.longWriteTimer.add(System.nanoTime() - tic); } public long readLong(long pos) { - return super.readLong(pos + 4); + final long tic = System.nanoTime(); + final long result = super.readLong(pos + 4); + longBlobStoreMetricsAdders.longReads.increment(); + longBlobStoreMetricsAdders.longReadTimer.add(System.nanoTime() - tic); + return result; } public byte[] readBlob(long pos) { + final long tic = System.nanoTime(); int size = readInt(pos); byte[] buf = new byte[size]; read(pos + 12, buf); + longBlobStoreMetricsAdders.readCounter.increment(); + longBlobStoreMetricsAdders.bytesRead.add(recordSize(buf)); + longBlobStoreMetricsAdders.readTimer.add(System.nanoTime() - tic); return buf; } diff --git a/src/main/java/com/upserve/uppend/blobs/VirtualMutableBlobStore.java b/src/main/java/com/upserve/uppend/blobs/VirtualMutableBlobStore.java index 0143db77..58c72c1e 100644 --- a/src/main/java/com/upserve/uppend/blobs/VirtualMutableBlobStore.java +++ b/src/main/java/com/upserve/uppend/blobs/VirtualMutableBlobStore.java @@ -1,6 +1,7 @@ package com.upserve.uppend.blobs; import com.google.common.hash.*; 
+import com.upserve.uppend.metrics.MutableBlobStoreMetrics; import org.slf4j.Logger; import java.lang.invoke.MethodHandles; @@ -11,19 +12,38 @@ public class VirtualMutableBlobStore extends VirtualPageFileIO { private static final HashFunction hashFunction = Hashing.murmur3_32(); + final MutableBlobStoreMetrics.Adders mutableBlobStoreMetricsAdders; + public VirtualMutableBlobStore(int virtualFileNumber, VirtualPageFile virtualPageFile) { + this(virtualFileNumber, virtualPageFile, new MutableBlobStoreMetrics.Adders()); + } + + public VirtualMutableBlobStore(int virtualFileNumber, VirtualPageFile virtualPageFile, MutableBlobStoreMetrics.Adders mutableBlobStoreMetricsAdders) { super(virtualFileNumber, virtualPageFile); + this.mutableBlobStoreMetricsAdders = mutableBlobStoreMetricsAdders; } public void write(long position, byte[] bytes) { + final long tic = System.nanoTime(); + final int size = recordSize(bytes); super.write(position, byteRecord(bytes)); + mutableBlobStoreMetricsAdders.writeCounter.increment(); + mutableBlobStoreMetricsAdders.bytesWritten.add(size); + mutableBlobStoreMetricsAdders.writeTimer.add(System.nanoTime() - tic); } public boolean isPageAllocated(long position) { return super.isPageAllocated(position); } + public byte[] readChecksum(long pos){ + byte[] checksum = new byte[4]; + read(pos + 4, checksum); + return checksum; + } + public byte[] read(long pos) { + final long tic = System.nanoTime(); if (log.isTraceEnabled()) log.trace("read mapped from {} @ {}", virtualFileNumber, pos); int size = readInt(pos); byte[] buf = new byte[size]; @@ -35,6 +55,9 @@ public byte[] read(long pos) { if (log.isTraceEnabled()) log.trace("read mapped {} bytes from {} @ {}", size, virtualFileNumber, pos); if (Arrays.equals(checksum, hashFunction.hashBytes(buf).asBytes())) { + mutableBlobStoreMetricsAdders.bytesRead.add(recordSize(buf)); + mutableBlobStoreMetricsAdders.readCounter.increment(); + mutableBlobStoreMetricsAdders.readTimer.add(System.nanoTime() - tic); 
return buf; } else { log.warn("Read at {} got size {}, checksum {} did not match bytes starting with {} (upto first 10)", @@ -47,10 +70,14 @@ private static int recordSize(byte[] inputBytes) { return inputBytes.length + 8; } + static byte[] byteChecksum(byte[] inputBytes) { + return hashFunction.hashBytes(inputBytes).asBytes(); + } + private static byte[] byteRecord(byte[] inputBytes) { byte[] result = new byte[recordSize(inputBytes)]; System.arraycopy(int2bytes(inputBytes.length), 0, result, 0, 4); - System.arraycopy(hashFunction.hashBytes(inputBytes).asBytes(), 0, result, 4, 4); + System.arraycopy(byteChecksum(inputBytes), 0, result, 4, 4); System.arraycopy(inputBytes, 0, result, 8, inputBytes.length); return result; From 9d2484b266530f78cab70c822d7ea1832ff42e55 Mon Sep 17 00:00:00 2001 From: David Stuebe Date: Mon, 5 Aug 2019 20:08:42 -0400 Subject: [PATCH 12/22] Add metrics to lookupdata. Fix reload. Add hash check when loading keys. --- .../com/upserve/uppend/lookup/LookupData.java | 150 +++++------ .../uppend/lookup/LookupDataIterator.java | 2 - .../upserve/uppend/lookup/LookupMetadata.java | 243 +++++++++--------- .../upserve/uppend/lookup/LookupDataTest.java | 4 +- 4 files changed, 194 insertions(+), 205 deletions(-) diff --git a/src/main/java/com/upserve/uppend/lookup/LookupData.java b/src/main/java/com/upserve/uppend/lookup/LookupData.java index 1a5add2b..5225e203 100644 --- a/src/main/java/com/upserve/uppend/lookup/LookupData.java +++ b/src/main/java/com/upserve/uppend/lookup/LookupData.java @@ -3,6 +3,7 @@ import com.google.common.collect.Maps; import com.upserve.uppend.*; import com.upserve.uppend.blobs.*; +import com.upserve.uppend.metrics.LookupDataMetrics; import org.slf4j.Logger; import java.io.*; @@ -52,23 +53,41 @@ public class LookupData implements Flushable, Trimmable { // Flushing every 30 seconds, we can run for 2000 years before the metaDataGeneration hits INTEGER.MAX_VALUE private AtomicInteger metaDataGeneration; - private LongAdder 
findKeyTimer; - private int flushCounter; - private long flushedKeyCounter; - public static LookupData lookupWriter(VirtualLongBlobStore keyLongBlobs, VirtualMutableBlobStore metadataBlobs, int flushThreshold){ - return new LookupData(keyLongBlobs, metadataBlobs, flushThreshold, -1, false); + final LookupDataMetrics.Adders lookupDataMetricsAdders; + + + public static LookupData lookupWriter(VirtualLongBlobStore keyLongBlobs, VirtualMutableBlobStore metadataBlobs, + int flushThreshold){ + return lookupWriter(keyLongBlobs, metadataBlobs, flushThreshold, new LookupDataMetrics.Adders()); + } + + public static LookupData lookupWriter(VirtualLongBlobStore keyLongBlobs, VirtualMutableBlobStore metadataBlobs, + int flushThreshold, LookupDataMetrics.Adders lookupDataMetricsAdders){ + return new LookupData( + keyLongBlobs, metadataBlobs, flushThreshold, -1, false, lookupDataMetricsAdders + ); + } + + public static LookupData lookupReader(VirtualLongBlobStore keyLongBlobs, VirtualMutableBlobStore metadataBlobs, + int reloadInterval){ + return lookupReader(keyLongBlobs, metadataBlobs, reloadInterval, new LookupDataMetrics.Adders()); } - public static LookupData lookupReader(VirtualLongBlobStore keyLongBlobs, VirtualMutableBlobStore metadataBlobs, int reloadInterval){ - return new LookupData(keyLongBlobs, metadataBlobs, -1, reloadInterval, true); + public static LookupData lookupReader(VirtualLongBlobStore keyLongBlobs, VirtualMutableBlobStore metadataBlobs, + int reloadInterval, LookupDataMetrics.Adders lookupDataMetricsAdders){ + return new LookupData( + keyLongBlobs, metadataBlobs, -1, reloadInterval, true, lookupDataMetricsAdders + ); } - private LookupData(VirtualLongBlobStore keyLongBlobs, VirtualMutableBlobStore metadataBlobs, int flushThreshold, int reloadInterval, boolean readOnly) { + private LookupData(VirtualLongBlobStore keyLongBlobs, VirtualMutableBlobStore metadataBlobs, int flushThreshold, + int reloadInterval, boolean readOnly, LookupDataMetrics.Adders 
lookupDataMetricsAdders) { this.keyLongBlobs = keyLongBlobs; this.metadataBlobs = metadataBlobs; this.readOnly = readOnly; + this.lookupDataMetricsAdders = lookupDataMetricsAdders; this.firstFlush = new AtomicBoolean(true); this.firstFlushThreshold = flushThreshold * (random.nextInt(100) + 25) / 100; @@ -81,7 +100,6 @@ private LookupData(VirtualLongBlobStore keyLongBlobs, VirtualMutableBlobStore me writeCacheCounter = new AtomicInteger(); metaDataGeneration = new AtomicInteger(); - findKeyTimer = new LongAdder(); atomicMetadataRef = new AtomicReference<>(); @@ -325,33 +343,10 @@ public long readValue(long keyPosition) { return keyLongBlobs.readLong(keyPosition); } - public long getMetadataLookupMissCount(){ - return getMetadata().getMissCount(); - } - - public long getMetadataLookupHitCount(){ - return getMetadata().getHitCount(); - } - public long getMetadataSize(){ return getMetadata().getNumKeys(); } - public long getFindKeyTimer(){ - return findKeyTimer.sum(); - } - - public long getFlushedKeyCount() { return flushedKeyCounter; } - - public int getFlushCount() { return flushCounter; } - - private Long timeFindKey(LookupMetadata md, VirtualLongBlobStore longBlobStore, LookupKey key) { - long tic = -System.nanoTime(); - Long val = md.findKey(longBlobStore, key); - findKeyTimer.add(System.nanoTime() + tic); - return val; - } - /** * Load a key from cached pages * @@ -366,23 +361,20 @@ private Long findValueFor(LookupKey key) { } } LookupMetadata md = getMetadata(); - return timeFindKey(md, keyLongBlobs, key); + return md.findKey(keyLongBlobs, key); } - // Allows calling of loadMetaData with default-0 LongAdders. - // This is used in the constructor of this class and also - // in the test class. 
LookupMetadata loadMetadata() { - return loadMetadata(new LongAdder(), new LongAdder()); + return loadMetadata(null); } - LookupMetadata loadMetadata(LongAdder prevMissCount, LongAdder prevHitCount) { + LookupMetadata loadMetadata(LookupMetadata lookupMetadata) { try { return LookupMetadata.open( getMetadataBlobs(), getMetaDataGeneration(), - prevMissCount, - prevHitCount + lookupMetadata, + lookupDataMetricsAdders ); } catch (IllegalStateException e) { if (readOnly) { @@ -391,29 +383,27 @@ LookupMetadata loadMetadata(LongAdder prevMissCount, LongAdder prevHitCount) { return LookupMetadata.open( getMetadataBlobs(), getMetaDataGeneration(), - prevMissCount, - prevHitCount + lookupMetadata, + lookupDataMetricsAdders ); } // `else` statement not needed because of the return statement above log.warn("getMetaData failed for read write store - attempting to repair it!", e); - return repairMetadata(prevMissCount, prevHitCount); + return repairMetadata(); } } - private synchronized LookupMetadata repairMetadata(LongAdder prevMissCount, LongAdder prevHitCount) { + private synchronized LookupMetadata repairMetadata() { int[] sortedPositions = keyLongBlobs.positionBlobStream() .sorted(Comparator.comparing(entry -> new LookupKey(entry.getValue()))) .mapToInt(entry -> entry.getKey().intValue()) .toArray(); - try { - int sortedPositionsSize = sortedPositions.length; - LookupKey minKey = sortedPositionsSize > 0 ? readKey((long) sortedPositions[0]) : null; - LookupKey maxKey = sortedPositionsSize > 0 ? readKey((long) sortedPositions[sortedPositionsSize - 1]) : null; - return LookupMetadata.generateMetadata(minKey, maxKey, sortedPositions, metadataBlobs, metaDataGeneration.incrementAndGet(), prevMissCount, prevHitCount); - } catch (IOException e) { - throw new UncheckedIOException("Unable to write repaired metadata!", e); - } + + int sortedPositionsSize = sortedPositions.length; + LookupKey minKey = sortedPositionsSize > 0 ? 
readKey((long) sortedPositions[0]) : null; + LookupKey maxKey = sortedPositionsSize > 0 ? readKey((long) sortedPositions[sortedPositionsSize - 1]) : null; + return LookupMetadata.generateMetadata(minKey, maxKey, sortedPositions, metadataBlobs, + metaDataGeneration.incrementAndGet(), lookupDataMetricsAdders); } private int getMetaDataGeneration() { @@ -473,7 +463,7 @@ void flushWriteCache(LookupMetadata currentMetadata) { // Check the metadata generation of the LookupKeys if (key.getMetaDataGeneration() != currentMetadataGeneration) { // Update the index of the key for the current metadata generation for so we can insert it correctly - timeFindKey(currentMetadata, keyLongBlobs, key); + currentMetadata.findKey(keyLongBlobs, key); } }) .forEach(key -> { @@ -501,17 +491,13 @@ void flushWriteCache(LookupMetadata currentMetadata) { } void generateMetaData(LookupMetadata currentMetadata) { - generateMetaData(currentMetadata, currentMetadata.missCount, currentMetadata.hitCount); - } - - void generateMetaData(LookupMetadata currentMetadata, LongAdder prevMissCount, LongAdder prevHitCount) { int[] currentKeySortOrder = currentMetadata.getKeyStorageOrder(); int flushSize = flushCache.size(); // Increment the stats here - flushedKeyCounter += flushSize; - flushCounter ++; + lookupDataMetricsAdders.flushedKeyCounter.add(flushSize); + lookupDataMetricsAdders.flushCounter.increment(); // Update the counter and flush again if there are still more entries in the write cache than the threshold if (flushThreshold != -1 && writeCacheCounter.addAndGet(-flushSize) > flushThreshold) { @@ -552,21 +538,17 @@ void generateMetaData(LookupMetadata currentMetadata, LongAdder prevMissCount, L log.debug("Finished creating sortOrder"); - try { - atomicMetadataRef.set( - LookupMetadata - .generateMetadata( - minKey, - maxKey, - newKeySortOrder, - metadataBlobs, - metaDataGeneration.incrementAndGet(), - prevMissCount, - prevHitCount) - ); - } catch (IOException e) { - throw new 
UncheckedIOException("Failed to write new metadata!", e); - } + atomicMetadataRef.set( + LookupMetadata + .generateMetadata( + minKey, + maxKey, + newKeySortOrder, + metadataBlobs, + metaDataGeneration.incrementAndGet(), + lookupDataMetricsAdders + ) + ); } protected LookupMetadata getMetadata() { @@ -575,12 +557,12 @@ protected LookupMetadata getMetadata() { LookupMetadata result = timeStampedMetadata.get(stamp); // Convert millis to seconds - if (((System.currentTimeMillis() - startTime) / 1000) > stamp[0]){ + if (reloadInterval > 0 && ((System.currentTimeMillis() - startTime) / 1000) > stamp[0]){ // a reloadInterval of 0 prevents reloading of the metadata - boolean reloadMetadata = !reloadStamp.compareAndSet(stamp[0], stamp[0] + reloadInterval); + boolean reloadMetadata = reloadStamp.compareAndSet(stamp[0], stamp[0] + reloadInterval); if (reloadMetadata) { - log.warn("getMetadata calling loadMetadata"); - result = loadMetadata(result.missCount, result.hitCount); + log.warn("Loading metadata"); + result = loadMetadata(result); timeStampedMetadata.set(result, stamp[0] + reloadInterval); } } @@ -596,6 +578,7 @@ public synchronized void flush() { if (readOnly) throw new RuntimeException("Can not flush read only LookupData"); if (writeCache.size() > 0) { + final long tic = System.nanoTime(); flushing.set(true); log.debug("starting flush"); @@ -607,6 +590,7 @@ public synchronized void flush() { flushCache.clear(); log.debug("flushed"); + lookupDataMetricsAdders.flushTimer.add(System.nanoTime() - tic); } flushing.set(false); } @@ -616,11 +600,11 @@ public void trim() { if (!readOnly) { flush(); } else { - LookupMetadata result = timeStampedMetadata.get(new int[1]); - int stamp = (int) ((System.currentTimeMillis() - startTime) / 1000) - 1; - // set to elapsed time minus one - it will reload next time it is used. 
- timeStampedMetadata.set(result, stamp); - reloadStamp.set(stamp); + int[] stamp = new int[1]; + LookupMetadata result = timeStampedMetadata.get(stamp); + result = loadMetadata(result); + timeStampedMetadata.set(result, stamp[0]); + reloadStamp.set(stamp[0]); } } diff --git a/src/main/java/com/upserve/uppend/lookup/LookupDataIterator.java b/src/main/java/com/upserve/uppend/lookup/LookupDataIterator.java index 21fb7ae3..bac40b21 100644 --- a/src/main/java/com/upserve/uppend/lookup/LookupDataIterator.java +++ b/src/main/java/com/upserve/uppend/lookup/LookupDataIterator.java @@ -12,8 +12,6 @@ public class LookupDataIterator implements Iterator { private final Iterator writeCacheKeyIterator; LongFunction reader; - private final AtomicLong position = new AtomicLong(); - LookupDataIterator(int[] positions, int writeCacheSize, Iterator writeCacheKeyIterator, LongFunction reader) { // Get a snapshot of the keys this.positions = positions; diff --git a/src/main/java/com/upserve/uppend/lookup/LookupMetadata.java b/src/main/java/com/upserve/uppend/lookup/LookupMetadata.java index 22b9d6a7..9d1c32d2 100644 --- a/src/main/java/com/upserve/uppend/lookup/LookupMetadata.java +++ b/src/main/java/com/upserve/uppend/lookup/LookupMetadata.java @@ -1,12 +1,13 @@ package com.upserve.uppend.lookup; import com.upserve.uppend.blobs.*; +import com.upserve.uppend.metrics.LookupDataMetrics; import org.slf4j.Logger; import java.io.IOException; import java.lang.invoke.MethodHandles; import java.nio.*; -import java.util.Arrays; +import java.util.*; import java.util.concurrent.atomic.LongAdder; /** * The bisect tree is linearized as follows @@ -42,17 +43,25 @@ public class LookupMetadata { private final LookupKey maxKey; private final int[] keyStorageOrder; - final LongAdder hitCount; - final LongAdder missCount; - public static LookupMetadata generateMetadata(LookupKey minKey, LookupKey maxKey, int[] keyStorageOrder, VirtualMutableBlobStore metaDataBlobs, int metadataGeneration, LongAdder 
missCount, LongAdder hitCount) throws IOException { + final LookupDataMetrics.Adders lookupDataMetricsAdders; + final byte[] checksum; + static LookupMetadata generateMetadata(LookupKey minKey, LookupKey maxKey, int[] keyStorageOrder, + VirtualMutableBlobStore metaDataBlobs, int metadataGeneration) { + return generateMetadata( + minKey, maxKey, keyStorageOrder, metaDataBlobs, metadataGeneration, new LookupDataMetrics.Adders() + ); + } + + static LookupMetadata generateMetadata(LookupKey minKey, LookupKey maxKey, int[] keyStorageOrder, + VirtualMutableBlobStore metaDataBlobs, int metadataGeneration, + LookupDataMetrics.Adders lookupDataMetricsAdders) { LookupMetadata newMetadata = new LookupMetadata( minKey, maxKey, keyStorageOrder, metadataGeneration, - missCount, - hitCount + lookupDataMetricsAdders ); newMetadata.writeTo(metaDataBlobs); @@ -61,36 +70,44 @@ public static LookupMetadata generateMetadata(LookupKey minKey, LookupKey maxKey } LookupMetadata(LookupKey minKey, LookupKey maxKey, int[] keyStorageOrder, int metadataGeneration) { - this(minKey, maxKey, keyStorageOrder, metadataGeneration, new LongAdder(), new LongAdder()); + this(minKey, maxKey, keyStorageOrder, metadataGeneration, new LookupDataMetrics.Adders()); + } - private LookupMetadata(LookupKey minKey, LookupKey maxKey, int[] keyStorageOrder, int metadataGeneration, LongAdder missCount, LongAdder hitCount) { + LookupMetadata(LookupKey minKey, LookupKey maxKey, int[] keyStorageOrder, int metadataGeneration, + LookupDataMetrics.Adders lookupDataMetricsAdders) { this.numKeys = keyStorageOrder.length; this.minKey = minKey; this.maxKey = maxKey; this.keyStorageOrder = keyStorageOrder; this.metadataGeneration = metadataGeneration; + this.lookupDataMetricsAdders = lookupDataMetricsAdders; - this.hitCount = hitCount; - this.missCount = missCount; - + this.checksum = null; } public static LookupMetadata open(VirtualMutableBlobStore metadataBlobs, int metadataGeneration) { - return open(metadataBlobs, 
metadataGeneration, new LongAdder(), new LongAdder()); + return open(metadataBlobs, metadataGeneration, null, new LookupDataMetrics.Adders()); } - public static LookupMetadata open(VirtualMutableBlobStore metadataBlobs, int metadataGeneration, LongAdder missCount, LongAdder hitCount) { - // TODO can we preserve bisectKeys if the content is not changed? (Currently periodic reload clears this cache) + public static LookupMetadata open(VirtualMutableBlobStore metadataBlobs, int metadataGeneration, + LookupMetadata previous, LookupDataMetrics.Adders lookupDataMetricsAdders) { if (metadataBlobs.isPageAllocated(0L)) { - byte[] bytes = metadataBlobs.read(0L); - return new LookupMetadata(bytes, metadataGeneration, missCount, hitCount); + byte[] currentChecksum = metadataBlobs.readChecksum(0L); + + // If the checksum has not changed return the previously LookupMetadata + if (Objects.nonNull(previous) && Arrays.equals(currentChecksum, previous.checksum)) { + return previous; + } else { + byte[] bytes = metadataBlobs.read(0L); + return new LookupMetadata(bytes, metadataGeneration, currentChecksum, lookupDataMetricsAdders); + } } else { - return new LookupMetadata(null, null, new int[0], metadataGeneration, missCount, hitCount); + return new LookupMetadata(null, null, new int[0], metadataGeneration, lookupDataMetricsAdders); } } - private LookupMetadata(byte[] bytes, int metadataGeneration, LongAdder missCount, LongAdder hitCount) { + private LookupMetadata(byte[] bytes, int metadataGeneration, byte[] checksum, LookupDataMetrics.Adders lookupDataMetricsAdders) { ByteBuffer buffer = ByteBuffer.wrap(bytes); int minKeyLength, maxKeyLength; @@ -113,9 +130,8 @@ private LookupMetadata(byte[] bytes, int metadataGeneration, LongAdder missCount } this.metadataGeneration = metadataGeneration; - - this.hitCount = hitCount; - this.missCount = missCount; + this.checksum = checksum; + this.lookupDataMetricsAdders = lookupDataMetricsAdders; } /** @@ -128,102 +144,111 @@ private 
LookupMetadata(byte[] bytes, int metadataGeneration, LongAdder missCount * @param key the key to find and mark * @return the position of the key */ - public Long findKey(VirtualLongBlobStore longBlobStore, LookupKey key) { - - key.setMetaDataGeneration(metadataGeneration); - - if (numKeys == 0) { - key.setInsertAfterSortIndex(-1); - missCount.increment(); - return null; - } + Long findKey(VirtualLongBlobStore longBlobStore, LookupKey key) { + // Use a try finally block to time execution + // https://softwareengineering.stackexchange.com/questions/210428/is-try-finally-expensive + final long tic = System.nanoTime(); + try { + key.setMetaDataGeneration(metadataGeneration); - int keyIndexLower = 0; - int keyIndexUpper = numKeys - 1; - LookupKey lowerKey = minKey; - LookupKey upperKey = maxKey; + if (numKeys == 0) { + key.setInsertAfterSortIndex(-1); + lookupDataMetricsAdders.lookupMissCount.increment(); + return null; + } - int bisectCount = 0; - int bisectKeyTreeArrayIndex = 1; + int keyIndexLower = 0; + int keyIndexUpper = numKeys - 1; + LookupKey lowerKey = minKey; + LookupKey upperKey = maxKey; - int keyPosition; - LookupKey midpointKey; - int midpointKeyIndex; + int bisectCount = 0; + int bisectKeyTreeArrayIndex = 1; - int comparison = lowerKey.compareTo(key); - if (comparison > 0 /* new key is less than lowerKey */) { - key.setInsertAfterSortIndex(-1); // Insert it after this index in the sort order - missCount.increment(); - return null; - } - if (comparison == 0) { - key.setPosition(keyStorageOrder[keyIndexLower]); - hitCount.increment(); - return longBlobStore.readLong(keyStorageOrder[keyIndexLower]); - } + int keyPosition; + LookupKey midpointKey; + int midpointKeyIndex; - comparison = upperKey.compareTo(key); - if (comparison < 0 /* new key is greater than upperKey */) { - key.setInsertAfterSortIndex(keyIndexUpper); // Insert it after this index in the sort order - missCount.increment(); - return null; - } - if (comparison == 0) { - 
key.setPosition(keyStorageOrder[keyIndexUpper]); - hitCount.increment(); - return longBlobStore.readLong(keyStorageOrder[keyIndexUpper]); - } - - if (numKeys == 2) { // There are no other values keys besides upper and lower - key.setInsertAfterSortIndex(keyIndexLower); - missCount.increment(); - return null; - } + int comparison = lowerKey.compareTo(key); + if (comparison > 0 /* new key is less than lowerKey */) { + key.setInsertAfterSortIndex(-1); // Insert it after this index in the sort order + lookupDataMetricsAdders.lookupMissCount.increment(); + return null; + } + if (comparison == 0) { + key.setPosition(keyStorageOrder[keyIndexLower]); + lookupDataMetricsAdders.lookupHitCount.increment(); + return longBlobStore.readLong(keyStorageOrder[keyIndexLower]); + } - // bisect till we find the key or return null - do { - midpointKeyIndex = keyIndexLower + ((keyIndexUpper - keyIndexLower) / 2); + comparison = upperKey.compareTo(key); + if (comparison < 0 /* new key is greater than upperKey */) { + key.setInsertAfterSortIndex(keyIndexUpper); // Insert it after this index in the sort order + lookupDataMetricsAdders.lookupMissCount.increment(); + return null; + } + if (comparison == 0) { + key.setPosition(keyStorageOrder[keyIndexUpper]); + lookupDataMetricsAdders.lookupHitCount.increment(); + return longBlobStore.readLong(keyStorageOrder[keyIndexUpper]); + } - if (log.isTraceEnabled()) - log.trace("reading {}: [{}, {}], [{}, {}], {}", key, keyIndexLower, keyIndexUpper, lowerKey, upperKey, midpointKeyIndex); + if (numKeys == 2) { // There are no other values keys besides upper and lower + key.setInsertAfterSortIndex(keyIndexLower); + lookupDataMetricsAdders.lookupMissCount.increment(); + return null; + } - keyPosition = keyStorageOrder[midpointKeyIndex]; - // Cache only the most frequently used midpoint keys - if (bisectCount < MAX_BISECT_KEY_CACHE_DEPTH) { - if (bisectKeys[bisectKeyTreeArrayIndex] == null){ - midpointKey = bisectKeys[bisectKeyTreeArrayIndex] = new 
LookupKey(longBlobStore.readBlob(keyPosition)); + // bisect till we find the key or return null + do { + midpointKeyIndex = keyIndexLower + ((keyIndexUpper - keyIndexLower) / 2); + + if (log.isTraceEnabled()) + log.trace("reading {}: [{}, {}], [{}, {}], {}", key, keyIndexLower, keyIndexUpper, lowerKey, upperKey, midpointKeyIndex); + + keyPosition = keyStorageOrder[midpointKeyIndex]; + // Cache only the most frequently used midpoint keys + if (bisectCount < MAX_BISECT_KEY_CACHE_DEPTH) { + if (bisectKeys[bisectKeyTreeArrayIndex] == null) { + lookupDataMetricsAdders.cacheMissCount.increment(); + midpointKey = bisectKeys[bisectKeyTreeArrayIndex] = new LookupKey(longBlobStore.readBlob(keyPosition)); + } else { + lookupDataMetricsAdders.cacheHitCount.increment(); + midpointKey = bisectKeys[bisectKeyTreeArrayIndex]; + } } else { - midpointKey = bisectKeys[bisectKeyTreeArrayIndex]; + midpointKey = new LookupKey(longBlobStore.readBlob(keyPosition)); } - } else { - midpointKey = new LookupKey(longBlobStore.readBlob(keyPosition)); - } - comparison = key.compareTo(midpointKey); + comparison = key.compareTo(midpointKey); - if (comparison == 0) { - key.setPosition(keyPosition); - hitCount.increment(); - return longBlobStore.readLong(keyPosition); - } + if (comparison == 0) { + key.setPosition(keyPosition); + lookupDataMetricsAdders.lookupHitCount.increment(); + return longBlobStore.readLong(keyPosition); + } - if (comparison < 0) { - upperKey = midpointKey; - keyIndexUpper = midpointKeyIndex; - bisectKeyTreeArrayIndex = bisectKeyTreeArrayIndex * 2; + if (comparison < 0) { + upperKey = midpointKey; + keyIndexUpper = midpointKeyIndex; + bisectKeyTreeArrayIndex = bisectKeyTreeArrayIndex * 2; - } else { - lowerKey = midpointKey; - keyIndexLower = midpointKeyIndex; - bisectKeyTreeArrayIndex = bisectKeyTreeArrayIndex * 2 + 1; - } + } else { + lowerKey = midpointKey; + keyIndexLower = midpointKeyIndex; + bisectKeyTreeArrayIndex = bisectKeyTreeArrayIndex * 2 + 1; + } - bisectCount++; - 
} while ((keyIndexLower + 1) < keyIndexUpper); + bisectCount++; + } while ((keyIndexLower + 1) < keyIndexUpper); - key.setInsertAfterSortIndex(keyIndexLower); // Insert it in the sort order after this key - missCount.increment(); - return null; + key.setInsertAfterSortIndex(keyIndexLower); // Insert it in the sort order after this key + lookupDataMetricsAdders.lookupMissCount.increment(); + return null; + } + finally { + lookupDataMetricsAdders.findKeyTimer.add(System.nanoTime() - tic); + } } public static int treeSize(int depth) { @@ -264,26 +289,10 @@ public int getMetadataGeneration() { return metadataGeneration; } - /** - * Size of keyStorageOrder in bytes - * @return the weight in bytes - */ - public int weight() { - return numKeys * 4; - } - public int getNumKeys() { return numKeys; } - public long getHitCount() { - return hitCount.sum(); - } - - public long getMissCount(){ - return missCount.sum(); - } - public int[] getKeyStorageOrder() { return keyStorageOrder; } diff --git a/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java b/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java index 6e52f401..4d9bd1ad 100644 --- a/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java +++ b/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java @@ -244,15 +244,13 @@ public void testScanNonExistant() { @Test public void testLoadReadOnlyMetadata() { - LookupData data = LookupData.lookupReader(keyBlobStore, mutableBlobStore, RELOAD_INTERVAL); mutableBlobStore.write(0, Ints.toByteArray(50)); mutableBlobStore.write(4, Ints.toByteArray(284482732)); // Check checksum - thrown.expect(IllegalStateException.class); thrown.expectMessage("Checksum did not match for the requested blob"); - data.getValue(new LookupKey("foobar")); + LookupData data = LookupData.lookupReader(keyBlobStore, mutableBlobStore, RELOAD_INTERVAL); } @Test From c58ecfd4df33b8523de308795a87b57e4c6b4457 Mon Sep 17 00:00:00 2001 From: David Stuebe Date: Mon, 5 Aug 2019 20:10:17 -0400 
Subject: [PATCH 13/22] Add metrics. Move blocked long pos from separate file into mapped header. Fix tests. --- .../java/com/upserve/uppend/BlockedLongs.java | 203 +++++++++++------- .../com/upserve/uppend/BlockedLongsTest.java | 50 +---- 2 files changed, 136 insertions(+), 117 deletions(-) diff --git a/src/main/java/com/upserve/uppend/BlockedLongs.java b/src/main/java/com/upserve/uppend/BlockedLongs.java index cb99b406..19ee0971 100644 --- a/src/main/java/com/upserve/uppend/BlockedLongs.java +++ b/src/main/java/com/upserve/uppend/BlockedLongs.java @@ -1,6 +1,7 @@ package com.upserve.uppend; import com.google.common.util.concurrent.Striped; +import com.upserve.uppend.metrics.*; import org.slf4j.Logger; import java.io.*; @@ -23,6 +24,8 @@ public class BlockedLongs implements AutoCloseable, Flushable { private static final int PAGE_SIZE = 128 * 1024 * 1024; // allocate 128 MB chunks private static final int MAX_PAGES = 32 * 1024; // max 4 TB + static final int HEADER_BYTES = 128; // Currently 16 used for file size and append count + private final Path file; private final int valuesPerBlock; @@ -31,24 +34,32 @@ public class BlockedLongs implements AutoCloseable, Flushable { private final FileChannel blocks; private final MappedByteBuffer[] pages; - private final FileChannel blocksPos; + private static final int posBufPosition = 0; private final MappedByteBuffer posBuf; private final AtomicLong posMem; + private static final int appendBufPosition = 8; + private final MappedByteBuffer appendBuf; + private final AtomicInteger currentPage; private final boolean readOnly; - private final LongAdder appendCounter; - private final LongAdder allocCounter; - private final LongAdder valuesReadCounter; + final BlockedLongMetrics.Adders blockedLongMetricsAdders; + private long initialAppendCount; // Should be final, but must be able to clear! 
+ final LongAdder appendCounter = new LongAdder(); + + BlockedLongs(Path file, int valuesPerBlock, boolean readOnly) { + this(file, valuesPerBlock, readOnly, new BlockedLongMetrics.Adders()); + } - public BlockedLongs(Path file, int valuesPerBlock, boolean readOnly) { + BlockedLongs(Path file, int valuesPerBlock, boolean readOnly, BlockedLongMetrics.Adders blockedLongMetricsAdders) { if (file == null) { throw new IllegalArgumentException("null file"); } this.file = file; this.readOnly = readOnly; + this.blockedLongMetricsAdders = blockedLongMetricsAdders; Path dir = file.getParent(); try { @@ -57,16 +68,10 @@ public BlockedLongs(Path file, int valuesPerBlock, boolean readOnly) { throw new UncheckedIOException("unable to mkdirs: " + dir, e); } - Path posFile = dir.resolve(file.getFileName() + ".pos"); - if (valuesPerBlock < 1) { throw new IllegalArgumentException("bad (< 1) values per block: " + valuesPerBlock); } - appendCounter = new LongAdder(); - allocCounter = new LongAdder(); - valuesReadCounter = new LongAdder(); - this.valuesPerBlock = valuesPerBlock; blockSize = 16 + valuesPerBlock * 8; @@ -90,33 +95,46 @@ public BlockedLongs(Path file, int valuesPerBlock, boolean readOnly) { } pages = new MappedByteBuffer[MAX_PAGES]; - ensurePage(0); currentPage = new AtomicInteger(0); try { - blocksPos = FileChannel.open(posFile, openOptions); - if (blocksPos.size() > 8) { - throw new IllegalStateException("bad (!= 8) size for block pos file: " + posFile); - } + posBuf = blocks.map(readOnly ? FileChannel.MapMode.READ_ONLY : FileChannel.MapMode.READ_WRITE, posBufPosition, 8); + } catch (IOException e) { + throw new UncheckedIOException("Unable to map pos buffer at in " + file, e); + } + long pos = posBuf.getLong(0); + + if (pos == 0) { + // This is a new file try { - posBuf = blocksPos.map(readOnly ? 
FileChannel.MapMode.READ_ONLY : FileChannel.MapMode.READ_WRITE, 0, 8); + blocks.write(ByteBuffer.wrap(new byte[HEADER_BYTES]), 0L); } catch (IOException e) { - throw new UncheckedIOException("unable to map pos buffer at in " + posFile, e); - } - long pos = posBuf.getLong(0); - if (pos < 0) { - throw new IllegalStateException("negative pos (" + pos + "): " + posFile); + throw new UncheckedIOException("Unable to write blank header to file " + file, e); } + posBuf.putLong(HEADER_BYTES); + pos = HEADER_BYTES; + } + else if (pos < HEADER_BYTES) { + throw new IllegalStateException("negative pos (" + pos + "): " + file); + } + + try { if (pos > blocks.size()) { - throw new IllegalStateException("pos (" + pos + ") > size of " + file + " (" + blocks.size() + "): " + posFile); + throw new IllegalStateException("pos (" + pos + ") > size of " + file + " (" + blocks.size() + ")"); } - posMem = new AtomicLong(pos); - + } catch (IOException e) { + throw new UncheckedIOException("Could not size the " + file + " file!", e); + } + try { + appendBuf = blocks.map(readOnly ? 
FileChannel.MapMode.READ_ONLY : FileChannel.MapMode.READ_WRITE, appendBufPosition, 8); } catch (IOException e) { - throw new UncheckedIOException("unable to init blocks pos file: " + posFile, e); + throw new UncheckedIOException("Unable to map pos buffer at in " + file, e); } + initialAppendCount = appendBuf.getLong(0); + + posMem = new AtomicLong(pos); } /** @@ -124,22 +142,14 @@ public BlockedLongs(Path file, int valuesPerBlock, boolean readOnly) { * * @return the position of the new block */ - public long allocate() { + long allocate() { log.trace("allocating block of {} bytes in {}", blockSize, file); - allocCounter.increment(); + blockedLongMetricsAdders.blockAllocationCounter.increment(); long pos = posMem.getAndAdd(blockSize); posBuf.putLong(0, posMem.get()); return pos; } - /** - * get some stats about the blocked long store - * @return Stats about activity in this BlockedLongs - */ - public BlockStats stats() { - return new BlockStats(currentPage.get() , size(), appendCounter.longValue(), allocCounter.longValue(), valuesReadCounter.longValue()); - } - /** * Return the current number of bytes which have been allocated for blocks * @@ -153,13 +163,39 @@ public long size() { } } + /** + * Get the number of long blocks allocated + * @return long block count + */ + public long getBlockCount() { + return (size() - HEADER_BYTES) / blockSize; + } + + /** + * Get the append count for the blocked long store. + * For the writer this will be exact and when called the file will be updated. For the reader it will be approximate + * depending on when the writer last updated the blocked long file. 
+ * @return the number of longs appended to the file + */ + public long getCount() { + if (readOnly){ + return appendBuf.getLong(0); + } else { + final long count = initialAppendCount + appendCounter.sum(); + appendBuf.putLong(0, count); + return count; + } + } + public void append(final long pos, final long val) { log.trace("appending value {} to {} at {}", val, file, pos); if (readOnly) throw new RuntimeException("Can not append a read only blocked longs file: " + file); // size | -next // prev | -last + blockedLongMetricsAdders.appendCounter.increment(); appendCounter.increment(); + final long tic = System.nanoTime(); Lock lock = stripedLocks.getAt((int) (pos % LOCK_SIZE)); lock.lock(); @@ -197,22 +233,27 @@ public void append(final long pos, final long val) { } } finally { lock.unlock(); + blockedLongMetricsAdders.appendTimer.add(System.nanoTime() - tic); } log.trace("appended value {} to {} at {}", val, file, pos); } public LongStream values(Long pos) { log.trace("streaming values from {} at {}", file, pos); + final long tic = System.nanoTime(); + blockedLongMetricsAdders.readCounter.increment(); - valuesReadCounter.increment(); - + final LongStream result; if (pos == null) { // pos will be null for missing keys - return LongStream.empty(); + result = LongStream.empty(); + } else { + long[] longs = valuesArray(pos); + blockedLongMetricsAdders.longsReadCounter.add(longs.length); + result = Arrays.stream(longs); } - - long[] longs = valuesArray(pos); - return Arrays.stream(longs); + blockedLongMetricsAdders.readTimer.add(System.nanoTime() - tic); + return result; } public long[] valuesArray(Long pos) { @@ -312,35 +353,42 @@ public long[] valuesArray(Long pos) { public long lastValue(long pos) { log.trace("reading last value from {} at {}", file, pos); + blockedLongMetricsAdders.readLastCounter.increment(); + final long tic = System.nanoTime(); + final long value; + if (pos >= posMem.get()) { - return -1; - } + value = -1; + } else { - // size | -next - // prev | 
-last + // size | -next + // prev | -last - final long prev = readLong(pos + 8); - if (prev > 0) { - throw new IllegalStateException("lastValue called at non-starting block: pos=" + pos); - } - long last = prev == 0 ? pos : -prev; - long size = readLong(last); - if (size == 0) { - if (prev < 0) { - throw new IllegalStateException("got to empty last block: pos=" + pos); + final long prev = readLong(pos + 8); + if (prev > 0) { + throw new IllegalStateException("lastValue called at non-starting block: pos=" + pos); } - return -1; - } - if (size < 0) { - log.debug("Read recovery for last block (at " + last + "): pos=" + pos); - last = -size; - size = readLong(last); - } - if (size > valuesPerBlock) { - throw new IllegalStateException("too high num values: expected <= " + valuesPerBlock + ", got " + size + ": pos=" + pos); + long last = prev == 0 ? pos : -prev; + long size = readLong(last); + if (size == 0) { + if (prev < 0) { + throw new IllegalStateException("got to empty last block: pos=" + pos); + } + return -1; + } + if (size < 0) { + log.debug("Read recovery for last block (at " + last + "): pos=" + pos); + last = -size; + size = readLong(last); + } + if (size > valuesPerBlock) { + throw new IllegalStateException("too high num values: expected <= " + valuesPerBlock + ", got " + size + ": pos=" + pos); + } + value = readLong(last + 16 + 8 * (size - 1)); } - long value = readLong(last + 16 + 8 * (size - 1)); log.trace("got value from {} at {}: {}", file, pos, value); + blockedLongMetricsAdders.readLastTimer.add(System.nanoTime() - tic); + return value; } @@ -348,9 +396,16 @@ public void clear() { log.debug("clearing {}", file); IntStream.range(0, LOCK_SIZE).forEach(index -> stripedLocks.getAt(index).lock()); try { - blocks.truncate(0); - posBuf.putLong(0, 0); - posMem.set(0); + blocks.truncate(HEADER_BYTES); + try { + blocks.write(ByteBuffer.wrap(new byte[HEADER_BYTES]), 0L); + } catch (IOException e) { + throw new UncheckedIOException("Unable to write blank header 
to file " + file, e); + } + posBuf.putLong(0, HEADER_BYTES); + posMem.set(HEADER_BYTES); + appendBuf.putLong(0, 0L); + initialAppendCount = 0L; Arrays.fill(pages, null); currentPage.set(0); ensurePage(0); @@ -367,7 +422,6 @@ public void close() throws IOException { if (readOnly) { blocks.close(); - blocksPos.close(); return; } @@ -375,7 +429,6 @@ public void close() throws IOException { try { flush(); blocks.close(); - blocksPos.close(); } finally { IntStream.range(0, LOCK_SIZE).forEach(index -> stripedLocks.getAt(index).unlock()); } @@ -386,6 +439,8 @@ public void flush() { if (readOnly) return; log.debug("flushing {}", file); posBuf.force(); + appendBuf.putLong(0, initialAppendCount + appendCounter.sum()); + appendBuf.force(); Arrays.stream(pages) .parallel() @@ -408,17 +463,17 @@ public void trim() { } private long readLong(long pos) { - int pagePos = (int) (pos % (long) PAGE_SIZE); + int pagePos = (int) ((pos - HEADER_BYTES) % (long) PAGE_SIZE); return page(pos).getLong(pagePos); } protected void writeLong(long pos, long val) { - int pagePos = (int) (pos % (long) PAGE_SIZE); + int pagePos = (int) ((pos - HEADER_BYTES) % (long) PAGE_SIZE); page(pos).putLong(pagePos, val); } private MappedByteBuffer page(long pos) { - long pageIndexLong = pos / PAGE_SIZE; + long pageIndexLong = (pos - HEADER_BYTES) / PAGE_SIZE; if (pageIndexLong > Integer.MAX_VALUE) { throw new RuntimeException("page index exceeded max int: " + pageIndexLong); } @@ -445,7 +500,7 @@ private MappedByteBuffer ensurePage(int pageIndex) { synchronized (pages) { page = pages[pageIndex]; if (page == null) { - long pageStart = (long) pageIndex * PAGE_SIZE; + long pageStart = (long) pageIndex * PAGE_SIZE + HEADER_BYTES; try { FileChannel.MapMode mapMode = readOnly ? 
FileChannel.MapMode.READ_ONLY : FileChannel.MapMode.READ_WRITE; page = blocks.map(mapMode, pageStart, PAGE_SIZE); diff --git a/src/test/java/com/upserve/uppend/BlockedLongsTest.java b/src/test/java/com/upserve/uppend/BlockedLongsTest.java index 2d26eff0..106d6966 100644 --- a/src/test/java/com/upserve/uppend/BlockedLongsTest.java +++ b/src/test/java/com/upserve/uppend/BlockedLongsTest.java @@ -16,13 +16,11 @@ public class BlockedLongsTest { private Path path = Paths.get("build/test/tmp/block"); - private Path posPath = path.resolveSibling(path.getFileName() + ".pos"); private boolean readOnly = false; @Before public void initialize() throws Exception { SafeDeleting.removeTempPath(path); - SafeDeleting.removeTempPath(posPath); } @Test @@ -33,15 +31,6 @@ public void testCtor() { new BlockedLongs(path, 1000, readOnly); } - @Test(expected = UncheckedIOException.class) - public void testCtorNoPosFile() throws Exception { - BlockedLongs block = new BlockedLongs(path, 1, readOnly); - block.close(); - Files.delete(posPath); - Files.createDirectories(posPath); - new BlockedLongs(path, 1, readOnly); - } - @Test(expected = IllegalArgumentException.class) public void testCtorWithNullFile() { new BlockedLongs(null, 1, readOnly); @@ -58,13 +47,13 @@ public void testCtorWithNegativeValuesPerBlock() { } @Test - public void testAllocate() throws Exception { + public void testAllocateClear() throws Exception { for (int i = 1; i <= 20; i++) { BlockedLongs v = new BlockedLongs(path, i, readOnly); long pos1 = v.allocate(); long pos2 = v.allocate(); - assertEquals(0, pos1); - assertEquals(16 + (8 * i), pos2); // brittle + assertEquals(BlockedLongs.HEADER_BYTES, pos1); + assertEquals(BlockedLongs.HEADER_BYTES + 16 + (8 * i), pos2); // brittle v.clear(); } } @@ -107,7 +96,7 @@ public void testAppendTooHighNumValues() throws Exception { ByteBuffer longBuf = ThreadLocalByteBuffers.LOCAL_LONG_BUFFER.get(); longBuf.putLong(20); longBuf.flip(); - chan.write(longBuf, 0); + chan.write(longBuf, 
pos1); } v.append(pos1, 0); } @@ -164,7 +153,7 @@ public void blockedLongBeating() throws Exception { assertEquals(TEST_APPENDS * 4, testData.values().stream().mapToLong(List::size).sum()); long expectedBlocks = testData.values().stream().mapToLong(vals -> (vals.size() + VALS_PER_BLOCK - 1) / VALS_PER_BLOCK).sum(); - long actualBlocks = block.size() / (16 + VALS_PER_BLOCK * 8); + long actualBlocks = block.getBlockCount(); assertEquals(expectedBlocks, actualBlocks); } @@ -255,40 +244,15 @@ public void testReadRepair() { assertArrayEquals(new long[]{1L, 1L, 1L, 2L, 3L}, block.values(pos).toArray()); } - @Test - public void testStats() { - BlockedLongs v = new BlockedLongs(path, 10, readOnly); - BlockStats stats = v.stats(); - assertNotNull(stats); - Assert.assertEquals(0, stats.getAllocCount()); - Assert.assertEquals(0, stats.getAppendCount()); - Assert.assertEquals(0, stats.getPagesLoaded()); - Assert.assertEquals(0, stats.getSize()); - Assert.assertEquals(0, stats.getValuesReadCount()); - long pos1 = v.allocate(); - for (long i = 0; i < 20; i++) { - v.append(pos1, i); - } - v.values(0L); - stats = v.stats(); - assertNotNull(stats); - Assert.assertEquals(2, stats.getAllocCount()); - Assert.assertEquals(20, stats.getAppendCount()); - Assert.assertEquals(1, stats.getPagesLoaded()); - Assert.assertTrue(stats.getSize() > 10); - Assert.assertTrue(stats.getSize() < 1000); - Assert.assertEquals(1, stats.getValuesReadCount()); - } - @Test public void testEmptyCases() { BlockedLongs v = new BlockedLongs(path, 10, readOnly); - OptionalLong val = v.values(0L).findAny(); + OptionalLong val = v.values((long)BlockedLongs.HEADER_BYTES).findAny(); assertFalse(val.isPresent()); val = v.values(null).findAny(); assertFalse(val.isPresent()); val = v.values(-1L).findAny(); assertFalse(val.isPresent()); - assertEquals(-1, v.lastValue(0)); + assertEquals(-1, v.lastValue(BlockedLongs.HEADER_BYTES)); } } From d5844db47f037e6aef4b69b38016c56d40bbea91 Mon Sep 17 00:00:00 2001 From: David 
Stuebe Date: Mon, 5 Aug 2019 20:10:38 -0400 Subject: [PATCH 14/22] Use new metrics in benchmark --- .../upserve/uppend/cli/CommandBenchmark.java | 1 + .../uppend/cli/benchmark/Benchmark.java | 32 ++++++++++++++----- 2 files changed, 25 insertions(+), 8 deletions(-) diff --git a/src/main/java/com/upserve/uppend/cli/CommandBenchmark.java b/src/main/java/com/upserve/uppend/cli/CommandBenchmark.java index 16b8d1a1..53a64489 100644 --- a/src/main/java/com/upserve/uppend/cli/CommandBenchmark.java +++ b/src/main/java/com/upserve/uppend/cli/CommandBenchmark.java @@ -132,6 +132,7 @@ private Benchmark createBenchmark() { .withBlobPageSize(blobPageSize) .withLookupPageSize(keyPageSize) .withMetadataPageSize(metadataPageSize) + //.withMetadataTTL(30) // To run with a metadata expiration to force reload of new keys .withFlushThreshold(flushThreshold) .withFlushDelaySeconds(flushDelay) .withStoreMetrics(metrics); diff --git a/src/main/java/com/upserve/uppend/cli/benchmark/Benchmark.java b/src/main/java/com/upserve/uppend/cli/benchmark/Benchmark.java index c60f1e7f..58b88915 100644 --- a/src/main/java/com/upserve/uppend/cli/benchmark/Benchmark.java +++ b/src/main/java/com/upserve/uppend/cli/benchmark/Benchmark.java @@ -3,6 +3,8 @@ import com.codahale.metrics.*; import com.codahale.metrics.Timer; import com.upserve.uppend.*; +import com.upserve.uppend.metrics.*; +import com.upserve.uppend.metrics.LookupDataMetrics; import org.slf4j.Logger; import java.io.IOException; @@ -37,8 +39,11 @@ public class Benchmark { private final ForkJoinPool writerPool; private final ForkJoinPool readerPool; - AtomicReference partitionStats; - AtomicReference blockStats; + AtomicReference lookupDataMetricsReference; + AtomicReference blockedLongMetricsReference; + AtomicReference blobStoreMetricsReference; + AtomicReference longBlobStoreMetricsReference; + AtomicReference mutableBlobStoreMetricsReference; public LongSummaryStatistics writerStats() { return writer.getStats(); @@ -93,8 +98,11 @@ public 
Benchmark(BenchmarkMode mode, AppendOnlyStoreBuilder builder, long range, throw new RuntimeException("Unknown mode: " + mode); } - partitionStats = new AtomicReference<>(testInstance.getPartitionStats()); - blockStats = new AtomicReference<>(testInstance.getBlockLongStats()); + lookupDataMetricsReference = new AtomicReference<>(testInstance.getLookupDataMetrics()); + blockedLongMetricsReference = new AtomicReference<>(testInstance.getBlockedLongMetrics()); + blobStoreMetricsReference = new AtomicReference<>(testInstance.getBlobStoreMetrics()); + longBlobStoreMetricsReference = new AtomicReference<>(testInstance.getLongBlobStoreMetrics()); + mutableBlobStoreMetricsReference = new AtomicReference<>(testInstance.getMutableBlobStoreMetrics()); } private BenchmarkWriter simpleWriter() { @@ -202,12 +210,20 @@ public void run() { log.info(String.format("Read: %7.2fmb/s %7.2fr/s; Write %7.2fmb/s %7.2fa/s; Mem %7.2fmb free %7.2fmb total", readRate, keysReadPerSecond, writeRate, appendsPerSecond, free, total)); - PartitionStats pStats = testInstance.getPartitionStats(); - log.info(pStats.present(partitionStats.getAndSet(pStats))); + LookupDataMetrics lookupDataMetrics = testInstance.getLookupDataMetrics(); + log.info(lookupDataMetrics.present(lookupDataMetricsReference.getAndSet(lookupDataMetrics))); - BlockStats bStats = testInstance.getBlockLongStats(); - log.info("Block Stats: {}", bStats.minus(blockStats.getAndSet(bStats))); + BlockedLongMetrics blockedLongMetrics = testInstance.getBlockedLongMetrics(); + log.info(blockedLongMetrics.present(blockedLongMetricsReference.getAndSet(blockedLongMetrics))); + BlobStoreMetrics blobStoreMetrics = testInstance.getBlobStoreMetrics(); + log.info(blobStoreMetrics.present(blobStoreMetricsReference.getAndSet(blobStoreMetrics))); + + LongBlobStoreMetrics longBlobStoreMetrics = testInstance.getLongBlobStoreMetrics(); + log.info(longBlobStoreMetrics.present(longBlobStoreMetricsReference.getAndSet(longBlobStoreMetrics))); + + 
MutableBlobStoreMetrics mutableBlobStoreMetrics = testInstance.getMutableBlobStoreMetrics(); + log.info(mutableBlobStoreMetrics.present(mutableBlobStoreMetricsReference.getAndSet(mutableBlobStoreMetrics))); } catch (Exception e) { log.info("logTimer failed with ", e); From 695882ff851ce9962f8134fca82f5cf9cf304dbc Mon Sep 17 00:00:00 2001 From: Jeff Meunier Date: Tue, 6 Aug 2019 10:55:19 -0400 Subject: [PATCH 15/22] Modified LookupData to support unit testing, and added unit tests for LookupData.getMetadata(). --- .../com/upserve/uppend/lookup/LookupData.java | 4 +--- .../upserve/uppend/lookup/LookupDataTest.java | 21 +++++++++++++++++++ 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/src/main/java/com/upserve/uppend/lookup/LookupData.java b/src/main/java/com/upserve/uppend/lookup/LookupData.java index 5225e203..953a1809 100644 --- a/src/main/java/com/upserve/uppend/lookup/LookupData.java +++ b/src/main/java/com/upserve/uppend/lookup/LookupData.java @@ -38,7 +38,7 @@ public class LookupData implements Flushable, Trimmable { // Timestamped references for readers private final AtomicStampedReference timeStampedMetadata; - private final AtomicInteger reloadStamp; + /*private final*/ AtomicInteger reloadStamp; // removed 'private final' to support unit testing private final long startTime; private final boolean readOnly; @@ -555,7 +555,6 @@ protected LookupMetadata getMetadata() { if (readOnly){ int[] stamp = new int[1]; LookupMetadata result = timeStampedMetadata.get(stamp); - // Convert millis to seconds if (reloadInterval > 0 && ((System.currentTimeMillis() - startTime) / 1000) > stamp[0]){ // a reloadInterval of 0 prevents reloading of the metadata @@ -568,7 +567,6 @@ protected LookupMetadata getMetadata() { } return result; } else { - return atomicMetadataRef.get(); } } diff --git a/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java b/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java index 4d9bd1ad..ae20d510 100644 --- 
a/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java +++ b/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java @@ -6,6 +6,9 @@ import com.upserve.uppend.util.SafeDeleting; import org.junit.*; import org.junit.rules.ExpectedException; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; import java.io.IOException; import java.nio.file.*; @@ -15,7 +18,10 @@ import java.util.stream.*; import static org.junit.Assert.*; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.*; +@RunWith(MockitoJUnitRunner.class) public class LookupDataTest { private static final int RELOAD_INTERVAL = -1; private static final int FLUSH_THRESHOLD = -1; @@ -320,4 +326,19 @@ public void testFlushWithAppendLoad() throws ExecutionException, InterruptedExce writer.join(); } + + @Test + public void testGetMetadataShouldNotLoadMetada() { + LookupData data = Mockito.spy(LookupData.lookupReader(keyBlobStore, mutableBlobStore, RELOAD_INTERVAL)); + data.getMetadata(); // returns a LookupMetadata, but that value is irrelevant here + Mockito.verify(data, never()).loadMetadata(); + } + + @Test + public void testGetMetadataShouldLoadMetada() { + LookupData data = Mockito.spy(LookupData.lookupReader(keyBlobStore, mutableBlobStore, RELOAD_INTERVAL)); + data.reloadStamp.set(1); + data.getMetadata(); // returns a LookupMetadata, but that value is irrelevant here + Mockito.verify(data).loadMetadata(any(), any()); + } } From 0b76f8f97cd5f17c9fca00a3394084a40096c9d4 Mon Sep 17 00:00:00 2001 From: Jeff Meunier Date: Tue, 6 Aug 2019 13:01:38 -0400 Subject: [PATCH 16/22] Modified unit tests to compare object identities instead of using mocked instances. Also inverted the reloadMetadata flag in the getMetadata method. 
--- .../com/upserve/uppend/lookup/LookupData.java | 6 ++--- .../upserve/uppend/lookup/LookupDataTest.java | 24 +++++++++---------- 2 files changed, 14 insertions(+), 16 deletions(-) diff --git a/src/main/java/com/upserve/uppend/lookup/LookupData.java b/src/main/java/com/upserve/uppend/lookup/LookupData.java index 953a1809..4d8bbc9b 100644 --- a/src/main/java/com/upserve/uppend/lookup/LookupData.java +++ b/src/main/java/com/upserve/uppend/lookup/LookupData.java @@ -38,8 +38,8 @@ public class LookupData implements Flushable, Trimmable { // Timestamped references for readers private final AtomicStampedReference timeStampedMetadata; - /*private final*/ AtomicInteger reloadStamp; // removed 'private final' to support unit testing - private final long startTime; + /*private final*/ AtomicInteger reloadStamp; // removed 'private final' in order to support unit testing + /*private final*/ long startTime; private final boolean readOnly; @@ -558,7 +558,7 @@ protected LookupMetadata getMetadata() { // Convert millis to seconds if (reloadInterval > 0 && ((System.currentTimeMillis() - startTime) / 1000) > stamp[0]){ // a reloadInterval of 0 prevents reloading of the metadata - boolean reloadMetadata = reloadStamp.compareAndSet(stamp[0], stamp[0] + reloadInterval); + boolean reloadMetadata = !reloadStamp.compareAndSet(stamp[0], stamp[0] + reloadInterval); if (reloadMetadata) { log.warn("Loading metadata"); result = loadMetadata(result); diff --git a/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java b/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java index ae20d510..01467efd 100644 --- a/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java +++ b/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java @@ -6,9 +6,6 @@ import com.upserve.uppend.util.SafeDeleting; import org.junit.*; import org.junit.rules.ExpectedException; -import org.junit.runner.RunWith; -import org.mockito.Mockito; -import org.mockito.junit.MockitoJUnitRunner; import 
java.io.IOException; import java.nio.file.*; @@ -18,10 +15,7 @@ import java.util.stream.*; import static org.junit.Assert.*; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.*; -@RunWith(MockitoJUnitRunner.class) public class LookupDataTest { private static final int RELOAD_INTERVAL = -1; private static final int FLUSH_THRESHOLD = -1; @@ -329,16 +323,20 @@ public void testFlushWithAppendLoad() throws ExecutionException, InterruptedExce @Test public void testGetMetadataShouldNotLoadMetada() { - LookupData data = Mockito.spy(LookupData.lookupReader(keyBlobStore, mutableBlobStore, RELOAD_INTERVAL)); - data.getMetadata(); // returns a LookupMetadata, but that value is irrelevant here - Mockito.verify(data, never()).loadMetadata(); + LookupData data = LookupData.lookupReader(keyBlobStore, mutableBlobStore, RELOAD_INTERVAL); + LookupMetadata lmd1 = data.getMetadata(); + data.startTime -= 60_000; + LookupMetadata lmd2 = data.getMetadata(); + assertEquals(lmd1, lmd2); } @Test public void testGetMetadataShouldLoadMetada() { - LookupData data = Mockito.spy(LookupData.lookupReader(keyBlobStore, mutableBlobStore, RELOAD_INTERVAL)); - data.reloadStamp.set(1); - data.getMetadata(); // returns a LookupMetadata, but that value is irrelevant here - Mockito.verify(data).loadMetadata(any(), any()); + LookupData data = LookupData.lookupReader(keyBlobStore, mutableBlobStore, 5); + LookupMetadata lmd1 = data.getMetadata(); + data.startTime -= 60_000; + data.reloadStamp.set(10); + LookupMetadata lmd2 = data.getMetadata(); + assertNotEquals(lmd1, lmd2); } } From 37decb8c27d0bc3293cad117a4336b35c70f1704 Mon Sep 17 00:00:00 2001 From: Jeff Meunier Date: Wed, 7 Aug 2019 12:50:42 -0400 Subject: [PATCH 17/22] Added a few more unit tests for the LookupData.getMetadata method. 
--- .../com/upserve/uppend/lookup/LookupData.java | 21 +++-- .../upserve/uppend/lookup/LookupDataTest.java | 87 ++++++++++++++++--- 2 files changed, 93 insertions(+), 15 deletions(-) diff --git a/src/main/java/com/upserve/uppend/lookup/LookupData.java b/src/main/java/com/upserve/uppend/lookup/LookupData.java index 4d8bbc9b..26b2cf18 100644 --- a/src/main/java/com/upserve/uppend/lookup/LookupData.java +++ b/src/main/java/com/upserve/uppend/lookup/LookupData.java @@ -37,9 +37,9 @@ public class LookupData implements Flushable, Trimmable { private AtomicReference atomicMetadataRef; // Timestamped references for readers - private final AtomicStampedReference timeStampedMetadata; - /*private final*/ AtomicInteger reloadStamp; // removed 'private final' in order to support unit testing - /*private final*/ long startTime; + final AtomicStampedReference timeStampedMetadata; // removed 'private' to support unit testing + final AtomicInteger reloadStamp; // removed 'private' to support unit testing + private final long startTime; private final boolean readOnly; @@ -81,6 +81,17 @@ public static LookupData lookupReader(VirtualLongBlobStore keyLongBlobs, Virtual ); } + /** + * The Constructor (TM). 
+ * + * @param keyLongBlobs storage for keys and associated long values + * @param metadataBlobs lexically sorted key index + * @param flushThreshold number of keys that trigger scheduling of flush; use n == -1 to disable + * @param reloadInterval (for read-only mode) number of seconds to metadata to expire, reload is immediate for + * the first thread that hits it; use n <= 0 to disable + * @param readOnly a very self-descriptive boolean value + * @param lookupDataMetricsAdders thread-safe timing and metrics container + */ private LookupData(VirtualLongBlobStore keyLongBlobs, VirtualMutableBlobStore metadataBlobs, int flushThreshold, int reloadInterval, boolean readOnly, LookupDataMetrics.Adders lookupDataMetricsAdders) { this.keyLongBlobs = keyLongBlobs; @@ -556,9 +567,9 @@ protected LookupMetadata getMetadata() { int[] stamp = new int[1]; LookupMetadata result = timeStampedMetadata.get(stamp); // Convert millis to seconds - if (reloadInterval > 0 && ((System.currentTimeMillis() - startTime) / 1000) > stamp[0]){ + if (reloadInterval > 0 && (System.currentTimeMillis() - startTime / 1000) > stamp[0]){ // a reloadInterval of 0 prevents reloading of the metadata - boolean reloadMetadata = !reloadStamp.compareAndSet(stamp[0], stamp[0] + reloadInterval); + boolean reloadMetadata = reloadStamp.compareAndSet(stamp[0], stamp[0] + reloadInterval); if (reloadMetadata) { log.warn("Loading metadata"); result = loadMetadata(result); diff --git a/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java b/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java index 01467efd..a19e2825 100644 --- a/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java +++ b/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java @@ -6,6 +6,9 @@ import com.upserve.uppend.util.SafeDeleting; import org.junit.*; import org.junit.rules.ExpectedException; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; import 
java.io.IOException; import java.nio.file.*; @@ -15,7 +18,11 @@ import java.util.stream.*; import static org.junit.Assert.*; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +@RunWith(MockitoJUnitRunner.class) public class LookupDataTest { private static final int RELOAD_INTERVAL = -1; private static final int FLUSH_THRESHOLD = -1; @@ -322,21 +329,81 @@ public void testFlushWithAppendLoad() throws ExecutionException, InterruptedExce } @Test - public void testGetMetadataShouldNotLoadMetada() { - LookupData data = LookupData.lookupReader(keyBlobStore, mutableBlobStore, RELOAD_INTERVAL); + public void testGetMetadataShouldNotLoadMetada_1() { + LookupData data = Mockito.spy(LookupData.lookupReader(keyBlobStore, mutableBlobStore, RELOAD_INTERVAL)); + int[] stamp = new int[1]; + LookupMetadata expected = data.timeStampedMetadata.get(stamp); + data.timeStampedMetadata.set(expected, 5); LookupMetadata lmd1 = data.getMetadata(); - data.startTime -= 60_000; - LookupMetadata lmd2 = data.getMetadata(); - assertEquals(lmd1, lmd2); + assertTrue(expected == lmd1); + Mockito.verify(data, never()).loadMetadata(); } @Test - public void testGetMetadataShouldLoadMetada() { - LookupData data = LookupData.lookupReader(keyBlobStore, mutableBlobStore, 5); + // The reload interval is set to 5s but the data is set to be reloaded at 10s, so the reload will + // not happen because not enough actual time has elapsed (this test runs in mere milliseconds). 
+ public void testGetMetadataShouldNotLoadMetada_2() { + LookupData data = Mockito.spy(LookupData.lookupReader(keyBlobStore, mutableBlobStore, 5)); + int[] stamp = new int[1]; + LookupMetadata expected = data.timeStampedMetadata.get(stamp); + data.timeStampedMetadata.set(expected, 10); LookupMetadata lmd1 = data.getMetadata(); - data.startTime -= 60_000; - data.reloadStamp.set(10); + assertTrue(expected == lmd1); + Mockito.verify(data, never()).loadMetadata(); + } + + @Test + // The reload interval is set to 20s and the last reload time is set to -10s (an absolutely fake time) + // in order to force the loadMetadata method to be called. + public void testGetMetadataShouldLoadMetada_1() { + LookupData data = Mockito.spy(LookupData.lookupReader(keyBlobStore, mutableBlobStore, 20)); + int[] stamp = new int[1]; + LookupMetadata expected = data.timeStampedMetadata.get(stamp); + data.reloadStamp.set(-10); + data.timeStampedMetadata.set(expected, -10); + LookupMetadata lmd1 = data.getMetadata(); + assertFalse("with no data in the mutableBlobStore, getMetadata returns a new instance", expected == lmd1); LookupMetadata lmd2 = data.getMetadata(); - assertNotEquals(lmd1, lmd2); + assertTrue(lmd1 == lmd2); + Mockito.verify(data, times(1)).loadMetadata(any()); + } + + @Test + public void testGetMetadataShouldLoadMetada_2() { + LookupData dataWriter = LookupData.lookupWriter(keyBlobStore, mutableBlobStore, FLUSH_THRESHOLD); + // add a key & value to the blob store + final LookupKey key1 = new LookupKey("mykey1"); + dataWriter.put(key1, 80); + dataWriter.flush(); + + LookupData dataReader = Mockito.spy(LookupData.lookupReader(keyBlobStore, mutableBlobStore, 20)); + int[] stamp = new int[1]; + LookupMetadata expected = dataReader.timeStampedMetadata.get(stamp); + dataReader.reloadStamp.set(-10); + dataReader.timeStampedMetadata.set(expected, -10); + LookupMetadata lmd1 = dataReader.getMetadata(); + + assertTrue("with data in the mutableBlobStore, getMetadata should not return a 
new instance", + expected == lmd1); + assertEquals(lmd1.getNumKeys(), 1); + Mockito.verify(dataReader, times(1)).loadMetadata(any()); + + // add a second key & value to the blob store + final LookupKey key2 = new LookupKey("mykey2"); + dataWriter.put(key2, 80); + dataWriter.flush(); + + dataReader.reloadStamp.set(-10); + dataReader.timeStampedMetadata.set(lmd1, -10); + LookupMetadata lmd2 = dataReader.getMetadata(); + + assertTrue("a new key has been added, so the LookupMetadata instance should be new", + lmd2 != lmd1); + assertEquals(lmd2.getNumKeys(), 2); + Mockito.verify(dataReader, times(2)).loadMetadata(any()); + + LookupMetadata lmd3 = dataReader.getMetadata(); + assertTrue("nothing has changed since the last call to getMetadata so the instance should not change", + lmd2 == lmd3); } } From 08d5727cad9700da6d59d0889264e0fe40bc5570 Mon Sep 17 00:00:00 2001 From: Jeff Meunier Date: Wed, 7 Aug 2019 13:11:00 -0400 Subject: [PATCH 18/22] Fixed a unit test. --- src/test/java/com/upserve/uppend/lookup/LookupDataTest.java | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java b/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java index a19e2825..280c18af 100644 --- a/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java +++ b/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java @@ -362,9 +362,8 @@ public void testGetMetadataShouldLoadMetada_1() { data.reloadStamp.set(-10); data.timeStampedMetadata.set(expected, -10); LookupMetadata lmd1 = data.getMetadata(); - assertFalse("with no data in the mutableBlobStore, getMetadata returns a new instance", expected == lmd1); - LookupMetadata lmd2 = data.getMetadata(); - assertTrue(lmd1 == lmd2); + assertFalse("with no data in the mutableBlobStore, getMetadata returns a new instance", + expected == lmd1); Mockito.verify(data, times(1)).loadMetadata(any()); } From 7546c7dcbd69c3472c77979cb04b67522a52a031 Mon Sep 17 00:00:00 2001 From: 
Jeff Meunier Date: Wed, 7 Aug 2019 15:25:34 -0400 Subject: [PATCH 19/22] Improved and corrected some unit & integration tests. --- .../com/upserve/uppend/lookup/LookupData.java | 5 +- .../upserve/uppend/lookup/LookupDataTest.java | 94 ++++++++++++------- 2 files changed, 63 insertions(+), 36 deletions(-) diff --git a/src/main/java/com/upserve/uppend/lookup/LookupData.java b/src/main/java/com/upserve/uppend/lookup/LookupData.java index 26b2cf18..8e0fe3e8 100644 --- a/src/main/java/com/upserve/uppend/lookup/LookupData.java +++ b/src/main/java/com/upserve/uppend/lookup/LookupData.java @@ -567,11 +567,12 @@ protected LookupMetadata getMetadata() { int[] stamp = new int[1]; LookupMetadata result = timeStampedMetadata.get(stamp); // Convert millis to seconds - if (reloadInterval > 0 && (System.currentTimeMillis() - startTime / 1000) > stamp[0]){ + long timeDiff = System.currentTimeMillis() - startTime; + if (reloadInterval > 0 && (timeDiff / 1000) > stamp[0]){ // a reloadInterval of 0 prevents reloading of the metadata boolean reloadMetadata = reloadStamp.compareAndSet(stamp[0], stamp[0] + reloadInterval); if (reloadMetadata) { - log.warn("Loading metadata"); + log.debug("Loading metadata"); result = loadMetadata(result); timeStampedMetadata.set(result, stamp[0] + reloadInterval); } diff --git a/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java b/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java index 280c18af..74fa0c73 100644 --- a/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java +++ b/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java @@ -329,61 +329,84 @@ public void testFlushWithAppendLoad() throws ExecutionException, InterruptedExce } @Test - public void testGetMetadataShouldNotLoadMetada_1() { + public void testGetMetadataReloadDeactivated() { LookupData data = Mockito.spy(LookupData.lookupReader(keyBlobStore, mutableBlobStore, RELOAD_INTERVAL)); int[] stamp = new int[1]; LookupMetadata expected = 
data.timeStampedMetadata.get(stamp); - data.timeStampedMetadata.set(expected, 5); LookupMetadata lmd1 = data.getMetadata(); assertTrue(expected == lmd1); Mockito.verify(data, never()).loadMetadata(); } @Test - // The reload interval is set to 5s but the data is set to be reloaded at 10s, so the reload will - // not happen because not enough actual time has elapsed (this test runs in mere milliseconds). - public void testGetMetadataShouldNotLoadMetada_2() { + public void testGetMetadataShouldReload() { LookupData data = Mockito.spy(LookupData.lookupReader(keyBlobStore, mutableBlobStore, 5)); int[] stamp = new int[1]; LookupMetadata expected = data.timeStampedMetadata.get(stamp); - data.timeStampedMetadata.set(expected, 10); + + // Set timestamp and reload concurrent-accesss value for compare and set operation + data.timeStampedMetadata.set(expected, -5); + data.reloadStamp.set(-5); + + // the metadata is reloaded LookupMetadata lmd1 = data.getMetadata(); - assertTrue(expected == lmd1); - Mockito.verify(data, never()).loadMetadata(); + assertTrue(expected != lmd1); + + // Don't reload again - timestamp not expired + LookupMetadata lmd2 = data.getMetadata(); + assertTrue("Timestamp not expired, so instances should be identical", lmd1 == lmd2); + Mockito.verify(data, times(1)).loadMetadata(any()); } @Test - // The reload interval is set to 20s and the last reload time is set to -10s (an absolutely fake time) - // in order to force the loadMetadata method to be called. 
- public void testGetMetadataShouldLoadMetada_1() { - LookupData data = Mockito.spy(LookupData.lookupReader(keyBlobStore, mutableBlobStore, 20)); + public void testGetMetadataShouldNotReload() { + LookupData data = Mockito.spy(LookupData.lookupReader(keyBlobStore, mutableBlobStore, 5)); int[] stamp = new int[1]; LookupMetadata expected = data.timeStampedMetadata.get(stamp); - data.reloadStamp.set(-10); - data.timeStampedMetadata.set(expected, -10); + + // Timestamp not expired + LookupMetadata lmd0 = data.getMetadata(); + assertTrue("Timestamp not expired, so LookupMetadata instances should be identical", + expected == lmd0); + + // Set the timestamp to be expired but leave the concurrent-access value so the compare and set fails + data.timeStampedMetadata.set(expected, -5); + + // The lookup metadata is not reloaded LookupMetadata lmd1 = data.getMetadata(); - assertFalse("with no data in the mutableBlobStore, getMetadata returns a new instance", - expected == lmd1); + assertTrue("Timestamp expired but concurrent-access value not equal to timestamp", + expected == lmd1); + Mockito.verify(data, never()).loadMetadata(); + + data.reloadStamp.set(-5); + LookupMetadata lmd2 = data.getMetadata(); + assertTrue("Timestamp expired and concurrent-access value equal to timestamp; LookupMetadata reloaded", + lmd2 != expected); Mockito.verify(data, times(1)).loadMetadata(any()); } @Test - public void testGetMetadataShouldLoadMetada_2() { + public void testGetMetadataIntegration() { + // Integration test with actual flushed keys + + // Make a reader with no keys + LookupData dataReader = Mockito.spy(LookupData.lookupReader(keyBlobStore, mutableBlobStore, 20)); + int[] stamp = new int[1]; + LookupMetadata lmd0 = dataReader.timeStampedMetadata.get(stamp); + assertEquals(0, lmd0.getNumKeys()); + + // Make a writer and add a key LookupData dataWriter = LookupData.lookupWriter(keyBlobStore, mutableBlobStore, FLUSH_THRESHOLD); - // add a key & value to the blob store final LookupKey 
key1 = new LookupKey("mykey1"); dataWriter.put(key1, 80); dataWriter.flush(); - LookupData dataReader = Mockito.spy(LookupData.lookupReader(keyBlobStore, mutableBlobStore, 20)); - int[] stamp = new int[1]; - LookupMetadata expected = dataReader.timeStampedMetadata.get(stamp); + // Expire the reader metadata dataReader.reloadStamp.set(-10); - dataReader.timeStampedMetadata.set(expected, -10); + dataReader.timeStampedMetadata.set(lmd0, -10); LookupMetadata lmd1 = dataReader.getMetadata(); - - assertTrue("with data in the mutableBlobStore, getMetadata should not return a new instance", - expected == lmd1); + assertTrue("with data in the mutableBlobStore, getMetadata should load a new instance", + lmd0 != lmd1); assertEquals(lmd1.getNumKeys(), 1); Mockito.verify(dataReader, times(1)).loadMetadata(any()); @@ -392,17 +415,20 @@ public void testGetMetadataShouldLoadMetada_2() { dataWriter.put(key2, 80); dataWriter.flush(); - dataReader.reloadStamp.set(-10); - dataReader.timeStampedMetadata.set(lmd1, -10); LookupMetadata lmd2 = dataReader.getMetadata(); + assertTrue("Metadata is not expired so no reload, instances are the same",lmd2 == lmd1); - assertTrue("a new key has been added, so the LookupMetadata instance should be new", - lmd2 != lmd1); - assertEquals(lmd2.getNumKeys(), 2); - Mockito.verify(dataReader, times(2)).loadMetadata(any()); - + // Expire the timestamp but don't adjust the concurrent-access value + dataReader.timeStampedMetadata.set(lmd1, -10); LookupMetadata lmd3 = dataReader.getMetadata(); - assertTrue("nothing has changed since the last call to getMetadata so the instance should not change", - lmd2 == lmd3); + assertTrue(lmd3 == lmd1); + + // Adjust the concurrent-access value - now it will actually reload and return the new metadata + dataReader.reloadStamp.set(-10); + LookupMetadata lmd4 = dataReader.getMetadata(); + + assertTrue("a new key has been added, so the instance should be new", lmd4 != lmd1); + assertEquals(lmd4.getNumKeys(), 2); + 
Mockito.verify(dataReader, times(2)).loadMetadata(any()); } } From 3c616a7b6cc74f8c41241e29bb400ba04584cca1 Mon Sep 17 00:00:00 2001 From: David Stuebe Date: Wed, 7 Aug 2019 17:07:00 -0400 Subject: [PATCH 20/22] Add more comments in getMetadata tests --- .../upserve/uppend/lookup/LookupDataTest.java | 97 ++++++++++++++----- 1 file changed, 72 insertions(+), 25 deletions(-) diff --git a/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java b/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java index 74fa0c73..f96eae20 100644 --- a/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java +++ b/src/test/java/com/upserve/uppend/lookup/LookupDataTest.java @@ -333,67 +333,88 @@ public void testGetMetadataReloadDeactivated() { LookupData data = Mockito.spy(LookupData.lookupReader(keyBlobStore, mutableBlobStore, RELOAD_INTERVAL)); int[] stamp = new int[1]; LookupMetadata expected = data.timeStampedMetadata.get(stamp); + + // Even when expired - don't reload + data.timeStampedMetadata.set(expected, -5); + data.reloadStamp.set(-5); LookupMetadata lmd1 = data.getMetadata(); - assertTrue(expected == lmd1); + assertSame( + "When the reload interval less than zero it should never reload", + expected, + lmd1 + ); Mockito.verify(data, never()).loadMetadata(); } @Test public void testGetMetadataShouldReload() { - LookupData data = Mockito.spy(LookupData.lookupReader(keyBlobStore, mutableBlobStore, 5)); + LookupData data = Mockito.spy(LookupData.lookupReader(keyBlobStore, mutableBlobStore, 50)); int[] stamp = new int[1]; LookupMetadata expected = data.timeStampedMetadata.get(stamp); - // Set timestamp and reload concurrent-accesss value for compare and set operation + // Set timestamp and reload concurrent-access value for compare and set operation data.timeStampedMetadata.set(expected, -5); data.reloadStamp.set(-5); - // the metadata is reloaded LookupMetadata lmd1 = data.getMetadata(); - assertTrue(expected != lmd1); + assertNotSame( + "When the reload is active 
(GT 0) and the time appears to be expired it should reload", + expected, + lmd1 + ); - // Don't reload again - timestamp not expired LookupMetadata lmd2 = data.getMetadata(); - assertTrue("Timestamp not expired, so instances should be identical", lmd1 == lmd2); + assertSame( + "Don't reload again till the timestamp expires again", + lmd1, + lmd2 + ); Mockito.verify(data, times(1)).loadMetadata(any()); } @Test public void testGetMetadataShouldNotReload() { - LookupData data = Mockito.spy(LookupData.lookupReader(keyBlobStore, mutableBlobStore, 5)); + LookupData data = Mockito.spy(LookupData.lookupReader(keyBlobStore, mutableBlobStore, 50)); int[] stamp = new int[1]; LookupMetadata expected = data.timeStampedMetadata.get(stamp); - // Timestamp not expired LookupMetadata lmd0 = data.getMetadata(); - assertTrue("Timestamp not expired, so LookupMetadata instances should be identical", - expected == lmd0); + assertSame( + "Timestamp not expired, so LookupMetadata instances should be identical", + expected, + lmd0 + ); // Set the timestamp to be expired but leave the concurrent-access value so the compare and set fails data.timeStampedMetadata.set(expected, -5); // The lookup metadata is not reloaded LookupMetadata lmd1 = data.getMetadata(); - assertTrue("Timestamp expired but concurrent-access value not equal to timestamp", - expected == lmd1); + assertSame( + "Timestamp expired but concurrent-access value not equal to timestamp", + expected, + lmd1 + ); Mockito.verify(data, never()).loadMetadata(); data.reloadStamp.set(-5); LookupMetadata lmd2 = data.getMetadata(); - assertTrue("Timestamp expired and concurrent-access value equal to timestamp; LookupMetadata reloaded", - lmd2 != expected); + + assertNotSame( + "Timestamp expired and concurrent-access value equal to timestamp; LookupMetadata reloaded", + lmd2, + expected + ); Mockito.verify(data, times(1)).loadMetadata(any()); } @Test public void testGetMetadataIntegration() { - // Integration test with actual flushed 
keys - // Make a reader with no keys - LookupData dataReader = Mockito.spy(LookupData.lookupReader(keyBlobStore, mutableBlobStore, 20)); + LookupData dataReader = Mockito.spy(LookupData.lookupReader(keyBlobStore, mutableBlobStore, 50)); int[] stamp = new int[1]; LookupMetadata lmd0 = dataReader.timeStampedMetadata.get(stamp); - assertEquals(0, lmd0.getNumKeys()); + assertEquals("There are no keys yet!", 0, lmd0.getNumKeys()); // Make a writer and add a key LookupData dataWriter = LookupData.lookupWriter(keyBlobStore, mutableBlobStore, FLUSH_THRESHOLD); @@ -405,9 +426,12 @@ public void testGetMetadataIntegration() { dataReader.reloadStamp.set(-10); dataReader.timeStampedMetadata.set(lmd0, -10); LookupMetadata lmd1 = dataReader.getMetadata(); - assertTrue("with data in the mutableBlobStore, getMetadata should load a new instance", - lmd0 != lmd1); - assertEquals(lmd1.getNumKeys(), 1); + assertNotSame( + "with data in the mutableBlobStore, after expiration we should reload", + lmd0, + lmd1 + ); + assertEquals("we flushed one key and reloaded",lmd1.getNumKeys(), 1); Mockito.verify(dataReader, times(1)).loadMetadata(any()); // add a second key & value to the blob store @@ -416,19 +440,42 @@ public void testGetMetadataIntegration() { dataWriter.flush(); LookupMetadata lmd2 = dataReader.getMetadata(); - assertTrue("Metadata is not expired so no reload, instances are the same",lmd2 == lmd1); + assertSame( + "Metadata is not expired so no reload, instances are the same", + lmd2, + lmd1 + ); // Expire the timestamp but don't adjust the concurrent-access value dataReader.timeStampedMetadata.set(lmd1, -10); LookupMetadata lmd3 = dataReader.getMetadata(); - assertTrue(lmd3 == lmd1); + assertSame( + "If the stamp has expired only the thread where the concurrent_access value matches will do the reload", + lmd3, + lmd1 + ); // Adjust the concurrent-access value - now it will actually reload and return the new metadata dataReader.reloadStamp.set(-10); LookupMetadata lmd4 = 
dataReader.getMetadata(); - assertTrue("a new key has been added, so the instance should be new", lmd4 != lmd1); + assertNotSame( + "This call sees compare and set True then do the reload", + lmd4, + lmd1 + ); assertEquals(lmd4.getNumKeys(), 2); Mockito.verify(dataReader, times(2)).loadMetadata(any()); + + // Without any new keys, expire the timestamp and call reload but the returned metadata instance stays the same + dataReader.reloadStamp.set(-10); + dataReader.timeStampedMetadata.set(lmd4, -10); + LookupMetadata lmd5 = dataReader.getMetadata(); + assertSame( + "Without a new flush, the load operation should return the same metadata", + lmd5, + lmd4 + ); + Mockito.verify(dataReader, times(3)).loadMetadata(any()); } } From 14ee603f03317f6a17d41fa859dd309da23ea8a0 Mon Sep 17 00:00:00 2001 From: David Stuebe Date: Wed, 7 Aug 2019 17:15:19 -0400 Subject: [PATCH 21/22] Add todo to fix parallel test --- src/test/java/com/upserve/uppend/CounterStoreTest.java | 1 + 1 file changed, 1 insertion(+) diff --git a/src/test/java/com/upserve/uppend/CounterStoreTest.java b/src/test/java/com/upserve/uppend/CounterStoreTest.java index 9a1ccc35..dd90ffe7 100644 --- a/src/test/java/com/upserve/uppend/CounterStoreTest.java +++ b/src/test/java/com/upserve/uppend/CounterStoreTest.java @@ -217,6 +217,7 @@ public void testExample() { @Test public void testParallelWriteThenRead() throws Exception { + // TODO this test fails intermittently final int numKeys = 1000; final int totalIncrements = 1_000_000; log.info("parallel: starting {} keys, {} total increments", numKeys, totalIncrements); From 9e1500247fb4b4dbf1e65cd0d780be99be4714a2 Mon Sep 17 00:00:00 2001 From: David Stuebe Date: Wed, 7 Aug 2019 21:23:15 -0400 Subject: [PATCH 22/22] cleanup formatting and fix access levels --- .../upserve/uppend/AppendStorePartition.java | 23 +++++++---- .../java/com/upserve/uppend/BlockedLongs.java | 24 ++++++------ .../upserve/uppend/CounterStorePartition.java | 10 ++--- 
.../upserve/uppend/FileAppendOnlyStore.java | 6 +-- .../java/com/upserve/uppend/Partition.java | 4 +- .../blobs/VirtualAppendOnlyBlobStore.java | 3 +- .../uppend/blobs/VirtualLongBlobStore.java | 3 +- .../uppend/blobs/VirtualMutableBlobStore.java | 4 +- .../com/upserve/uppend/lookup/LookupData.java | 18 +++++---- .../uppend/metrics/BlobStoreMetrics.java | 3 +- .../uppend/metrics/BlockedLongMetrics.java | 4 +- .../uppend/metrics/InternalMetrics.java | 38 ++++++++----------- .../uppend/metrics/LongBlobStoreMetrics.java | 2 +- .../uppend/metrics/LookupDataMetrics.java | 4 +- .../metrics/MutableBlobStoreMetrics.java | 3 +- 15 files changed, 74 insertions(+), 75 deletions(-) diff --git a/src/main/java/com/upserve/uppend/AppendStorePartition.java b/src/main/java/com/upserve/uppend/AppendStorePartition.java index 39959d5c..20658958 100644 --- a/src/main/java/com/upserve/uppend/AppendStorePartition.java +++ b/src/main/java/com/upserve/uppend/AppendStorePartition.java @@ -16,9 +16,9 @@ public class AppendStorePartition extends Partition implements Flushable, Closeable { private static final Logger log = org.slf4j.LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); - final BlockedLongs blocks; + private final BlockedLongs blocks; private final VirtualAppendOnlyBlobStore[] blobs; - final VirtualPageFile blobFile; + private final VirtualPageFile blobFile; private static Path blobsFile(Path partitiondDir) { return partitiondDir.resolve("blobStore"); @@ -28,7 +28,7 @@ private static Path blocksFile(Path partitiondDir) { return partitiondDir.resolve("blockedLongs"); } - public static AppendStorePartition createPartition(Path parentDir, String partition, AppendOnlyStoreBuilder builder) { + static AppendStorePartition createPartition(Path parentDir, String partition, AppendOnlyStoreBuilder builder) { Path partitionDir = validatePartition(parentDir, partition); @@ -70,7 +70,7 @@ public static AppendStorePartition createPartition(Path parentDir, String partit return new 
AppendStorePartition(keys, metadata, blobs, blocks, false, builder); } - public static AppendStorePartition openPartition(Path parentDir, String partition, boolean readOnly, AppendOnlyStoreBuilder builder) { + static AppendStorePartition openPartition(Path parentDir, String partition, boolean readOnly, AppendOnlyStoreBuilder builder) { validatePartition(partition); Path partitionDir = parentDir.resolve(partition); @@ -127,7 +127,8 @@ private AppendStorePartition( blobs = IntStream.range(0, hashCount) .mapToObj(virtualFileNumber -> new VirtualAppendOnlyBlobStore( - virtualFileNumber, blobsFile, builder.getBlobStoreMetricsAdders()) + virtualFileNumber, blobsFile, builder.getBlobStoreMetricsAdders() + ) ) .toArray(VirtualAppendOnlyBlobStore[]::new); } @@ -195,8 +196,8 @@ Stream keys() { } void clear() throws IOException { - longKeyFile.close(); - metadataBlobFile.close(); + getLongKeyFile().close(); + getMetadataBlobFile().close(); blobFile.close(); blocks.close(); @@ -210,4 +211,12 @@ public void close() throws IOException { blobFile.close(); blocks.close(); } + + BlockedLongs getBlocks() { + return blocks; + } + + VirtualPageFile getBlobFile() { + return blobFile; + } } diff --git a/src/main/java/com/upserve/uppend/BlockedLongs.java b/src/main/java/com/upserve/uppend/BlockedLongs.java index 19ee0971..43b4f4a0 100644 --- a/src/main/java/com/upserve/uppend/BlockedLongs.java +++ b/src/main/java/com/upserve/uppend/BlockedLongs.java @@ -25,6 +25,8 @@ public class BlockedLongs implements AutoCloseable, Flushable { private static final int MAX_PAGES = 32 * 1024; // max 4 TB static final int HEADER_BYTES = 128; // Currently 16 used for file size and append count + private static final int posBufPosition = 0; + private static final int appendBufPosition = 8; private final Path file; @@ -34,19 +36,17 @@ public class BlockedLongs implements AutoCloseable, Flushable { private final FileChannel blocks; private final MappedByteBuffer[] pages; - private static final int 
posBufPosition = 0; private final MappedByteBuffer posBuf; private final AtomicLong posMem; - private static final int appendBufPosition = 8; - private final MappedByteBuffer appendBuf; + private final MappedByteBuffer appendCountBuf; private final AtomicInteger currentPage; private final boolean readOnly; - final BlockedLongMetrics.Adders blockedLongMetricsAdders; + private final BlockedLongMetrics.Adders blockedLongMetricsAdders; private long initialAppendCount; // Should be final, but must be able to clear! - final LongAdder appendCounter = new LongAdder(); + private final LongAdder appendCounter = new LongAdder(); BlockedLongs(Path file, int valuesPerBlock, boolean readOnly) { this(file, valuesPerBlock, readOnly, new BlockedLongMetrics.Adders()); @@ -128,11 +128,11 @@ else if (pos < HEADER_BYTES) { } try { - appendBuf = blocks.map(readOnly ? FileChannel.MapMode.READ_ONLY : FileChannel.MapMode.READ_WRITE, appendBufPosition, 8); + appendCountBuf = blocks.map(readOnly ? FileChannel.MapMode.READ_ONLY : FileChannel.MapMode.READ_WRITE, appendBufPosition, 8); } catch (IOException e) { throw new UncheckedIOException("Unable to map pos buffer at in " + file, e); } - initialAppendCount = appendBuf.getLong(0); + initialAppendCount = appendCountBuf.getLong(0); posMem = new AtomicLong(pos); } @@ -179,10 +179,10 @@ public long getBlockCount() { */ public long getCount() { if (readOnly){ - return appendBuf.getLong(0); + return appendCountBuf.getLong(0); } else { final long count = initialAppendCount + appendCounter.sum(); - appendBuf.putLong(0, count); + appendCountBuf.putLong(0, count); return count; } } @@ -404,7 +404,7 @@ public void clear() { } posBuf.putLong(0, HEADER_BYTES); posMem.set(HEADER_BYTES); - appendBuf.putLong(0, 0L); + appendCountBuf.putLong(0, 0L); initialAppendCount = 0L; Arrays.fill(pages, null); currentPage.set(0); @@ -439,8 +439,8 @@ public void flush() { if (readOnly) return; log.debug("flushing {}", file); posBuf.force(); - appendBuf.putLong(0, 
initialAppendCount + appendCounter.sum()); - appendBuf.force(); + appendCountBuf.putLong(0, initialAppendCount + appendCounter.sum()); + appendCountBuf.force(); Arrays.stream(pages) .parallel() diff --git a/src/main/java/com/upserve/uppend/CounterStorePartition.java b/src/main/java/com/upserve/uppend/CounterStorePartition.java index 4b0b374a..695aa11c 100644 --- a/src/main/java/com/upserve/uppend/CounterStorePartition.java +++ b/src/main/java/com/upserve/uppend/CounterStorePartition.java @@ -16,7 +16,7 @@ public class CounterStorePartition extends Partition { private static final Logger log = org.slf4j.LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); - public static CounterStorePartition createPartition(Path parentDir, String partition, CounterStoreBuilder builder) { + static CounterStorePartition createPartition(Path parentDir, String partition, CounterStoreBuilder builder) { Path partitionDir = validatePartition(parentDir, partition); VirtualPageFile metadata = new VirtualPageFile( @@ -45,7 +45,7 @@ public static CounterStorePartition createPartition(Path parentDir, String parti return new CounterStorePartition(keys, metadata, false, builder); } - public static CounterStorePartition openPartition(Path partentDir, String partition, boolean readOnly, CounterStoreBuilder builder) { + static CounterStorePartition openPartition(Path partentDir, String partition, boolean readOnly, CounterStoreBuilder builder) { validatePartition(partition); Path partitiondDir = partentDir.resolve(partition); @@ -124,8 +124,8 @@ Stream keys() { } void clear() throws IOException { - longKeyFile.close(); - metadataBlobFile.close(); - SafeDeleting.removeDirectory(longKeyFile.getFilePath().getParent()); + getLongKeyFile().close(); + getMetadataBlobFile().close(); + SafeDeleting.removeDirectory(getLongKeyFile().getFilePath().getParent()); } } diff --git a/src/main/java/com/upserve/uppend/FileAppendOnlyStore.java b/src/main/java/com/upserve/uppend/FileAppendOnlyStore.java index 
86f72699..2ae8b571 100644 --- a/src/main/java/com/upserve/uppend/FileAppendOnlyStore.java +++ b/src/main/java/com/upserve/uppend/FileAppendOnlyStore.java @@ -105,11 +105,11 @@ Function getCreatePartitionFunction() { @Override public BlockedLongMetrics getBlockedLongMetrics() { LongSummaryStatistics blockedLongAllocatedBlocksStatistics = streamPartitions() - .mapToLong(partition -> partition.blocks.getBlockCount()) + .mapToLong(partition -> partition.getBlocks().getBlockCount()) .summaryStatistics(); LongSummaryStatistics blockedLongAppendCountStatistics = streamPartitions() - .mapToLong(partition -> partition.blocks.getCount()) + .mapToLong(partition -> partition.getBlocks().getCount()) .summaryStatistics(); return new BlockedLongMetrics( @@ -120,7 +120,7 @@ public BlockedLongMetrics getBlockedLongMetrics() { @Override public BlobStoreMetrics getBlobStoreMetrics() { LongSummaryStatistics blobStoreAllocatedPagesStatistics = streamPartitions() - .mapToLong(partition -> partition.blobFile.getAllocatedPageCount()) + .mapToLong(partition -> partition.getBlobFile().getAllocatedPageCount()) .summaryStatistics(); return new BlobStoreMetrics(blobStoreMetricsAdders, blobStoreAllocatedPagesStatistics); diff --git a/src/main/java/com/upserve/uppend/Partition.java b/src/main/java/com/upserve/uppend/Partition.java index 856679c1..87dedd6a 100644 --- a/src/main/java/com/upserve/uppend/Partition.java +++ b/src/main/java/com/upserve/uppend/Partition.java @@ -148,9 +148,9 @@ public void close() throws IOException { metadataBlobFile.close(); } - public VirtualPageFile getLongKeyFile() { return longKeyFile; } + VirtualPageFile getLongKeyFile() { return longKeyFile; } - public VirtualPageFile getMetadataBlobFile() {return metadataBlobFile; } + VirtualPageFile getMetadataBlobFile() { return metadataBlobFile; } private static boolean isValidPartitionCharStart(char c) { return Character.isJavaIdentifierPart(c); diff --git 
a/src/main/java/com/upserve/uppend/blobs/VirtualAppendOnlyBlobStore.java b/src/main/java/com/upserve/uppend/blobs/VirtualAppendOnlyBlobStore.java index 30160236..633c5394 100644 --- a/src/main/java/com/upserve/uppend/blobs/VirtualAppendOnlyBlobStore.java +++ b/src/main/java/com/upserve/uppend/blobs/VirtualAppendOnlyBlobStore.java @@ -8,8 +8,7 @@ public class VirtualAppendOnlyBlobStore extends VirtualPageFileIO { private static final Logger log = org.slf4j.LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); - final BlobStoreMetrics.Adders blobStoreMetricsAdders; - + private final BlobStoreMetrics.Adders blobStoreMetricsAdders; public VirtualAppendOnlyBlobStore(int virtualFileNumber, VirtualPageFile virtualPageFile) { this(virtualFileNumber, virtualPageFile, new BlobStoreMetrics.Adders()); diff --git a/src/main/java/com/upserve/uppend/blobs/VirtualLongBlobStore.java b/src/main/java/com/upserve/uppend/blobs/VirtualLongBlobStore.java index e0f98aba..78e359db 100644 --- a/src/main/java/com/upserve/uppend/blobs/VirtualLongBlobStore.java +++ b/src/main/java/com/upserve/uppend/blobs/VirtualLongBlobStore.java @@ -15,8 +15,7 @@ public class VirtualLongBlobStore extends VirtualPageFileIO { private static final Logger log = org.slf4j.LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); - final LongBlobStoreMetrics.Adders longBlobStoreMetricsAdders; - + private final LongBlobStoreMetrics.Adders longBlobStoreMetricsAdders; public VirtualLongBlobStore(int virtualFileNumber, VirtualPageFile virtualPageFile, LongBlobStoreMetrics.Adders longBlobStoreMetricsAdders) { super(virtualFileNumber, virtualPageFile); diff --git a/src/main/java/com/upserve/uppend/blobs/VirtualMutableBlobStore.java b/src/main/java/com/upserve/uppend/blobs/VirtualMutableBlobStore.java index 58c72c1e..6e592ced 100644 --- a/src/main/java/com/upserve/uppend/blobs/VirtualMutableBlobStore.java +++ b/src/main/java/com/upserve/uppend/blobs/VirtualMutableBlobStore.java @@ -12,7 +12,7 @@ public 
class VirtualMutableBlobStore extends VirtualPageFileIO { private static final HashFunction hashFunction = Hashing.murmur3_32(); - final MutableBlobStoreMetrics.Adders mutableBlobStoreMetricsAdders; + private final MutableBlobStoreMetrics.Adders mutableBlobStoreMetricsAdders; public VirtualMutableBlobStore(int virtualFileNumber, VirtualPageFile virtualPageFile) { this(virtualFileNumber, virtualPageFile, new MutableBlobStoreMetrics.Adders()); @@ -70,7 +70,7 @@ private static int recordSize(byte[] inputBytes) { return inputBytes.length + 8; } - static byte[] byteChecksum(byte[] inputBytes) { + private static byte[] byteChecksum(byte[] inputBytes) { return hashFunction.hashBytes(inputBytes).asBytes(); } diff --git a/src/main/java/com/upserve/uppend/lookup/LookupData.java b/src/main/java/com/upserve/uppend/lookup/LookupData.java index 8e0fe3e8..b8b5d85e 100644 --- a/src/main/java/com/upserve/uppend/lookup/LookupData.java +++ b/src/main/java/com/upserve/uppend/lookup/LookupData.java @@ -54,10 +54,10 @@ public class LookupData implements Flushable, Trimmable { // Flushing every 30 seconds, we can run for 2000 years before the metaDataGeneration hits INTEGER.MAX_VALUE private AtomicInteger metaDataGeneration; - final LookupDataMetrics.Adders lookupDataMetricsAdders; + private final LookupDataMetrics.Adders lookupDataMetricsAdders; - public static LookupData lookupWriter(VirtualLongBlobStore keyLongBlobs, VirtualMutableBlobStore metadataBlobs, + static LookupData lookupWriter(VirtualLongBlobStore keyLongBlobs, VirtualMutableBlobStore metadataBlobs, int flushThreshold){ return lookupWriter(keyLongBlobs, metadataBlobs, flushThreshold, new LookupDataMetrics.Adders()); } @@ -69,7 +69,7 @@ public static LookupData lookupWriter(VirtualLongBlobStore keyLongBlobs, Virtual ); } - public static LookupData lookupReader(VirtualLongBlobStore keyLongBlobs, VirtualMutableBlobStore metadataBlobs, + static LookupData lookupReader(VirtualLongBlobStore keyLongBlobs, VirtualMutableBlobStore 
metadataBlobs, int reloadInterval){ return lookupReader(keyLongBlobs, metadataBlobs, reloadInterval, new LookupDataMetrics.Adders()); } @@ -221,7 +221,7 @@ public long putIfNotExists(LookupKey key, LongSupplier allocateLongFunc) { * @param value the value to put if this is a new key * @return the value associated with the key */ - public long putIfNotExists(LookupKey key, long value) { + long putIfNotExists(LookupKey key, long value) { if (readOnly) throw new RuntimeException("Can not putIfNotExists in read only LookupData"); long[] ref = new long[1]; @@ -330,7 +330,7 @@ public Long put(LookupKey key, final long value) { * @param keyPosition the position in the longBlobs files * @return the cached lookup key */ - public LookupKey readKey(Long keyPosition) { + private LookupKey readKey(Long keyPosition) { return new LookupKey(keyLongBlobs.readBlob(keyPosition)); } @@ -340,7 +340,7 @@ public LookupKey readKey(Long keyPosition) { * @param keyPosition the position in the longBlobs files * @return the key and the long value associated with it */ - public Map.Entry readEntry(long keyPosition) { + private Map.Entry readEntry(long keyPosition) { return Maps.immutableEntry(readKey(keyPosition), readValue(keyPosition)); } @@ -350,7 +350,7 @@ public Map.Entry readEntry(long keyPosition) { * @param keyPosition the position in the longBlobs files * @return the long value */ - public long readValue(long keyPosition) { + private long readValue(long keyPosition) { return keyLongBlobs.readLong(keyPosition); } @@ -562,7 +562,7 @@ void generateMetaData(LookupMetadata currentMetadata) { ); } - protected LookupMetadata getMetadata() { + LookupMetadata getMetadata() { if (readOnly){ int[] stamp = new int[1]; LookupMetadata result = timeStampedMetadata.get(stamp); @@ -605,11 +605,13 @@ public synchronized void flush() { flushing.set(false); } + @Override public void trim() { if (!readOnly) { flush(); } else { + // Trim will do an explicit reload of the longLookup metadata int[] stamp = 
new int[1]; LookupMetadata result = timeStampedMetadata.get(stamp); result = loadMetadata(result); diff --git a/src/main/java/com/upserve/uppend/metrics/BlobStoreMetrics.java b/src/main/java/com/upserve/uppend/metrics/BlobStoreMetrics.java index 4d183a01..daa0f588 100644 --- a/src/main/java/com/upserve/uppend/metrics/BlobStoreMetrics.java +++ b/src/main/java/com/upserve/uppend/metrics/BlobStoreMetrics.java @@ -4,7 +4,6 @@ import java.util.concurrent.atomic.LongAdder; public class BlobStoreMetrics implements InternalMetrics{ - // Stats for summed over all AppendOnlyBlobStore operations since the Uppend store was opened private final long bytesAppended; private final long appendCounter; @@ -13,7 +12,7 @@ public class BlobStoreMetrics implements InternalMetrics{ private final long readCounter; private final long readTimer; - // Partition level stats for the life of the blocked long store (Consistent on open) + // Partition level stats for the life of the Blob store (Consistent across reopen) private final double avgBlobStoreAllocatedPages; private final long maxBlobStoreAllocatedPages; private final long sumBlobStoreAllocatedPages; diff --git a/src/main/java/com/upserve/uppend/metrics/BlockedLongMetrics.java b/src/main/java/com/upserve/uppend/metrics/BlockedLongMetrics.java index ee7f3fee..d5d3b596 100644 --- a/src/main/java/com/upserve/uppend/metrics/BlockedLongMetrics.java +++ b/src/main/java/com/upserve/uppend/metrics/BlockedLongMetrics.java @@ -4,7 +4,6 @@ import java.util.concurrent.atomic.LongAdder; public class BlockedLongMetrics implements InternalMetrics { - // Stats summed over all BlockedLongs stores since the Uppend store was opened private final long blockAllocationCounter; private final long appendCounter; @@ -15,10 +14,11 @@ public class BlockedLongMetrics implements InternalMetrics { private final long readLastCounter; private final long readLastTimer; - // Partition level stats for the life of the blocked long store (Consistent on open) + // Partition 
level stats for the life of the blocked long store (Consistent across reopen) private final double avgBlocksAllocated; private final long maxBlocksAllocated; private final long sumBlocksAllocated; + // For read only views, AppendCounter numbers are approximate, less than actual private final double avgAppendCounter; private final long maxAppendCounter; diff --git a/src/main/java/com/upserve/uppend/metrics/InternalMetrics.java b/src/main/java/com/upserve/uppend/metrics/InternalMetrics.java index 2e999cd9..e4dcde2d 100644 --- a/src/main/java/com/upserve/uppend/metrics/InternalMetrics.java +++ b/src/main/java/com/upserve/uppend/metrics/InternalMetrics.java @@ -2,10 +2,12 @@ public interface InternalMetrics<T extends InternalMetrics<T>> { String toString(); + String present(T previous); + T minus(T other); - public enum Prefix{ + enum Prefix { NANO(1e-9d), MICRO(1e-6d), MILLI(1e-3d), @@ -13,53 +15,43 @@ public enum Prefix{ KILO(1e3d), MEGA(1e6d), GIGA(1e9d); - + private double value; - - Prefix(double value){ + + Prefix(double value) { this.value = value; } - - public double getValue(){ + + public double getValue() { return value; } - public double toNano(double convert){ + public double toNano(double convert) { return value * convert / NANO.value; } - public double toMicro(double convert){ + public double toMicro(double convert) { return value * convert / MICRO.value; } - public double toMilli(double convert){ + public double toMilli(double convert) { return value * convert / MILLI.value; } - public double toNone(double convert){ + public double toNone(double convert) { return value * convert / NONE.value; } - public double toKilo(double convert){ + public double toKilo(double convert) { return value * convert / KILO.value; } - public double toMega(double convert){ + public double toMega(double convert) { return value * convert / MEGA.value; } - public double toGiga(double convert){ + public double toGiga(double convert) { return value * convert / GIGA.value; } } - - default String
divFormat(double numerator, double denominator, int width){ - return divFormat(numerator, denominator, width, 2); - } - - default String divFormat(double numerator, double denominator, int width, int precision){ - return String.format("%" + width + '.' + precision + 'f', numerator / denominator); - } - - } diff --git a/src/main/java/com/upserve/uppend/metrics/LongBlobStoreMetrics.java b/src/main/java/com/upserve/uppend/metrics/LongBlobStoreMetrics.java index 35cb9d7a..04e86f54 100644 --- a/src/main/java/com/upserve/uppend/metrics/LongBlobStoreMetrics.java +++ b/src/main/java/com/upserve/uppend/metrics/LongBlobStoreMetrics.java @@ -19,7 +19,7 @@ public class LongBlobStoreMetrics implements InternalMetrics { - + // Stats summed over all LookupData operations since the Uppend store was opened private final long flushedKeyCount; private final long flushCount; private final long flushTimer; @@ -17,11 +17,11 @@ public class LookupDataMetrics implements InternalMetrics { private final long findKeyTimer; + // LookupData level stats about the size of the lookups (consistent across reopen) private final double avgLookupDataSize; private final long maxLookupDataSize; private final long sumLookupDataSize; - public static class Adders { public final LongAdder flushCounter = new LongAdder(); public final LongAdder flushedKeyCounter = new LongAdder(); diff --git a/src/main/java/com/upserve/uppend/metrics/MutableBlobStoreMetrics.java b/src/main/java/com/upserve/uppend/metrics/MutableBlobStoreMetrics.java index 9b66d8d6..87bf53fd 100644 --- a/src/main/java/com/upserve/uppend/metrics/MutableBlobStoreMetrics.java +++ b/src/main/java/com/upserve/uppend/metrics/MutableBlobStoreMetrics.java @@ -12,7 +12,7 @@ public class MutableBlobStoreMetrics implements InternalMetrics