diff --git a/platform-sdk/swirlds-merkledb/src/main/java/com/swirlds/merkledb/collections/AbstractLongList.java b/platform-sdk/swirlds-merkledb/src/main/java/com/swirlds/merkledb/collections/AbstractLongList.java
index ab416770f9fb..298f5e751267 100644
--- a/platform-sdk/swirlds-merkledb/src/main/java/com/swirlds/merkledb/collections/AbstractLongList.java
+++ b/platform-sdk/swirlds-merkledb/src/main/java/com/swirlds/merkledb/collections/AbstractLongList.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2021-2024 Hedera Hashgraph, LLC
+ * Copyright (C) 2021-2025 Hedera Hashgraph, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -221,15 +221,31 @@ protected AbstractLongList(
}
maxLongs = headerBuffer.getLong();
+
+ // Compute how many longs are in the file body
+ final long longsInFile = (fileChannel.size() - currentFileHeaderSize) / Long.BYTES;
+
if (formatVersion >= MIN_VALID_INDEX_SUPPORT_VERSION) {
- minValidIndex.set(headerBuffer.getLong());
- // "inflating" the size by number of indices that are to the left of the min valid index
- size.set(minValidIndex.get() + (fileChannel.size() - currentFileHeaderSize) / Long.BYTES);
+ final long readMinValidIndex = headerBuffer.getLong();
+
+ // If the file is empty or readMinValidIndex < 0, treat it as an empty list
+ if (longsInFile <= 0 || readMinValidIndex < 0) {
+ size.set(0);
+ minValidIndex.set(-1);
+ maxValidIndex.set(-1);
+ } else {
+ // Otherwise, compute the size by "inflating" it to include the number of indices to the left of
+ // the min valid index.
+ minValidIndex.set(readMinValidIndex);
+ size.set(readMinValidIndex + longsInFile);
+ maxValidIndex.set(size.get() - 1);
+ }
} else {
minValidIndex.set(0);
- size.set((fileChannel.size() - FILE_HEADER_SIZE_V1) / Long.BYTES);
+ size.set(longsInFile);
+ maxValidIndex.set(size.get() - 1);
}
- maxValidIndex.set(size.get() - 1);
+
chunkList = new AtomicReferenceArray<>(calculateNumberOfChunks(maxLongs));
readBodyFromFileChannelOnInit(file.getName(), fileChannel);
}
@@ -239,13 +255,92 @@ protected AbstractLongList(
/**
* Initializes the list from the given file channel. At the moment of the call all the class metadata
* is already initialized from the file header.
+ *
* @param sourceFileName the name of the file from which the list is initialized
* @param fileChannel the file channel to read the list body from
* @throws IOException if there was a problem reading the file
*/
- protected abstract void readBodyFromFileChannelOnInit(String sourceFileName, FileChannel fileChannel)
+ protected void readBodyFromFileChannelOnInit(String sourceFileName, FileChannel fileChannel) throws IOException {
+ if (minValidIndex.get() < 0) {
+ // Empty list, nothing to read
+ return;
+ }
+
+ final int firstChunkIndex = toIntExact(minValidIndex.get() / numLongsPerChunk);
+ final int lastChunkIndex = toIntExact(maxValidIndex.get() / numLongsPerChunk);
+ final int minValidIndexInChunk = toIntExact(minValidIndex.get() % numLongsPerChunk);
+ final int maxValidIndexInChunk = toIntExact(maxValidIndex.get() % numLongsPerChunk);
+
+ for (int chunkIndex = firstChunkIndex; chunkIndex <= lastChunkIndex; chunkIndex++) {
+ final int startIndexInChunk = (chunkIndex == firstChunkIndex) ? minValidIndexInChunk : 0;
+ final int endIndexInChunk = (chunkIndex == lastChunkIndex) ? (maxValidIndexInChunk + 1) : numLongsPerChunk;
+
+ C chunk = readChunkData(fileChannel, chunkIndex, startIndexInChunk, endIndexInChunk);
+ setChunk(chunkIndex, chunk);
+ }
+ }
+
+ /**
+ * Reads data from the specified {@code fileChannel} and stores it into a chunk.
+ * The data is read from the specified range within the chunk.
+ * Subclasses must implement this method to read data from the provided {@code fileChannel}.
+ *
+ * @param fileChannel the file channel to read from
+ * @param chunkIndex the index of the chunk to store the read data
+ * @param startIndex the starting index (inclusive) within the chunk
+ * @param endIndex the ending index (exclusive) within the chunk
+ * @return a chunk (byte buffer, array or long that represents an offset of the chunk)
+ * @throws IOException if there is an error reading the file
+ */
+ protected abstract C readChunkData(FileChannel fileChannel, int chunkIndex, int startIndex, int endIndex)
throws IOException;
+ /**
+ * Stores the specified chunk at the given {@code chunkIndex}.
+ *
+ * @param chunkIndex the index where the chunk is to be stored
+ * @param chunk the chunk to store
+ */
+ protected void setChunk(int chunkIndex, C chunk) {
+ chunkList.set(chunkIndex, chunk);
+ }
+
+ /**
+ * Reads a specified range of elements from a file channel into the given buffer.
+ *
+ * <p>This method computes the appropriate byte offsets within the buffer and the number of bytes
+ * to read based on the provided {@code startIndex} and {@code endIndex}. It then performs a
+ * complete read of that data from the file channel into the buffer.
+ *
+ * @param fileChannel the file channel to read data from
+ * @param chunkIndex the index of the chunk being read
+ * @param startIndex the starting index (inclusive) within the chunk of the first element to read
+ * @param endIndex the ending index (exclusive) within the chunk of the last element to read
+ * @param buffer the buffer into which data will be read
+ * @throws IOException if an error occurs while reading from the file,
+ * or if the number of bytes read does not match the expected size
+ */
+ protected static void readDataIntoBuffer(
+ final FileChannel fileChannel,
+ final int chunkIndex,
+ final int startIndex,
+ final int endIndex,
+ final ByteBuffer buffer)
+ throws IOException {
+ final int startOffset = startIndex * Long.BYTES;
+ final int endOffset = endIndex * Long.BYTES;
+
+ buffer.position(startOffset);
+ buffer.limit(endOffset);
+
+ final int bytesToRead = endOffset - startOffset;
+ final long bytesRead = MerkleDbFileUtils.completelyRead(fileChannel, buffer);
+ if (bytesRead != bytesToRead) {
+ throw new IOException("Failed to read chunks, chunkIndex=" + chunkIndex + " expected=" + bytesToRead
+ + " actual=" + bytesRead);
+ }
+ }
+
/**
* Called when the list is initialized from an empty or absent source file.
* @param path the path to the source file
diff --git a/platform-sdk/swirlds-merkledb/src/main/java/com/swirlds/merkledb/collections/LongListDisk.java b/platform-sdk/swirlds-merkledb/src/main/java/com/swirlds/merkledb/collections/LongListDisk.java
index 2dbd18ca422f..0d6945bc5516 100644
--- a/platform-sdk/swirlds-merkledb/src/main/java/com/swirlds/merkledb/collections/LongListDisk.java
+++ b/platform-sdk/swirlds-merkledb/src/main/java/com/swirlds/merkledb/collections/LongListDisk.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2021-2024 Hedera Hashgraph, LLC
+ * Copyright (C) 2021-2025 Hedera Hashgraph, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -25,7 +25,6 @@
import com.swirlds.merkledb.utilities.MerkleDbFileUtils;
import edu.umd.cs.findbugs.annotations.NonNull;
import java.io.IOException;
-import java.io.RandomAccessFile;
import java.io.UncheckedIOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
@@ -55,7 +54,7 @@ public class LongListDisk extends AbstractLongList<Long> {
/** This file channel is to work with the temporary file.
*/
- private final FileChannel currentFileChannel;
+ private FileChannel currentFileChannel;
/**
* Path to the temporary file used to store the data.
@@ -139,8 +138,6 @@ public LongListDisk(final Path file, final @NonNull Configuration configuration)
if (tempFile == null) {
throw new IllegalStateException("The temp file is not initialized");
}
- currentFileChannel = FileChannel.open(
- tempFile, StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE);
}
/**
@@ -157,57 +154,41 @@ protected void onEmptyOrAbsentSourceFile(final Path path) throws IOException {
protected void readBodyFromFileChannelOnInit(final String sourceFileName, final FileChannel fileChannel)
throws IOException {
tempFile = createTempFile(sourceFileName, configuration);
- if (minValidIndex.get() < 0) {
- // Nothing to read
- return;
- }
- // create temporary file for writing
- try (final RandomAccessFile rf = new RandomAccessFile(tempFile.toFile(), "rw")) {
- // ensure that the amount of disk space is enough
- // two additional chunks are required to accommodate "compressed" first and last chunks in the original file
- rf.setLength(fileChannel.size() + 2L * memoryChunkSize);
- final FileChannel tempFileCHannel = rf.getChannel();
-
- final int totalNumberOfChunks = calculateNumberOfChunks(size());
- final int firstChunkWithDataIndex = toIntExact(minValidIndex.get() / numLongsPerChunk);
- final int minValidIndexInChunk = toIntExact(minValidIndex.get() % numLongsPerChunk);
- final int lastChunkWithDataIndex = totalNumberOfChunks - firstChunkWithDataIndex - 1;
-
- // copy the first chunk
- final ByteBuffer transferBuffer = initOrGetTransferBuffer();
- // we need to make sure that the chunk is written in full.
- // If a value is absent, the list element will have IMPERMISSIBLE_VALUE
- fillBufferWithZeroes(transferBuffer);
- transferBuffer.position(minValidIndexInChunk * Long.BYTES);
- MerkleDbFileUtils.completelyRead(fileChannel, transferBuffer);
- transferBuffer.flip();
- // writing the full chunk, all values before minValidIndexInChunk are zeroes
- MerkleDbFileUtils.completelyWrite(tempFileCHannel, transferBuffer, 0);
- chunkList.set(firstChunkWithDataIndex, 0L);
-
- // copy everything except for the first chunk and the last chunk
- final int numberOfFullChunks = totalNumberOfChunks - firstChunkWithDataIndex - 2;
- if (numberOfFullChunks > 0) {
- final long bytesToTransfer = (long) numberOfFullChunks * memoryChunkSize;
- final long bytesTransferred = MerkleDbFileUtils.completelyTransferFrom(
- tempFileCHannel, fileChannel, memoryChunkSize, bytesToTransfer);
- if (bytesTransferred != bytesToTransfer) {
- throw new IOException("Failed to read long list chunks, expected=" + bytesToTransfer + " actual="
- + bytesTransferred);
- }
- }
- // copy the last chunk
- transferBuffer.clear();
- MerkleDbFileUtils.completelyRead(fileChannel, transferBuffer);
- transferBuffer.flip();
- MerkleDbFileUtils.completelyWrite(
- tempFileCHannel, transferBuffer, (long) lastChunkWithDataIndex * memoryChunkSize);
+ currentFileChannel = FileChannel.open(
+ tempFile, StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE);
- for (int i = firstChunkWithDataIndex + 1; i < totalNumberOfChunks; i++) {
- chunkList.set(i, (long) (i - firstChunkWithDataIndex) * memoryChunkSize);
- }
+ super.readBodyFromFileChannelOnInit(sourceFileName, fileChannel);
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ protected Long readChunkData(FileChannel fileChannel, int chunkIndex, int startIndex, int endIndex)
+ throws IOException {
+ // read from `fileChannel`
+ final ByteBuffer transferBuffer = initOrGetTransferBuffer();
+ fillBufferWithZeroes(transferBuffer);
+
+ readDataIntoBuffer(fileChannel, chunkIndex, startIndex, endIndex, transferBuffer);
+
+ final int firstChunkIndex = toIntExact(minValidIndex.get() / numLongsPerChunk);
+ final long chunk = ((long) (chunkIndex - firstChunkIndex) * memoryChunkSize);
+
+ // write to `currentFileChannel`
+ int startOffset = startIndex * Long.BYTES;
+ int endOffset = endIndex * Long.BYTES;
+
+ transferBuffer.position(startOffset);
+ transferBuffer.limit(endOffset);
+
+ int bytesToWrite = endOffset - startOffset;
+ long bytesWritten = MerkleDbFileUtils.completelyWrite(currentFileChannel, transferBuffer, chunk + startOffset);
+ if (bytesWritten != bytesToWrite) {
+ throw new IOException("Failed to write long list (disk) chunks, chunkIndex=" + chunkIndex + " expected="
+ + bytesToWrite + " actual=" + bytesWritten);
}
+
+ return chunk;
}
private void fillBufferWithZeroes(ByteBuffer transferBuffer) {
diff --git a/platform-sdk/swirlds-merkledb/src/main/java/com/swirlds/merkledb/collections/LongListHeap.java b/platform-sdk/swirlds-merkledb/src/main/java/com/swirlds/merkledb/collections/LongListHeap.java
index 2f2f900cac9b..82c84bd2e99a 100644
--- a/platform-sdk/swirlds-merkledb/src/main/java/com/swirlds/merkledb/collections/LongListHeap.java
+++ b/platform-sdk/swirlds-merkledb/src/main/java/com/swirlds/merkledb/collections/LongListHeap.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2021-2024 Hedera Hashgraph, LLC
+ * Copyright (C) 2021-2025 Hedera Hashgraph, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -20,6 +20,7 @@
import static java.nio.ByteBuffer.allocateDirect;
import com.swirlds.config.api.Configuration;
+import com.swirlds.merkledb.utilities.MemoryUtils;
import com.swirlds.merkledb.utilities.MerkleDbFileUtils;
import edu.umd.cs.findbugs.annotations.NonNull;
import java.io.IOException;
@@ -51,6 +52,9 @@
@SuppressWarnings("unused")
public final class LongListHeap extends AbstractLongList<AtomicLongArray> {
+ /** A buffer for reading chunk data from the file only during the initialization. */
+ private ByteBuffer initReadBuffer;
+
/** Construct a new LongListHeap with the default number of longs per chunk. */
public LongListHeap() {
this(DEFAULT_NUM_LONGS_PER_CHUNK, DEFAULT_MAX_LONGS_TO_STORE, 0);
@@ -87,23 +91,32 @@ public LongListHeap(final Path file, final Configuration configuration) throws I
/** {@inheritDoc} */
@Override
- protected void readBodyFromFileChannelOnInit(String sourceFileName, FileChannel fileChannel) throws IOException {
- // read data
- final int numOfArrays = calculateNumberOfChunks(size());
- final ByteBuffer buffer = allocateDirect(memoryChunkSize);
- buffer.order(ByteOrder.nativeOrder());
- for (int i = 0; i < numOfArrays; i++) {
- final AtomicLongArray atomicLongArray = new AtomicLongArray(numLongsPerChunk);
- buffer.clear();
- MerkleDbFileUtils.completelyRead(fileChannel, buffer);
- buffer.flip();
- int index = 0;
- while (buffer.remaining() > 0) {
- atomicLongArray.set(index, buffer.getLong());
- index++;
- }
- chunkList.set(i, atomicLongArray);
+ protected void readBodyFromFileChannelOnInit(final String sourceFileName, final FileChannel fileChannel)
+ throws IOException {
+ initReadBuffer = ByteBuffer.allocateDirect(memoryChunkSize).order(ByteOrder.nativeOrder());
+ try {
+ super.readBodyFromFileChannelOnInit(sourceFileName, fileChannel);
+ } finally {
+ MemoryUtils.closeDirectByteBuffer(initReadBuffer);
+ }
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ protected AtomicLongArray readChunkData(FileChannel fileChannel, int chunkIndex, int startIndex, int endIndex)
+ throws IOException {
+ AtomicLongArray chunk = createChunk();
+
+ readDataIntoBuffer(fileChannel, chunkIndex, startIndex, endIndex, initReadBuffer);
+ final int startOffset = startIndex * Long.BYTES;
+ initReadBuffer.position(startOffset);
+
+ while (initReadBuffer.hasRemaining()) {
+ int index = initReadBuffer.position() / Long.BYTES;
+ chunk.set(index, initReadBuffer.getLong());
}
+
+ return chunk;
}
/** {@inheritDoc} */
diff --git a/platform-sdk/swirlds-merkledb/src/main/java/com/swirlds/merkledb/collections/LongListOffHeap.java b/platform-sdk/swirlds-merkledb/src/main/java/com/swirlds/merkledb/collections/LongListOffHeap.java
index 85804830662a..7718efe30dc4 100644
--- a/platform-sdk/swirlds-merkledb/src/main/java/com/swirlds/merkledb/collections/LongListOffHeap.java
+++ b/platform-sdk/swirlds-merkledb/src/main/java/com/swirlds/merkledb/collections/LongListOffHeap.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2021-2024 Hedera Hashgraph, LLC
+ * Copyright (C) 2021-2025 Hedera Hashgraph, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -87,26 +87,11 @@ public LongListOffHeap(final Path file, final Configuration configuration) throw
/** {@inheritDoc} */
@Override
- protected void readBodyFromFileChannelOnInit(String sourceFileName, FileChannel fileChannel) throws IOException {
- if (minValidIndex.get() < 0) {
- // Empty list, nothing to read
- return;
- }
- final int totalNumberOfChunks = calculateNumberOfChunks(size());
- final int firstChunkWithDataIndex = toIntExact(minValidIndex.get() / numLongsPerChunk);
- final int minValidIndexInChunk = toIntExact(minValidIndex.get() % numLongsPerChunk);
- // read the first chunk
- final ByteBuffer firstBuffer = createChunk();
- firstBuffer.position(minValidIndexInChunk * Long.BYTES).limit(firstBuffer.capacity());
- MerkleDbFileUtils.completelyRead(fileChannel, firstBuffer);
- chunkList.set(firstChunkWithDataIndex, firstBuffer);
- // read the rest of the data
- for (int i = firstChunkWithDataIndex + 1; i < totalNumberOfChunks; i++) {
- final ByteBuffer directBuffer = createChunk();
- MerkleDbFileUtils.completelyRead(fileChannel, directBuffer);
- directBuffer.position(0);
- chunkList.set(i, directBuffer);
- }
+ protected ByteBuffer readChunkData(FileChannel fileChannel, int chunkIndex, int startIndex, int endIndex)
+ throws IOException {
+ ByteBuffer chunk = createChunk();
+ readDataIntoBuffer(fileChannel, chunkIndex, startIndex, endIndex, chunk);
+ return chunk;
}
/** {@inheritDoc} */
@@ -157,7 +142,7 @@ protected void writeLongsData(final FileChannel fc) throws IOException {
final ByteBuffer nonNullBuffer = requireNonNullElse(byteBuffer, emptyBuffer);
// Slice so we don't mess with the byte buffer pointers.
// Also, the slice size has to be equal to the size of the buffer
- final ByteBuffer buf = nonNullBuffer.slice(0, nonNullBuffer.capacity());
+ final ByteBuffer buf = nonNullBuffer.slice(0, nonNullBuffer.limit());
if (i == firstChunkWithDataIndex) {
// writing starts from the first valid index in the first valid chunk
final int firstValidIndexInChunk = toIntExact(currentMinValidIndex % numLongsPerChunk);
diff --git a/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/AbstractLongListTest.java b/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/AbstractLongListTest.java
index 8a81fccaad6f..1ed30591f9bd 100644
--- a/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/AbstractLongListTest.java
+++ b/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/AbstractLongListTest.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016-2024 Hedera Hashgraph, LLC
+ * Copyright (C) 2016-2025 Hedera Hashgraph, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,59 +18,94 @@
import static com.swirlds.base.units.UnitConstants.BYTES_TO_MEBIBYTES;
import static com.swirlds.base.units.UnitConstants.MEBIBYTES_TO_BYTES;
+import static com.swirlds.common.test.fixtures.RandomUtils.nextInt;
+import static com.swirlds.merkledb.collections.AbstractLongList.DEFAULT_MAX_LONGS_TO_STORE;
+import static com.swirlds.merkledb.collections.AbstractLongList.DEFAULT_NUM_LONGS_PER_CHUNK;
import static com.swirlds.merkledb.collections.AbstractLongList.FILE_HEADER_SIZE_V2;
-import static com.swirlds.merkledb.test.fixtures.MerkleDbTestUtils.*;
+import static com.swirlds.merkledb.collections.LongList.IMPERMISSIBLE_VALUE;
+import static com.swirlds.merkledb.test.fixtures.MerkleDbTestUtils.CONFIGURATION;
+import static com.swirlds.merkledb.test.fixtures.MerkleDbTestUtils.checkDirectMemoryIsCleanedUpToLessThanBaseUsage;
+import static com.swirlds.merkledb.test.fixtures.MerkleDbTestUtils.getDirectMemoryUsedBytes;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import com.swirlds.common.test.fixtures.io.ResourceLoader;
import com.swirlds.config.api.Configuration;
import java.io.IOException;
+import java.net.URISyntaxException;
import java.nio.file.Files;
import java.nio.file.Path;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.Spliterator;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.BiFunction;
+import java.util.function.LongConsumer;
+import java.util.function.Supplier;
+import java.util.stream.Stream;
import org.junit.jupiter.api.MethodOrderer;
import org.junit.jupiter.api.Order;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestMethodOrder;
import org.junit.jupiter.api.io.TempDir;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+import org.junit.jupiter.params.provider.ValueSource;
@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
abstract class AbstractLongListTest<T extends AbstractLongList<?>> {
+ // Constants (used in ordered and some of the other tests)
+
+ protected static final int SAMPLE_SIZE = 10_000;
+ protected static final int NUM_LONGS_PER_CHUNK = 33;
+ protected static final long MAX_LONGS = SAMPLE_SIZE * 2L;
+ protected static final int MAX_VALID_INDEX = SAMPLE_SIZE - 1;
+ protected static final int HALF_SAMPLE_SIZE = SAMPLE_SIZE / 2;
+
private static final int OUT_OF_SAMPLE_INDEX = 13_000_123;
private static final long REPL_VALUE = 42;
private static final long DEFAULT_VALUE = 0;
+ // Variables used in ordered tests
+
private static AbstractLongList<?> longList;
- protected int getSampleSize() {
- return 1_000_000;
- }
+ /**
+ * Keep track of initial direct memory used already, so we can check if we leak over and above what we started with
+ */
+ private static long directMemoryUsedAtStart;
- protected AbstractLongList<?> createLongList() {
- return new LongListHeap();
- }
+ // Factory methods for creating different configurations of LongList instances
+
+ protected abstract AbstractLongList<?> createLongList();
@SuppressWarnings("SameParameterValue")
protected abstract T createLongListWithChunkSizeInMb(final int chunkSizeInMb);
protected abstract T createFullyParameterizedLongListWith(final int numLongsPerChunk, final long maxLongs);
- protected abstract T createLongListFromFile(final Path file, final Configuration configuration) throws IOException;
+ protected abstract T createLongListFromFile(final Path file) throws IOException;
- /**
- * Keep track of initial direct memory used already, so we can check if we leek over and above what we started with
- */
- private static long directMemoryUsedAtStart;
+ // Ordered tests
@Test
@Order(1)
- void createData() {
- directMemoryUsedAtStart = getDirectMemoryUsedBytes();
+ void testCreateData() {
longList = createLongList();
final long capacity = longList.capacity();
+ directMemoryUsedAtStart = getDirectMemoryUsedBytes();
assertEquals(
AbstractLongList.DEFAULT_MAX_LONGS_TO_STORE,
@@ -105,52 +140,36 @@ void createData() {
() -> longList.putIfEqual(capacity, 1, -1),
"Capacity should not be a valid index");
- longList.updateValidRange(0, getSampleSize());
- for (int i = 1; i < getSampleSize(); i++) {
- longList.put(i, i);
+ longList.updateValidRange(0, SAMPLE_SIZE - 1);
+ for (int i = 0; i < SAMPLE_SIZE; i++) {
+ longList.put(i, i + 100);
}
}
@Test
@Order(2)
- void check() {
- checkRange();
- }
-
- @Test
- @Order(3)
- void writeToFileAndReadBack(@TempDir final Path tempDir) throws IOException {
- final int sampleSize = getSampleSize();
- final Path file = tempDir.resolve("LongListByteBufferTest.hl");
- // write longList data
- longList.writeToFile(file);
- // check file exists and contains some data
- assertTrue(Files.exists(file), "file does not exist");
- assertEquals(
- (FILE_HEADER_SIZE_V2 + (Long.BYTES * (long) sampleSize)),
- Files.size(file),
- "Expected file to contain all the data so its size [" + Files.size(file)
- + "] should have been header plus longs data size ["
- + (FILE_HEADER_SIZE_V2 + (Long.BYTES * (sampleSize)))
- + "]");
- // check all data, to make sure it did not get messed up
- for (int i = 0; i < sampleSize; i++) {
+ void testCheckData() {
+ for (int i = 0; i < SAMPLE_SIZE; i++) {
final long readValue = longList.get(i, 0);
- assertEquals(i, readValue, "Longs don't match for " + i + " got [" + readValue + "] should be [" + i + "]");
- }
- // now try and construct a new LongList reading from the file
- try (final LongList longList2 = createLongListFromFile(file, CONFIGURATION)) {
- // now check data and other attributes
- assertEquals(longList.capacity(), longList2.capacity(), "Unexpected value for longList2.capacity()");
- assertEquals(longList.size(), longList2.size(), "Unexpected value for longList2.size()");
- for (int i = 0; i < sampleSize; i++) {
- final long readValue = longList2.get(i, 0);
- assertEquals(
- i, readValue, "Longs don't match for " + i + " got [" + readValue + "] should be [" + i + "]");
- }
+ assertEquals(
+ i + 100,
+ readValue,
"Longs don't match for " + i + " got [" + readValue + "] should be [" + (i + 100) + "]");
}
- // delete file as we are done with it
- Files.delete(file);
+
+ final AtomicInteger atomicI = new AtomicInteger(0);
+ longList.stream().forEach(readValue -> {
+ final int i = atomicI.getAndIncrement();
+ assertEquals(
+ i + 100,
+ readValue,
+ "Longs don't match for " + i + " got [" + readValue + "] should be [" + (i + 100) + "]");
+ });
+
+ assertEquals(
+ SAMPLE_SIZE,
+ longList.stream().parallel().summaryStatistics().getCount(),
+ "Stream size should match initial sample size");
}
@Test
@@ -190,18 +209,28 @@ void testPutIfEqual() {
@Test
@Order(5)
- void chunkSizeFactoryWorks() {
- final int expectedNum = Math.toIntExact(2 * MEBIBYTES_TO_BYTES / Long.BYTES);
-
- final AbstractLongList<?> subject2mbChunks = createLongListWithChunkSizeInMb(2);
-
- checkNumLongsPerChunk(subject2mbChunks, expectedNum);
+ void testClose() {
+ if (longList != null) {
+ longList.close();
+ }
+ // Check all memory is freed after DB is closed, but skip for LongListDisk
+ // as LongListDisk uses file-based operations (FileChannel#write in LongListDisk#closeChunk)
+ // that don't immediately free memory due to OS-level caching
+ if (!(longList instanceof LongListDisk)) {
+ assertTrue(
+ checkDirectMemoryIsCleanedUpToLessThanBaseUsage(directMemoryUsedAtStart),
+ "Direct Memory used is more than base usage even after 20 gc() calls. At start was "
+ + (directMemoryUsedAtStart * BYTES_TO_MEBIBYTES) + "MB and is now "
+ + (getDirectMemoryUsedBytes() * BYTES_TO_MEBIBYTES)
+ + "MB");
+ }
}
+ // Tests without `@Order`
+
@SuppressWarnings("resource")
@Test
- @Order(6)
- void constructorValidatesArgs() {
+ void testConstructorValidatesArgs() {
assertThrows(
IllegalArgumentException.class,
() -> createFullyParameterizedLongListWith(1, -1),
@@ -210,65 +239,1028 @@ void constructorValidatesArgs() {
IllegalArgumentException.class,
() -> createFullyParameterizedLongListWith(Integer.MAX_VALUE, 1000),
"Should not be able to create with a more longs per chunk than maxLongs");
+ assertThrows(
+ ArithmeticException.class,
+ () -> createFullyParameterizedLongListWith((Integer.MAX_VALUE / 8) + 1, Integer.MAX_VALUE),
+ "Check that ArithmeticException of num longs per chunk is too big");
+ assertThrows(
+ IllegalArgumentException.class,
+ () -> createFullyParameterizedLongListWith(Integer.MAX_VALUE - 1, Integer.MAX_VALUE),
+ "Check that IllegalArgumentException of num longs per chunk is too big");
}
@Test
- @Order(6)
- void testClose() {
- // close
- if (longList != null) {
- longList.close();
+ void testChunkSizeFactoryWorks() {
+ final int expectedNum = Math.toIntExact(2 * MEBIBYTES_TO_BYTES / Long.BYTES);
+
+ try (final AbstractLongList<?> longList = createLongListWithChunkSizeInMb(2)) {
+ assertEquals(
+ expectedNum,
+ longList.getNumLongsPerChunk(),
+ "Long List implementations should respect constructor parameter for numLongsPerChunk");
}
- // check all memory is freed after DB is closed
- assertTrue(
- checkDirectMemoryIsCleanedUpToLessThanBaseUsage(directMemoryUsedAtStart),
- "Direct Memory used is more than base usage even after 20 gc() calls. At start was "
- + (directMemoryUsedAtStart * BYTES_TO_MEBIBYTES) + "MB and is now "
- + (getDirectMemoryUsedBytes() * BYTES_TO_MEBIBYTES)
- + "MB");
}
@Test
- void writeReadEmptyList(@TempDir final Path tempDir) throws IOException {
- try (final AbstractLongList<?> list = createLongList()) {
- final Path file = tempDir.resolve("writeReadEmptyList.ll");
- // write longList data
- list.writeToFile(file);
- // check file exists and contains some data
- assertTrue(Files.exists(file), "file does not exist");
- // now try and construct a new LongList reading from the file
- try (final LongList list2 = createLongListFromFile(file, CONFIGURATION)) {
- // now check data and other attributes
- assertEquals(list2.capacity(), list.capacity(), "Unexpected value for list2.capacity()");
- assertEquals(list2.size(), list2.size(), "Unexpected value for list2.size()");
+ void testInsertAtTheEndOfTheList() {
+ try (final LongList longList = createLongList()) {
+ longList.updateValidRange(0, DEFAULT_MAX_LONGS_TO_STORE - 1);
+ assertDoesNotThrow(() -> longList.put(DEFAULT_MAX_LONGS_TO_STORE - 1, 1));
+ }
+ }
+
+ @Test
+ void testInsertAtTheEndOfTheListCustomConfigured() {
+ final int MAX_LONGS = 10;
+ try (final LongList longList = createFullyParameterizedLongListWith(10, MAX_LONGS)) {
+ longList.updateValidRange(0, MAX_LONGS - 1);
+ assertDoesNotThrow(() -> longList.put(MAX_LONGS - 1, 1));
+ }
+ }
+
+ @Test
+ void testUnsupportedVersion() throws URISyntaxException {
+ final Path pathToList = ResourceLoader.getFile("test_data/LongList_unsupported_version.ll");
+ assertThrows(IOException.class, () -> {
+ //noinspection EmptyTryBlock
+ try (final LongList ignored = createLongListFromFile(pathToList)) {
+ // no op
}
- // delete file as we are done with it
+ });
+ }
+
+ /**
+ * Validate that AbstractLongList#close() and its overrides do not create any negative side effects
+ * for the future long list instances.
+ *
+ * @param tempDir temporary directory for storing test files, automatically cleaned up after the test.
+ * @throws IOException if file operations fail.
+ */
+ @Test
+ void testCloseAndRecreateLongListMultipleTimes(@TempDir final Path tempDir) throws IOException {
+ final Path file = tempDir.resolve("testCloseAndRecreateLongListMultipleTimes.ll");
+ if (Files.exists(file)) {
Files.delete(file);
}
+
+ try (final LongList longList = createFullyParameterizedLongListWith(NUM_LONGS_PER_CHUNK, MAX_LONGS)) {
+ longList.updateValidRange(0, SAMPLE_SIZE);
+ for (int i = 0; i <= SAMPLE_SIZE; i++) {
+ longList.put(i, i + 100);
+ }
+ longList.writeToFile(file);
+ assertTrue(Files.exists(file), "The file should exist after writing with the first list");
+ }
+
+ try (final LongList longListFromFile = createLongListFromFile(file)) {
+ for (int i = 0; i <= SAMPLE_SIZE; i++) {
+ assertEquals(i + 100, longListFromFile.get(i), "Data should match in the second list");
+ }
+ }
+
+ try (final LongList anotherLongListFromFile = createLongListFromFile(file)) {
+ for (int i = 0; i <= SAMPLE_SIZE; i++) {
+ assertEquals(i + 100, anotherLongListFromFile.get(i), "Data should still match in the third list");
+ }
+ }
}
- private void checkRange() {
- for (int i = 0; i < getSampleSize(); i++) {
- final long readValue = longList.get(i, 0);
- assertEquals(i, readValue, "Longs don't match for " + i + " got [" + readValue + "] should be [" + i + "]");
+ // SAMPLE_SIZE should be 10K for this test
+ @Test
+ void testBackwardCompatibilityHalfEmpty_10K() throws URISyntaxException, IOException {
+ // Load a pre-existing file representing a half-empty LongList
+ final Path longListFile = ResourceLoader.getFile("test_data/LongListHalfEmpty_10k_10pc_v1.ll");
+
+ // Reconstruct the long list from the file and validate its content
+ try (final LongList readerList = createLongListFromFile(longListFile)) {
+ // Verify the first half of the list is empty
+ checkEmptyUpToIndex(readerList, HALF_SAMPLE_SIZE);
+
+ // Verify the second half of the list contains expected values
+ for (int i = HALF_SAMPLE_SIZE; i < SAMPLE_SIZE; i++) {
+ assertEquals(i, readerList.get(i), "Mismatch in value at index " + i);
+ }
}
+ }
- final AtomicInteger atomicI = new AtomicInteger(0);
- longList.stream().forEach(readValue -> {
- final int i = atomicI.getAndIncrement();
- assertEquals(i, readValue, "Longs don't match for " + i + " got [" + readValue + "] should be [" + i + "]");
+ @Test
+ void testReuseOfChunksMinValidIndex() throws IOException {
+ // Create a LongList with the specified number of longs per chunk and max longs
+ try (final LongList longList = createFullyParameterizedLongListWith(100, MAX_LONGS)) {
+ // Populate the list with initial values and validate its contents
+ populateList(longList);
+ checkData(longList);
+
+ // Save the original file size for comparison (for LongListDisk)
+ // temporary file channel doesn't contain the header
+ long originalChannelSize = 0;
+ if (longList instanceof LongListDisk) {
+ originalChannelSize =
+ ((LongListDisk) longList).getCurrentFileChannel().size();
+ }
+
+ // Free up chunks below HALF_SAMPLE_SIZE by updating the minimum valid index
+ longList.updateValidRange(HALF_SAMPLE_SIZE, MAX_LONGS - 1);
+
+ // Populate newly valid range using the previously freed-up chunks
+ for (int i = SAMPLE_SIZE; i < SAMPLE_SIZE + HALF_SAMPLE_SIZE; i++) {
+ longList.put(i, i + 100);
+ }
+
+ if (longList instanceof LongListDisk) {
+ // Validate that the file size has not changed after reusing chunks
+ assertEquals(
+ originalChannelSize,
+ ((LongListDisk) longList).getCurrentFileChannel().size());
+ }
+
+ // Verify that indices below HALF_SAMPLE_SIZE are cleared
+ checkEmptyUpToIndex(longList, HALF_SAMPLE_SIZE);
+
+ // Verify that all values in the newly valid range are correctly populated
+ checkData(longList, HALF_SAMPLE_SIZE, SAMPLE_SIZE + HALF_SAMPLE_SIZE);
+ }
+ }
+
+ @Test
+ void testReuseOfChunksMaxValidIndex() throws IOException {
+ // Create a LongList with the specified number of longs per chunk and max longs
+ try (final LongList longList = createFullyParameterizedLongListWith(100, MAX_LONGS)) {
+ // Populate the list with initial values and validate its contents
+ populateList(longList);
+ checkData(longList);
+
+ // Save the original file size for comparison (for LongListDisk)
+ // temporary file channel doesn't contain the header
+ long originalChannelSize = 0;
+ if (longList instanceof LongListDisk) {
+ originalChannelSize =
+ ((LongListDisk) longList).getCurrentFileChannel().size();
+ }
+
+ // Free up chunks beyond HALF_SAMPLE_SIZE by updating the valid range
+ longList.updateValidRange(0, HALF_SAMPLE_SIZE);
+
+ // Extend the valid range to include the previously freed-up chunks
+ longList.updateValidRange(0, SAMPLE_SIZE - 1);
+
+ // Populate the newly valid range to reuse freed-up chunks
+ for (int i = HALF_SAMPLE_SIZE; i < SAMPLE_SIZE; i++) {
+ longList.put(i, i + 100);
+ }
+
+ if (longList instanceof LongListDisk) {
+ // Validate that the file size has not changed after reusing chunks
+ assertEquals(
+ originalChannelSize,
+ ((LongListDisk) longList).getCurrentFileChannel().size());
+ }
+
+ // Ensure all data, including reused chunk data, is correct
+ checkData(longList);
+ }
+ }
+
+ @ParameterizedTest(name = "[{index}] countDivider={0}")
+ @ValueSource(ints = {2, 3, 4, 5, 10, 50})
+ void testMinValidIndexRespectedInForEach(final int countDivider) throws InterruptedException {
+ // Create a LongList where each chunk holds 100 longs, resulting in 100 chunks
+ try (final LongList longList = createFullyParameterizedLongListWith(100, MAX_LONGS)) {
+
+ // Populate the list with initial values and validate its contents
+ populateList(longList);
+ checkData(longList);
+
+ // Update the minimum valid index to exclude entries below SAMPLE_SIZE / countDivider
+ final long minIndex = SAMPLE_SIZE / countDivider;
+ longList.updateValidRange(minIndex, longList.size() - 1);
+
+ // Count valid entries and collect their indices
+ final AtomicLong count = new AtomicLong(0);
+ final Set<Long> keysInForEach = new HashSet<>();
+ longList.forEach((path, location) -> {
+ count.incrementAndGet();
+ keysInForEach.add(path);
+
+ assertEquals(path + 100, location, "Mismatch in value for index " + path);
+ });
+
+ // Ensure the number of valid indices matches the expected range
+ assertEquals(
+ SAMPLE_SIZE - minIndex,
+ count.get(),
+ "The number of valid index entries does not match expected count");
+ assertEquals(
+ SAMPLE_SIZE - minIndex,
+ keysInForEach.size(),
+ "The size of valid index set does not match expected count");
+ }
+ }
+
+ @Test
+ void testSpliteratorEdgeCases() {
+ final LongConsumer firstConsumer = mock(LongConsumer.class);
+ final LongConsumer secondConsumer = mock(LongConsumer.class);
+
+ try (final LongList longList = createFullyParameterizedLongListWith(32, 32)) {
+ longList.updateValidRange(0, 3);
+ for (int i = 1; i <= 3; i++) {
+ longList.put(i, i);
+ }
+
+ final LongListSpliterator subject = new LongListSpliterator(longList);
+
+ assertThrows(
+ IllegalStateException.class,
+ subject::getComparator,
+ "An unordered spliterator should not be asked to provide an ordering");
+
+ final Spliterator.OfLong firstSplit = subject.trySplit();
+ assertNotNull(firstSplit, "firstSplit should not be null");
+ assertEquals(2, subject.estimateSize(), "Splitting 4 elements should yield 2");
+ final Spliterator.OfLong secondSplit = subject.trySplit();
+ assertNotNull(secondSplit, "secondSplit should not be null");
+ assertEquals(1, subject.estimateSize(), "Splitting 2 elements should yield 1");
+ assertNull(subject.trySplit(), "Splitting 1 element should yield null");
+
+ assertTrue(firstSplit.tryAdvance(firstConsumer), "First split should yield 0 first");
+ verify(firstConsumer).accept(0);
+ assertTrue(firstSplit.tryAdvance(firstConsumer), "First split should yield 1 second");
+ verify(firstConsumer).accept(1);
+ assertFalse(firstSplit.tryAdvance(firstConsumer), "First split should be exhausted after 2 yields");
+
+ secondSplit.forEachRemaining(secondConsumer);
+ verify(secondConsumer).accept(2);
+ verifyNoMoreInteractions(secondConsumer);
+ }
+ }
+
+ // Parametrized tests to test cross compatibility between the Long List implementations
+
+ /**
+ * A named factory for producing new {@link AbstractLongList} instances, used primarily as a "writer"
+ * in parameterized tests. The {@code name} field is for logging or display in test output, and
+ * {@code createInstance} is the function that constructs a new {@link AbstractLongList}.
+ */
+ public record LongListWriterFactory(String name, Supplier<AbstractLongList<?>> createInstance) {
+ @Override
+ public String toString() {
+ return name;
+ }
+ }
+
+ /**
+ * A named factory for reconstructing {@link AbstractLongList} instances from a file, serving as a "reader"
+ * in parameterized tests. The {@code name} field is for test output identification, and
+ * {@code createFromFile} is a function that loads a {@link AbstractLongList} given a {@link Path}
+ * and {@link Configuration}.
+ */
+ public record LongListReaderFactory(
+ String name, BiFunction<Path, Configuration, AbstractLongList<?>> createFromFile) {
+ @Override
+ public String toString() {
+ return name;
+ }
+ }
+
+ /**
+ * Factories (named suppliers) for creating different {@link AbstractLongList} implementations
+ * with test configuration.
+ */
+ static LongListWriterFactory heapWriterFactory = new LongListWriterFactory(
+ LongListHeap.class.getSimpleName(), () -> new LongListHeap(NUM_LONGS_PER_CHUNK, MAX_LONGS, 0));
+
+ static LongListWriterFactory offHeapWriterFactory = new LongListWriterFactory(
+ LongListOffHeap.class.getSimpleName(), () -> new LongListOffHeap(NUM_LONGS_PER_CHUNK, MAX_LONGS, 0));
+ static LongListWriterFactory diskWriterFactory = new LongListWriterFactory(
+ LongListDisk.class.getSimpleName(),
+ () -> new LongListDisk(NUM_LONGS_PER_CHUNK, MAX_LONGS, 0, CONFIGURATION));
+
+ /**
+ * Factories (named BiFunctions) for reconstructing different {@link AbstractLongList}
+ * implementations from files.
+ */
+ static LongListReaderFactory heapReaderFactory =
+ new LongListReaderFactory(LongListHeap.class.getSimpleName(), (file, config) -> {
+ try {
+ return new LongListHeap(file, config);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ });
+
+ static LongListReaderFactory offHeapReaderFactory =
+ new LongListReaderFactory(LongListOffHeap.class.getSimpleName(), (file, config) -> {
+ try {
+ return new LongListOffHeap(file, config);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ });
+ static LongListReaderFactory diskReaderFactory =
+ new LongListReaderFactory(LongListDisk.class.getSimpleName(), (file, config) -> {
+ try {
+ return new LongListDisk(file, config);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ });
+
+ /**
+ * Generates a stream of writer-reader argument pairs for testing cross-compatibility
+ * of different long list implementations. The writer implementation is supplied as
+ * a parameter, and the method pairs it with three readers (heap, off-heap, and disk-based)
+ * to test whether data written by one implementation can be correctly read by another.
+ *
+ * This method is used internally to support the creation of specific writer-reader pairs
+ * for different test configurations.
+ *
+ * @param writerFactory a supplier providing the writer long list implementation
+ * @return a stream of arguments containing the writer and its corresponding readers
+ */
+ protected static Stream<Arguments> longListWriterBasedPairsProvider(final LongListWriterFactory writerFactory) {
+ return Stream.of(
+ Arguments.of(writerFactory, heapReaderFactory),
+ Arguments.of(writerFactory, offHeapReaderFactory),
+ Arguments.of(writerFactory, diskReaderFactory));
+ }
+
+ @ParameterizedTest(name = "[{index}] Writer={0}, Reader={1}")
+ @MethodSource("longListWriterReaderPairsProvider")
+ void testWriteAndReadBackEmptyList(
+ final LongListWriterFactory writerFactory,
+ final LongListReaderFactory readerFactory,
+ @TempDir final Path tempDir)
+ throws IOException {
+
+ // Create a writer LongList
+ try (final LongList writerList = writerFactory.createInstance().get()) {
+ // Write the empty LongList to a file and verify its existence
+ final String TEMP_FILE_NAME = String.format(
+ "testWriteAndReadBackEmptyList_write_%s_read_back_%s.ll", writerFactory, readerFactory);
+ final Path longListFile = writeLongListToFileAndVerify(writerList, TEMP_FILE_NAME, tempDir);
+
+ try (final LongList readerList = readerFactory.createFromFile().apply(longListFile, CONFIGURATION)) {
+ // Validate the reconstructed list's attributes
+ assertEquals(writerList.capacity(), readerList.capacity(), "Capacity mismatch in reconstructed list.");
+ assertEquals(writerList.size(), readerList.size(), "Size mismatch in reconstructed list.");
+ } finally {
+ Files.delete(longListFile);
+ }
+ }
+ }
+
+ @ParameterizedTest(name = "[{index}] Writer={0}, Reader={1}")
+ @MethodSource("longListWriterReaderPairsProvider")
+ void testWriteAndReadBackEmptyListWithValidRange(
+ final LongListWriterFactory writerFactory,
+ final LongListReaderFactory readerFactory,
+ @TempDir final Path tempDir)
+ throws IOException {
+
+ // Create a writer LongList
+ try (final LongList writerList = writerFactory.createInstance().get()) {
+ // Update the valid range to something non-empty
+ writerList.updateValidRange(0, 5000);
+
+ // Write this "empty" LongList (no actual data put) to a file
+ final String TEMP_FILE_NAME = String.format(
+ "testWriteAndReadBackEmptyListWithValidRange_write_%s_read_back_%s.ll",
+ writerFactory, readerFactory);
+ final Path longListFile = writeLongListToFileAndVerify(writerList, TEMP_FILE_NAME, tempDir);
+
+ // Read the list back from the file
+ try (final LongList readerList = readerFactory.createFromFile().apply(longListFile, CONFIGURATION)) {
+ // Because the list actually contained no data, it is effectively empty and so the valid range is reset
+ assertEquals(0, readerList.size(), "An empty list should have size 0");
+ assertEquals(-1, readerList.getMinValidIndex(), "For an empty list, minValidIndex should be -1");
+ assertEquals(-1, readerList.getMaxValidIndex(), "For an empty list, maxValidIndex should be -1");
+ } finally {
+ Files.delete(longListFile);
+ }
+ }
+ }
+
+ @ParameterizedTest(name = "[{index}] Writer={0}, Reader={1}")
+ @MethodSource("longListWriterReaderPairsProvider")
+ void testWriteAndReadBackWithValidRange(
+ final LongListWriterFactory writerFactory,
+ final LongListReaderFactory readerFactory,
+ @TempDir final Path tempDir)
+ throws IOException {
+
+ // Create a writer LongList
+ try (final LongList writerList = createLongList()) {
+ // Put a single value in a small valid range
+ writerList.updateValidRange(1, 1);
+ writerList.put(1, 1);
+
+ // Write this LongList to a file
+ final String TEMP_FILE_NAME = String.format(
+ "testWriteAndReadBackWithValidRange_write_%s_read_back_%s.ll", writerFactory, readerFactory);
+ final Path longListFile = writeLongListToFileAndVerify(writerList, TEMP_FILE_NAME, tempDir);
+
+ // Reconstruct the list from the file
+ try (final LongList readerList = readerFactory.createFromFile().apply(longListFile, CONFIGURATION)) {
+ final String TEMP_FILE_NAME_2 = String.format(
+ "testWriteAndReadBackWithValidRange_again_write_%s_read_back_%s.ll",
+ writerFactory, readerFactory);
+
+ // Verify that writing the read list to a new file doesn't cause exceptions
+ assertDoesNotThrow(() -> {
+ final Path longListFile2 = writeLongListToFileAndVerify(readerList, TEMP_FILE_NAME_2, tempDir);
+ Files.delete(longListFile2);
+ });
+ } finally {
+ Files.delete(longListFile);
+ }
+ }
+ }
+
+ @ParameterizedTest(name = "[{index}] Writer={0}, Reader={1}")
+ @MethodSource("longListWriterReaderPairsProvider")
+ void testWriteAndReadBackBigIndex(
+ final LongListWriterFactory writerFactory,
+ final LongListReaderFactory readerFactory,
+ @TempDir final Path tempDir)
+ throws IOException {
+
+ // Create a writer LongList
+ try (final LongList writerList =
+ createFullyParameterizedLongListWith(DEFAULT_NUM_LONGS_PER_CHUNK, DEFAULT_MAX_LONGS_TO_STORE)) {
+ // Use a large index to test beyond the typical Integer.MAX_VALUE range
+ long bigIndex = Integer.MAX_VALUE + 1L;
+ writerList.updateValidRange(bigIndex, bigIndex);
+ writerList.put(bigIndex, 1);
+
+ // Verify the value was written correctly
+ assertEquals(1, writerList.get(bigIndex), "Value mismatch for the large index.");
+
+ // Write the long list to a file and verify its existence
+ final String TEMP_FILE_NAME = String.format(
+ "testWriteAndReadBackBigIndex_write_%s_read_back_%s.ll", writerFactory, readerFactory);
+ final Path longListFile = writeLongListToFileAndVerify(writerList, TEMP_FILE_NAME, tempDir);
+
+ // Reconstruct the LongList from the file
+ try (final LongList readerList = readerFactory.createFromFile().apply(longListFile, CONFIGURATION)) {
+ // Validate that the large index is correctly reconstructed
+ assertEquals(1, readerList.get(bigIndex), "Value mismatch for the large index after reconstruction.");
+ } finally {
+ // Clean up the temporary file
+ Files.delete(longListFile);
+ }
+ }
+ }
+
+ @ParameterizedTest(name = "[{index}] Writer={0}, Reader={1}")
+ @MethodSource("longListWriterReaderPairsProvider")
+ void testShrinkListMinValidIndex(
+ final LongListWriterFactory writerFactory,
+ final LongListReaderFactory readerFactory,
+ @TempDir final Path tempDir)
+ throws IOException {
+
+ // Create a writer LongList
+ try (final AbstractLongList<?> writerList = writerFactory.createInstance().get()) {
+ // Populate the long list with initial values and validate its contents
+ populateList(writerList);
+ checkData(writerList);
+
+ // Save the original file size for comparison (for LongListDisk)
+ long originalFileSize = 0;
+ if (writerList instanceof LongListDisk) {
+ originalFileSize =
+ ((LongListDisk) writerList).getCurrentFileChannel().size() + writerList.currentFileHeaderSize;
+ }
+
+ // Update the valid range to shrink the list by setting a new minimum valid index
+ writerList.updateValidRange(HALF_SAMPLE_SIZE, MAX_LONGS - 1);
+
+ // Validate that the first half of the list is now empty
+ checkEmptyUpToIndex(writerList, HALF_SAMPLE_SIZE);
+
+ // Validate that the second half of the list retains its data
+ checkData(writerList, HALF_SAMPLE_SIZE, SAMPLE_SIZE);
+
+ // Write the modified long list to a file and verify its existence
+ final String TEMP_FILE_NAME =
+ String.format("testShrinkListMinValidIndex_write_%s_read_back_%s.ll", writerFactory, readerFactory);
+ final Path longListFile = writeLongListToFileAndVerify(writerList, TEMP_FILE_NAME, tempDir);
+
+ // If using LongListDisk, verify that the file size reflects the shrink operation
+ if (writerList instanceof LongListDisk) {
+ assertEquals(
+ HALF_SAMPLE_SIZE * Long.BYTES,
+ originalFileSize - Files.size(longListFile),
+ "File size after shrinking does not match expected reduction.");
+ }
+
+ // Reconstruct the LongList from the file
+ try (final LongList readerList = readerFactory.createFromFile().apply(longListFile, CONFIGURATION)) {
+ // Validate that all entries in the reconstructed list match the writer list
+ for (int i = 0; i < SAMPLE_SIZE; i++) {
+ assertEquals(
+ writerList.get(i),
+ readerList.get(i),
+ "Mismatch in data for index " + i + " between writer and reader lists.");
+ }
+ } finally {
+ // Clean up the temporary file
+ Files.delete(longListFile);
+ }
+ }
+ }
+
+ @ParameterizedTest(name = "[{index}] Writer={0}, Reader={1}")
+ @MethodSource("longListWriterReaderPairsProvider")
+ void testShrinkListMaxValidIndex(
+ final LongListWriterFactory writerFactory,
+ final LongListReaderFactory readerFactory,
+ @TempDir final Path tempDir)
+ throws IOException {
+
+ // Create a writer LongList
+ try (final AbstractLongList<?> writerList = writerFactory.createInstance().get()) {
+ // Populate the long list with initial values and validate its contents
+ populateList(writerList);
+ checkData(writerList);
+
+ // Save the original file size for comparison (for LongListDisk)
+ // temporary file channel doesn't contain the header
+ long originalFileSize = 0;
+ if (writerList instanceof LongListDisk) {
+ originalFileSize =
+ ((LongListDisk) writerList).getCurrentFileChannel().size() + writerList.currentFileHeaderSize;
+ }
+
+ // Update the valid range to shrink the list by setting a new maximum valid index
+ writerList.updateValidRange(0, HALF_SAMPLE_SIZE - 1);
+
+ // Validate that the second half of the list is now empty
+ checkEmptyFromIndex(writerList, HALF_SAMPLE_SIZE, SAMPLE_SIZE);
+
+ // Validate that the first half of the list retains its data
+ checkData(writerList, 0, HALF_SAMPLE_SIZE);
+
+ // Write the modified long list to a file and verify its existence
+ final String TEMP_FILE_NAME =
+ String.format("testShrinkListMaxValidIndex_write_%s_read_back_%s.ll", writerFactory, readerFactory);
+ final Path longListFile = writeLongListToFileAndVerify(writerList, TEMP_FILE_NAME, tempDir);
+
+ // If using LongListDisk, verify that the file size reflects the shrink operation
+ if (writerList instanceof LongListDisk) {
+ assertEquals(
+ HALF_SAMPLE_SIZE * Long.BYTES,
+ originalFileSize - Files.size(longListFile),
+ "File size after shrinking does not match expected reduction.");
+ }
+
+ // Reconstruct the LongList from the file
+ try (final LongList readerList = readerFactory.createFromFile().apply(longListFile, CONFIGURATION)) {
+ // Validate that all entries in the reconstructed list match the writer list
+ for (int i = 0; i < SAMPLE_SIZE; i++) {
+ assertEquals(
+ writerList.get(i),
+ readerList.get(i),
+ "Mismatch in data for index " + i + " between writer and reader lists.");
+ }
+ } finally {
+ // Clean up the temporary file
+ Files.delete(longListFile);
+ }
+ }
+ }
+
+ /**
+ * Takes a stream of (writerFactory, readerFactory) pairs,
+ * and for each pair, returns multiple (writerFactory, readerFactory, secondReaderFactory) triples.
+ *
+ * @param writerReaderPairs a stream of (writerFactory, readerFactory) pairs
+ * @return a stream of argument triples (writerFactory, readerFactory, secondReaderFactory)
+ */
+ protected static Stream<Arguments> longListWriterSecondReaderPairsProviderBase(
+ final Stream<Arguments> writerReaderPairs) {
+ // “Expand” each (writerFactory, readerFactory) into (writerFactory, readerFactory, secondReaderFactory).
+ return writerReaderPairs.flatMap(pair -> {
+ // The existing pair is [writerFactory, readerFactory].
+ final Object writerFactory = pair.get()[0];
+ final Object readerFactory = pair.get()[1];
+
+ // Now, produce multiple outputs, each with a different secondReader:
+ return Stream.of(
+ Arguments.of(writerFactory, readerFactory, heapReaderFactory),
+ Arguments.of(writerFactory, readerFactory, offHeapReaderFactory),
+ Arguments.of(writerFactory, readerFactory, diskReaderFactory));
});
+ }
- assertEquals(
- getSampleSize(),
- longList.stream().parallel().summaryStatistics().getCount(),
- "Stream size should match initial sample size");
+ @ParameterizedTest(name = "[{index}] Writer={0}, First Reader={1}, Second Reader={2}")
+ @MethodSource("longListWriterSecondReaderPairsProvider")
+ void testUpdateListCreatedFromSnapshotPersistAndVerify(
+ final LongListWriterFactory writerFactory,
+ final LongListReaderFactory readerFactory,
+ final LongListReaderFactory secondReaderFactory,
+ @TempDir final Path tempDir)
+ throws IOException {
+
+ // Create a writer LongList
+ try (final AbstractLongList<?> writerList = writerFactory.createInstance().get()) {
+
+ // Populate the writer list with initial values and validate its contents
+ populateList(writerList);
+ checkData(writerList);
+
+ // Write the writer list to a file and verify its existence
+ final String TEMP_FILE_NAME = String.format(
+ "testUpdateListCreatedFromSnapshotPersistAndVerify_write_%s_read_back_%s_read_again_%s.ll",
+ writerFactory, readerFactory, secondReaderFactory);
+ final Path longListFile = writeLongListToFileAndVerify(writerList, TEMP_FILE_NAME, tempDir);
+
+ // Check that the file size matches the expected data size
+ assertEquals(
+ (FILE_HEADER_SIZE_V2 + (Long.BYTES * (long) SAMPLE_SIZE)),
+ Files.size(longListFile),
+ "Expected file to contain all the data so its size [" + Files.size(longListFile)
+ + "] should have been header plus longs data size ["
+ + (FILE_HEADER_SIZE_V2 + (Long.BYTES * (SAMPLE_SIZE)))
+ + "]");
+
+ // Reconstruct the list from the file using the first reader implementation
+ try (final AbstractLongList<?> longListFromFile =
+ readerFactory.createFromFile().apply(longListFile, CONFIGURATION)) {
+
+ // Validate the reconstructed list's attributes
+ assertEquals(
+ writerList.capacity(), longListFromFile.capacity(), "Capacity mismatch in reconstructed list.");
+ assertEquals(writerList.size(), longListFromFile.size(), "Size mismatch in reconstructed list.");
+ // Validate that the number of chunks matches between the writer and reader lists
+ assertEquals(
+ writerList.dataCopy().size(),
+ longListFromFile.dataCopy().size(),
+ "Mismatch in the number of chunks between writer and reader lists.");
+ checkData(longListFromFile);
+
+ // Rewrite the data from the first reconstructed list back to the file
+ Files.delete(longListFile);
+ longListFromFile.writeToFile(longListFile);
+
+ // Reconstruct the list again using the second reader implementation
+ try (final AbstractLongList<?> longListFromFile2 =
+ secondReaderFactory.createFromFile().apply(longListFile, CONFIGURATION)) {
+
+ // Validate that the second reconstruction matches the writer list
+ // Validate the reconstructed list's attributes
+ assertEquals(
+ writerList.capacity(),
+ longListFromFile2.capacity(),
+ "Capacity mismatch in reconstructed list.");
+ assertEquals(writerList.size(), longListFromFile2.size(), "Size mismatch in reconstructed list.");
+ // Validate that the number of chunks matches between the writer and reader lists
+ assertEquals(
+ writerList.dataCopy().size(),
+ longListFromFile2.dataCopy().size(),
+ "Mismatch in the number of chunks between writer and reader lists.");
+ checkData(longListFromFile2);
+ }
+ }
+ }
}
- protected void checkNumLongsPerChunk(final AbstractLongList> subject, final int expected) {
- assertEquals(
- expected,
- subject.getNumLongsPerChunk(),
- "On-heap implementations should respect constructor parameter for numLongsPerChunk");
+ @ParameterizedTest(name = "[{index}] Writer={0}, First Reader={1}, Second Reader={2}")
+ @MethodSource("longListWriterSecondReaderPairsProvider")
+ void testUpdateMinToTheLowerEnd(
+ final LongListWriterFactory writerFactory,
+ final LongListReaderFactory readerFactory,
+ final LongListReaderFactory secondReaderFactory,
+ @TempDir final Path tempDir)
+ throws IOException {
+
+ // Create a writer long list with 10 longs per chunk
+ try (final LongList writerList = createFullyParameterizedLongListWith(10, SAMPLE_SIZE)) {
+ // Populate the list and validate its initial data
+ populateList(writerList);
+ checkData(writerList);
+
+ // Update the minimum valid index to exclude the lower half of the list
+ //noinspection UnnecessaryLocalVariable
+ int newMinValidIndex = HALF_SAMPLE_SIZE;
+ writerList.updateValidRange(newMinValidIndex, MAX_VALID_INDEX);
+
+ // Write the updated list to a file and verify its existence
+ final String TEMP_FILE_NAME =
+ String.format("testUpdateMinToTheLowerEnd_write_%s_read_back_%s.ll", writerFactory, readerFactory);
+ final Path longListFile = writeLongListToFileAndVerify(writerList, TEMP_FILE_NAME, tempDir);
+
+ // Reconstruct the list from the file using the first reader
+ try (final LongList halfEmptyList = readerFactory.createFromFile().apply(longListFile, CONFIGURATION)) {
+
+ // Verify that long list is half-empty
+ checkEmptyUpToIndex(halfEmptyList, newMinValidIndex);
+
+ // Validate that indices above the new minimum are still intact
+ checkData(halfEmptyList, newMinValidIndex, SAMPLE_SIZE);
+
+ // Test behavior when attempting to update indices below the minimum valid index
+ int belowMinValidIndex1 = newMinValidIndex - 1;
+ int belowMinValidIndex2 = newMinValidIndex - 2;
+ int belowMinIndexValue1 = nextInt();
+ int belowMinIndexValue2 = nextInt();
+
+ // Attempt to put values below the minimum valid index; expect errors
+ assertThrows(AssertionError.class, () -> halfEmptyList.put(belowMinValidIndex1, belowMinIndexValue1));
+ assertFalse(halfEmptyList.putIfEqual(belowMinValidIndex2, IMPERMISSIBLE_VALUE, belowMinIndexValue2));
+
+ // Update the valid range to include all indices
+ halfEmptyList.updateValidRange(0, MAX_VALID_INDEX);
+
+ // Now, inserting at previously excluded indices should succeed
+ halfEmptyList.put(belowMinValidIndex1, belowMinIndexValue1);
+ assertEquals(belowMinIndexValue1, halfEmptyList.get(belowMinValidIndex1));
+ assertTrue(halfEmptyList.putIfEqual(belowMinValidIndex2, IMPERMISSIBLE_VALUE, belowMinIndexValue2));
+ assertEquals(belowMinIndexValue2, halfEmptyList.get(belowMinValidIndex2));
+
+ // Force creation of an additional chunk
+ final int INDEX_OFFSET = 10;
+ halfEmptyList.put(belowMinValidIndex2 - INDEX_OFFSET, belowMinIndexValue2);
+
+ // Write the updated list to a new file and verify its existence
+ final String TEMP_FILE_NAME_2 = String.format(
+ "testUpdateMinToTheLowerEnd_2_write_%s_read_back_%s.ll", readerFactory, secondReaderFactory);
+ final Path longListFile2 = writeLongListToFileAndVerify(halfEmptyList, TEMP_FILE_NAME_2, tempDir);
+
+ // Reconstruct the list again using the second reader
+ try (final LongList zeroMinValidIndexList =
+ secondReaderFactory.createFromFile().apply(longListFile2, CONFIGURATION)) {
+ // Verify that indices up to the new offset are empty
+ checkEmptyUpToIndex(zeroMinValidIndexList, belowMinValidIndex2 - INDEX_OFFSET);
+
+ // Validate all data above the midpoint is intact
+ checkData(zeroMinValidIndexList, HALF_SAMPLE_SIZE, SAMPLE_SIZE);
+
+ // Verify all indices are correctly restored after updating values below the minimum
+ for (int i = 0; i < newMinValidIndex; i++) {
+ assertEquals(
+ halfEmptyList.get(i),
+ zeroMinValidIndexList.get(i),
+ "Mismatch in reconstructed list data.");
+ zeroMinValidIndexList.put(i, i + 100); // Refill the list
+ }
+
+ // Validate the refilled list
+ checkData(zeroMinValidIndexList);
+ }
+ } finally {
+ // Clean up temporary files
+ Files.deleteIfExists(longListFile);
+ }
+ }
+ }
+
+ /**
+ * Combines writer-reader pairs with predefined range configurations for testing.
+ *
+ * @param writerReaderPairs a stream of writer-reader pairs
+ * @return a stream of arguments combining writer-reader pairs with range parameters
+ */
+ protected static Stream longListWriterReaderRangePairsProviderBase(
+ final Stream writerReaderPairs) {
+ return writerReaderPairs.flatMap(pair -> {
+ Object writerFactory = pair.get()[0];
+ Object readerFactory = pair.get()[1];
+
+ return Stream.of(
+ // writerFactory, readerFactory, startIndex, endIndex, numLongsPerChunk, maxLongs
+ Arguments.of(writerFactory, readerFactory, 1, 1, 100, 1000),
+ Arguments.of(writerFactory, readerFactory, 1, 5, 100, 1000),
+ Arguments.of(writerFactory, readerFactory, 150, 150, 100, 1000),
+ Arguments.of(writerFactory, readerFactory, 150, 155, 100, 1000));
+ });
+ }
+
+ @ParameterizedTest(
+ name = "[{index}] Writer={0}, Reader={1}, startIndex={2}, endIndex={3}, numLongsPerChunk={4}, maxLongs={5}")
+ @MethodSource("longListWriterReaderRangePairsProvider")
+ void testWriteReadRangeElement(
+ final LongListWriterFactory writerFactory,
+ final LongListReaderFactory readerFactory,
+ final int startIndex,
+ final int endIndex,
+ final int numLongsPerChunk,
+ final long maxLongs,
+ @TempDir final Path tempDir)
+ throws IOException {
+
+ // Create a writer LongList with the specified number of longs per chunk and max longs
+ try (final LongList writerList = createFullyParameterizedLongListWith(numLongsPerChunk, maxLongs)) {
+ // Update the valid range to include only the specified range of indices
+ writerList.updateValidRange(0, endIndex);
+
+ // Populate the range with values
+ for (int i = startIndex; i <= endIndex; i++) {
+ writerList.put(i, i + 100);
+ }
+
+ // Write the long list to a file and verify its existence
+ final String TEMP_FILE_NAME = String.format(
+ "testWriteReadRangeElement-%d-%d-%d-%d_write_%s_read_back_%s.ll",
+ startIndex, endIndex, numLongsPerChunk, maxLongs, writerFactory, readerFactory);
+ final Path longListFile = writeLongListToFileAndVerify(writerList, TEMP_FILE_NAME, tempDir);
+
+ // Reconstruct the long list from the file using the reader
+ try (final LongList readerList = readerFactory.createFromFile().apply(longListFile, CONFIGURATION)) {
+ // Validate that the reconstructed list has the same capacity and size
+ assertEquals(writerList.capacity(), readerList.capacity(), "Capacity mismatch in reconstructed list.");
+ assertEquals(writerList.size(), readerList.size(), "Size mismatch in reconstructed list.");
+
+ // Verify that the data in the specified range is correctly restored
+ for (int i = startIndex; i <= endIndex; i++) {
+ assertEquals(i + 100, readerList.get(i), "Mismatch in value for index " + i);
+ }
+ } finally {
+ // Clean up the temporary file
+ Files.deleteIfExists(longListFile);
+ }
+ }
+ }
+
+ /**
+ * Combines writer-reader pairs with predefined chunk offset configurations (second set) for testing.
+ *
+ * @param writerReaderPairs a stream of writer-reader pairs
+ * @return a stream of arguments combining writer-reader pairs with chunk offset parameters
+ */
+ protected static Stream longListWriterReaderOffsetPairsProviderBase(
+ final Stream writerReaderPairs) {
+ return writerReaderPairs.flatMap(pair -> {
+ Object writerFactory = pair.get()[0];
+ Object readerFactory = pair.get()[1];
+
+ return Stream.of(
+ // writerFactory, readerFactory, chunkOffset
+ Arguments.of(writerFactory, readerFactory, 0),
+ Arguments.of(writerFactory, readerFactory, 1),
+ Arguments.of(writerFactory, readerFactory, 50),
+ Arguments.of(writerFactory, readerFactory, 99),
+ Arguments.of(writerFactory, readerFactory, 100));
+ });
+ }
+
+ @ParameterizedTest(name = "[{index}] Writer={0}, Reader={1}, chunkOffset={2}")
+ @MethodSource("longListWriterReaderOffsetPairsProvider")
+ void testPersistListWithNonZeroMinValidIndex(
+ final LongListWriterFactory writerFactory,
+ final LongListReaderFactory readerFactory,
+ final int chunkOffset,
+ @TempDir final Path tempDir)
+ throws IOException {
+
+ // Create a writer LongList where each chunk holds 100 longs, resulting in 100 chunks
+ try (final LongList writerList = createFullyParameterizedLongListWith(100, SAMPLE_SIZE)) {
+
+ // Populate the list with sample data and validate its initial state
+ populateList(writerList);
+ checkData(writerList);
+
+ // Update the minimum valid index to simulate a "half-empty" list with the specified chunk offset
+ writerList.updateValidRange(HALF_SAMPLE_SIZE + chunkOffset, writerList.size() - 1);
+
+ // Write the modified list to a file and verify its existence
+ final String TEMP_FILE_NAME = String.format(
+ "testPersistListWithNonZeroMinValidIndex_%d_write_%s_read_back_%s.ll",
+ chunkOffset, writerFactory, readerFactory);
+ final Path longListFile = writeLongListToFileAndVerify(writerList, TEMP_FILE_NAME, tempDir);
+
+ // Reconstruct the LongList from the file
+ try (final LongList readerList = readerFactory.createFromFile().apply(longListFile, CONFIGURATION)) {
+ // Validate the reconstructed list's capacity and size
+ assertEquals(writerList.capacity(), readerList.capacity(), "Capacity mismatch in reconstructed list.");
+ assertEquals(writerList.size(), readerList.size(), "Size mismatch in reconstructed list.");
+
+ // Ensure the data matches between the original and reconstructed lists
+ for (int i = 0; i < readerList.size(); i++) {
+ assertEquals(
+ writerList.get(i),
+ readerList.get(i),
+ "Unexpected value in a loaded readerList, index=" + i);
+ }
+ } finally {
+ // Clean up the temporary file after the test
+ Files.deleteIfExists(longListFile);
+ }
+ }
+ }
+
+ @ParameterizedTest(name = "[{index}] Writer={0}, Reader={1}, chunkOffset={2}")
+ @MethodSource("longListWriterReaderOffsetPairsProvider")
+ void testPersistShrunkList(
+ final LongListWriterFactory writerFactory,
+ final LongListReaderFactory readerFactory,
+ final int chunkOffset,
+ @TempDir final Path tempDir)
+ throws IOException {
+
+ // Create a writer LongList where each chunk holds 100 longs, resulting in 100 chunks
+ try (final LongList writerList = createFullyParameterizedLongListWith(100, SAMPLE_SIZE)) {
+
+ // Populate the list with sample data and validate its initial state
+ populateList(writerList);
+ checkData(writerList);
+
+ // Shrink the valid range of the list to simulate partial truncation
+ writerList.updateValidRange(0, HALF_SAMPLE_SIZE + chunkOffset);
+
+ // Write the modified list to a file and verify its existence
+ final String TEMP_FILE_NAME = String.format(
+ "testPersistShrunkList_%d_write_%s_read_back_%s.ll", chunkOffset, writerFactory, readerFactory);
+ final Path longListFile = writeLongListToFileAndVerify(writerList, TEMP_FILE_NAME, tempDir);
+
+ // Reconstruct the LongList from the file
+ try (final LongList readerList = readerFactory.createFromFile().apply(longListFile, CONFIGURATION)) {
+ // Validate the reconstructed list's capacity and size
+ assertEquals(writerList.capacity(), readerList.capacity(), "Capacity mismatch in reconstructed list.");
+ assertEquals(writerList.size(), readerList.size(), "Size mismatch in reconstructed list.");
+
+ // Ensure the data matches between the original and reconstructed lists
+ for (int i = 0; i < readerList.size(); i++) {
+ assertEquals(
+ writerList.get(i),
+ readerList.get(i),
+ "Unexpected value in a loaded readerList, index=" + i);
+ }
+ } finally {
+ // Clean up the temporary file after the test
+ Files.deleteIfExists(longListFile);
+ }
+ }
+ }
+
+ // Utility methods
+
+ @SuppressWarnings("UnusedReturnValue")
+ static T populateList(T longList) {
+ return populateList(longList, SAMPLE_SIZE);
+ }
+
+ static T populateList(T longList, int sampleSize) {
+ longList.updateValidRange(0, sampleSize - 1);
+ for (int i = 0; i < sampleSize; i++) {
+ longList.put(i, i + 100);
+ }
+ return longList;
+ }
+
+ static void checkData(final LongList longList) {
+ checkData(longList, 0, SAMPLE_SIZE);
+ }
+
+ static void checkData(final LongList longList, final int startIndex, final int endIndex) {
+ for (int i = startIndex; i < endIndex; i++) {
+ final long readValue = longList.get(i, 0);
+ assertEquals(
+ i + 100,
+ readValue,
+ "Longs don't match for " + i + " got [" + readValue + "] should be [" + i + 100 + "]");
+ }
+ }
+
+ static void checkEmptyUpToIndex(LongList longList, int index) {
+ for (int i = 0; i < index; i++) {
+ final long readValue = longList.get(i, 0);
+ assertEquals(0, readValue, "Longs don't match for " + i + " got [" + readValue + "] should be [" + 0 + "]");
+ }
+ }
+
+ @SuppressWarnings("SameParameterValue")
+ static void checkEmptyFromIndex(LongList longList, int fromIndex, int toIndex) {
+ for (int i = fromIndex; i < toIndex; i++) {
+ final long readValue = longList.get(i, 0);
+ assertEquals(0, readValue, "Longs don't match for " + i + " got [" + readValue + "] should be [" + 0 + "]");
+ }
+ }
+
+ /**
+ * Writes all longs in LongList instance to a temporary file and verifies its existence.
+ *
+ * @param longList the LongList instance to be written to the file
+ * @param fileName the name of the file to write
+ * @param tempDir the directory where the temporary file will be created
+ * @return the path to the created file
+ * @throws IOException if an I/O error occurs
+ */
+ static Path writeLongListToFileAndVerify(final LongList longList, final String fileName, final Path tempDir)
+ throws IOException {
+ final Path file = tempDir.resolve(fileName);
+
+ if (Files.exists(file)) {
+ Files.delete(file);
+ }
+
+ longList.writeToFile(file);
+
+ assertTrue(
+ Files.exists(file),
+ String.format("File '%s' does not exist after writing longs.", file.toAbsolutePath()));
+
+ return file;
}
}
diff --git a/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/LongListAdHocTest.java b/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/LongListAdHocTest.java
new file mode 100644
index 000000000000..bf97ddf14f8d
--- /dev/null
+++ b/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/LongListAdHocTest.java
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2022-2025 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.swirlds.merkledb.collections;
+
+import static com.swirlds.merkledb.collections.AbstractLongListTest.SAMPLE_SIZE;
+import static com.swirlds.merkledb.collections.AbstractLongListTest.checkData;
+import static com.swirlds.merkledb.collections.AbstractLongListTest.populateList;
+import static com.swirlds.merkledb.collections.LongListOffHeap.DEFAULT_RESERVED_BUFFER_LENGTH;
+import static com.swirlds.merkledb.test.fixtures.MerkleDbTestUtils.CONFIGURATION;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.IntStream;
+import java.util.stream.Stream;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
+
+/**
+ * Provides specialized or ad hoc tests for certain edge cases in {@link LongList} implementations.
+ * These scenarios do not neatly fit into the broader, cross-compatibility tests found in
+ * {@link AbstractLongListTest}, but still warrant individual coverage for bug fixes or
+ * concurrency concerns.
+ */
+class LongListAdHocTest {
+
+ @ParameterizedTest
+ @MethodSource("provideLongLists")
+ void test4089(final AbstractLongList> list) {
+ list.updateValidRange(0, list.maxLongs - 1);
+ // Issue #4089: ArrayIndexOutOfBoundsException from VirtualMap.put()
+ final long maxLongs = list.maxLongs;
+ final int defaultValue = -1;
+ final AtomicBoolean done = new AtomicBoolean();
+
+ IntStream.range(0, 2).parallel().forEach(thread -> {
+ if (thread == 0) {
+ // Getter
+ while (!done.get()) {
+ assertEquals(defaultValue, list.get(maxLongs - 2, defaultValue), "Value should be whats expected.");
+ }
+ } else {
+ // Putter
+ list.put(maxLongs - 1, 1);
+ done.set(true);
+ }
+ });
+ }
+
+ static Stream provideLongLists() {
+ final int numLongsPerChunk = 32;
+ final int maxLongs = numLongsPerChunk * 4096;
+ return Stream.of(
+ new LongListHeap(numLongsPerChunk, maxLongs, 0),
+ new LongListOffHeap(numLongsPerChunk, maxLongs, DEFAULT_RESERVED_BUFFER_LENGTH));
+ }
+
+ // Tests https://github.com/hashgraph/hedera-services/issues/16860
+ @Test
+ void testReallocateThreadLocalBufferWhenMemoryChunkSizeChanges() throws IOException {
+ // Create two long lists with different memory chunk sizes
+ var largeMemoryChunkList = new LongListDisk(100, SAMPLE_SIZE * 2, 0, CONFIGURATION);
+ var smallMemoryChunkList = new LongListDisk(10, SAMPLE_SIZE * 2, 0, CONFIGURATION);
+
+ // Populate both long lists with sample data and validate
+ populateList(largeMemoryChunkList);
+ checkData(largeMemoryChunkList);
+ populateList(smallMemoryChunkList);
+ checkData(smallMemoryChunkList);
+
+ // Capture the original file channel sizes before closing chunks
+ final long originalLargeListChannelSize =
+ largeMemoryChunkList.getCurrentFileChannel().size();
+ final long originalSmallListChannelSize =
+ smallMemoryChunkList.getCurrentFileChannel().size();
+
+ // Close all chunks in long lists
+ for (int i = 0; i < largeMemoryChunkList.chunkList.length(); i++) {
+ final Long chunk = largeMemoryChunkList.chunkList.get(i);
+ if (chunk != null) {
+ largeMemoryChunkList.closeChunk(chunk);
+ }
+ }
+ for (int i = 0; i < smallMemoryChunkList.chunkList.length(); i++) {
+ final Long chunk = smallMemoryChunkList.chunkList.get(i);
+ if (chunk != null) {
+ smallMemoryChunkList.closeChunk(chunk);
+ }
+ }
+
+ // Ensure that file channel sizes have not inadvertently grown
+ assertEquals(
+ originalLargeListChannelSize,
+ largeMemoryChunkList.getCurrentFileChannel().size());
+ assertEquals(
+ originalSmallListChannelSize,
+ smallMemoryChunkList.getCurrentFileChannel().size());
+
+ // Tear down
+ largeMemoryChunkList.close();
+ largeMemoryChunkList.resetTransferBuffer();
+ smallMemoryChunkList.close();
+ smallMemoryChunkList.resetTransferBuffer();
+ }
+}
diff --git a/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/LongListDiskTest.java b/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/LongListDiskTest.java
index 45162daf7cea..dec87861b466 100644
--- a/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/LongListDiskTest.java
+++ b/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/LongListDiskTest.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2021-2024 Hedera Hashgraph, LLC
+ * Copyright (C) 2024-2025 Hedera Hashgraph, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,436 +16,86 @@
package com.swirlds.merkledb.collections;
-import static com.swirlds.common.test.fixtures.RandomUtils.nextInt;
-import static com.swirlds.merkledb.collections.LongList.IMPERMISSIBLE_VALUE;
+import static com.swirlds.base.units.UnitConstants.MEBIBYTES_TO_BYTES;
+import static com.swirlds.merkledb.collections.AbstractLongList.DEFAULT_MAX_LONGS_TO_STORE;
import static com.swirlds.merkledb.test.fixtures.MerkleDbTestUtils.CONFIGURATION;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import com.swirlds.common.test.fixtures.io.ResourceLoader;
import java.io.IOException;
-import java.net.URISyntaxException;
-import java.nio.file.Files;
import java.nio.file.Path;
import java.util.stream.Stream;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.io.TempDir;
-import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
-import org.junit.jupiter.params.provider.MethodSource;
-@SuppressWarnings("FieldCanBeLocal")
-class LongListDiskTest {
- private static final int SAMPLE_SIZE = 10_000;
- public static final int MAX_VALID_INDEX = SAMPLE_SIZE - 1;
- public static final int HALF_SAMPLE_SIZE = SAMPLE_SIZE / 2;
- public static final int NUM_LONGS_PER_CHUNK = 10;
- /**
- * Temporary directory provided by JUnit
- */
- @SuppressWarnings("unused")
- @TempDir
- Path testDirectory;
-
- LongListDisk longListDisk;
-
- @Test
- void createOffHeapReadBack() throws IOException {
- final LongListOffHeap longListOffHeap = populateList(new LongListOffHeap(NUM_LONGS_PER_CHUNK, SAMPLE_SIZE, 0));
- checkData(longListOffHeap);
- final Path tempFile = testDirectory.resolve("createOffHeapReadBack.ll");
- if (Files.exists(tempFile)) {
- Files.delete(tempFile);
- }
- longListOffHeap.writeToFile(tempFile);
- // now open file with
- try {
- longListDisk = new LongListDisk(tempFile, CONFIGURATION);
- assertEquals(longListOffHeap.size(), longListDisk.size(), "Unexpected value for longListDisk.size()");
- checkData(longListDisk);
- longListDisk.resetTransferBuffer();
- } finally {
- // cleanup
- Files.delete(tempFile);
- }
- }
-
- @Test
- void createHeapReadBack() throws IOException {
- final LongListHeap longListHeap = populateList(new LongListHeap(NUM_LONGS_PER_CHUNK, SAMPLE_SIZE, 0));
- checkData(longListHeap);
- final Path tempFile = testDirectory.resolve("createHeapReadBack.ll");
- if (Files.exists(tempFile)) {
- Files.delete(tempFile);
- }
- longListHeap.writeToFile(tempFile);
- // now open file with
- try {
- longListDisk = new LongListDisk(tempFile, CONFIGURATION);
- assertEquals(longListHeap.size(), longListDisk.size(), "Unexpected value for longListDisk.size()");
- checkData(longListDisk);
- longListDisk.resetTransferBuffer();
- } finally {
- // cleanup
- Files.delete(tempFile);
- }
- }
-
- @ParameterizedTest
- @MethodSource("inMemoryLongListProvider")
- void createHalfEmptyLongListInMemoryReadBack(LongList longList, int chunkOffset) throws IOException {
- populateList(longList);
- checkData(longList);
-
- int newMinValidIndex = HALF_SAMPLE_SIZE + chunkOffset;
- longList.updateValidRange(newMinValidIndex, MAX_VALID_INDEX);
- final Path tempFile = testDirectory.resolve(String.format(
- "LongListDiskTest_half_empty_%s_%d.ll", longList.getClass().getSimpleName(), chunkOffset));
- if (Files.exists(tempFile)) {
- Files.delete(tempFile);
- }
- longList.writeToFile(tempFile);
- // now open file with
- try {
- longListDisk = new LongListDisk(tempFile, CONFIGURATION);
- assertEquals(longList.size(), longListDisk.size(), "Unexpected value for longListDisk.size()");
- checkEmptyUpToIndex(longListDisk, newMinValidIndex);
- checkData(longListDisk, newMinValidIndex, SAMPLE_SIZE);
- longListDisk.resetTransferBuffer();
- } finally {
- // cleanup
- Files.delete(tempFile);
- }
- }
-
- public static Stream inMemoryLongListProvider() {
- return Stream.of(
- Arguments.of(new LongListOffHeap(NUM_LONGS_PER_CHUNK, SAMPLE_SIZE, 0), 0),
- Arguments.of(new LongListOffHeap(NUM_LONGS_PER_CHUNK, SAMPLE_SIZE, 0), 5),
- Arguments.of(new LongListHeap(NUM_LONGS_PER_CHUNK, SAMPLE_SIZE, 0), 0),
- Arguments.of(new LongListHeap(NUM_LONGS_PER_CHUNK, SAMPLE_SIZE, 0), 5));
- }
-
- @Test
- void updateMinToTheLowerEnd() throws IOException {
- longListDisk = populateList(new LongListDisk(NUM_LONGS_PER_CHUNK, SAMPLE_SIZE, 0, CONFIGURATION));
- checkData(longListDisk);
- int newMinValidIndex = HALF_SAMPLE_SIZE;
- longListDisk.updateValidRange(newMinValidIndex, MAX_VALID_INDEX);
-
- final Path halfEmptyListFile = testDirectory.resolve("LongListDiskTest_half_empty.ll");
- if (Files.exists(halfEmptyListFile)) {
- Files.delete(halfEmptyListFile);
- }
- longListDisk.writeToFile(halfEmptyListFile);
-
- try (LongListDisk halfEmptyList = new LongListDisk(halfEmptyListFile, CONFIGURATION)) {
- // check that it's half-empty indeed
- checkEmptyUpToIndex(halfEmptyList, newMinValidIndex);
- // and half-full
- checkData(halfEmptyList, HALF_SAMPLE_SIZE, SAMPLE_SIZE);
-
- // if we try to put a value below min valid index, the operation should fail with AssertionError
- int belowMinValidIndex1 = newMinValidIndex - 1;
- int belowMinValidIndex2 = newMinValidIndex - 2;
- int belowMinIndexValue1 = nextInt();
- int belowMinIndexValue2 = nextInt();
- assertThrows(AssertionError.class, () -> halfEmptyList.put(belowMinValidIndex1, belowMinIndexValue1));
- // doesn't throw an AssertionError, but returns false
- assertFalse(halfEmptyList.putIfEqual(belowMinValidIndex2, IMPERMISSIBLE_VALUE, belowMinIndexValue2));
-
- // however, once we update min valid index, we should be able to put values below it
- halfEmptyList.updateValidRange(0, MAX_VALID_INDEX);
- halfEmptyList.put(belowMinValidIndex1, belowMinIndexValue1);
- assertEquals(belowMinIndexValue1, halfEmptyList.get(belowMinValidIndex1));
-
- assertTrue(halfEmptyList.putIfEqual(belowMinValidIndex2, IMPERMISSIBLE_VALUE, belowMinIndexValue2));
- assertEquals(belowMinIndexValue2, halfEmptyList.get(belowMinValidIndex2));
+public class LongListDiskTest extends AbstractLongListTest {
- // forcing to create one more chunk
- halfEmptyList.put(belowMinValidIndex2 - NUM_LONGS_PER_CHUNK, belowMinIndexValue2);
-
- // check that it still works after restoring from a file
- final Path zeroMinValidIndex = testDirectory.resolve("LongListDiskTest_zero_min_valid_index.ll");
- if (Files.exists(zeroMinValidIndex)) {
- Files.delete(zeroMinValidIndex);
- }
- halfEmptyList.writeToFile(zeroMinValidIndex);
-
- try (LongListDisk zeroMinValidIndexList = new LongListDisk(zeroMinValidIndex, CONFIGURATION)) {
- checkEmptyUpToIndex(zeroMinValidIndexList, belowMinValidIndex2 - NUM_LONGS_PER_CHUNK);
- checkData(zeroMinValidIndexList, HALF_SAMPLE_SIZE, SAMPLE_SIZE);
-
- for (int i = 0; i < newMinValidIndex; i++) {
- // assert the content is the same
- assertEquals(halfEmptyList.get(i), zeroMinValidIndexList.get(i));
-
- // refill the list
- zeroMinValidIndexList.put(i, i + 100);
- }
-
- // make sure that the refilled list works as expected
- checkData(zeroMinValidIndexList);
- }
- }
- }
-
- @Test
- void createDiskReadBack() throws IOException {
- longListDisk = new LongListDisk(NUM_LONGS_PER_CHUNK, SAMPLE_SIZE, 0, CONFIGURATION);
- populateList(longListDisk);
- checkData(longListDisk);
- // test changing data with putIf
- assertTrue(longListDisk.putIfEqual(10, 110, 123), "Unexpected value from putIfEqual()");
- assertEquals(123, longListDisk.get(10, -1), "Unexpected value from longListDisk.get(10)");
- assertFalse(longListDisk.putIfEqual(10, 110, 345), "Unexpected value from putIfEqual() #2");
- longListDisk.put(10, 110); // put back
-
- final Path lsitFile = testDirectory.resolve("createDiskReadBack.ll");
- if (Files.exists(lsitFile)) {
- Files.delete(lsitFile);
- }
- longListDisk.writeToFile(lsitFile);
- long listSize = longListDisk.size();
- // close
- longListDisk.close();
- // now open file with
-
- try (final LongListDisk longListDiskRestored = new LongListDisk(lsitFile, CONFIGURATION)) {
- assertEquals(listSize, longListDiskRestored.size(), "Unexpected value from longListDiskRestored.size()");
- checkData(longListDiskRestored);
- }
+ @Override
+ protected LongListDisk createLongList() {
+ return new LongListDisk(CONFIGURATION);
}
- @Test
- void testBackwardCompatibility_halfEmpty() throws URISyntaxException, IOException {
- final Path pathToList = ResourceLoader.getFile("test_data/LongListOffHeapHalfEmpty_10k_10pc_v1.ll");
- longListDisk = new LongListDisk(pathToList, 0, CONFIGURATION);
- // half-empty
- checkEmptyUpToIndex(longListDisk, HALF_SAMPLE_SIZE);
- // half-full
- for (int i = HALF_SAMPLE_SIZE; i < SAMPLE_SIZE; i++) {
- assertEquals(i, longListDisk.get(i));
- }
+ @Override
+ protected LongListDisk createLongListWithChunkSizeInMb(int chunkSizeInMb) {
+ final int impliedLongsPerChunk = Math.toIntExact((chunkSizeInMb * (long) MEBIBYTES_TO_BYTES) / Long.BYTES);
+ return new LongListDisk(impliedLongsPerChunk, DEFAULT_MAX_LONGS_TO_STORE, 0, CONFIGURATION);
}
- @Test
- void testShrinkList_minValidIndex() throws IOException {
- longListDisk = new LongListDisk(10, SAMPLE_SIZE * 2, 0, CONFIGURATION);
- populateList(longListDisk);
- checkData(longListDisk, 0, SAMPLE_SIZE);
- // temporary file channel doesn't contain the header
- final long originalFileSize = longListDisk.getCurrentFileChannel().size() + longListDisk.currentFileHeaderSize;
-
- longListDisk.updateValidRange(HALF_SAMPLE_SIZE, SAMPLE_SIZE * 2 - 1);
-
- // half-empty
- checkEmptyUpToIndex(longListDisk, HALF_SAMPLE_SIZE);
- // half-full
- checkData(longListDisk, HALF_SAMPLE_SIZE, SAMPLE_SIZE);
-
- final Path shrunkListFile = testDirectory.resolve("testShrinkList_minValidIndex.ll");
- if (Files.exists(shrunkListFile)) {
- Files.delete(shrunkListFile);
- }
- // if we write to the same file, it doesn't shrink after the min valid index update
- longListDisk.writeToFile(shrunkListFile);
- assertEquals(HALF_SAMPLE_SIZE * Long.BYTES, originalFileSize - Files.size(shrunkListFile));
-
- try (final LongListDisk loadedList = new LongListDisk(shrunkListFile, 0, CONFIGURATION)) {
- for (int i = 0; i < SAMPLE_SIZE; i++) {
- assertEquals(
- longListDisk.get(i),
- loadedList.get(i),
- "Unexpected value in a loaded longListDisk, index=" + i);
- }
- }
+ @Override
+ protected LongListDisk createFullyParameterizedLongListWith(int numLongsPerChunk, long maxLongs) {
+ return new LongListDisk(numLongsPerChunk, maxLongs, 0, CONFIGURATION);
}
- @Test
- void testShrinkList_maxValidIndex() throws IOException {
- longListDisk = new LongListDisk(10, SAMPLE_SIZE * 2, 0, CONFIGURATION);
- populateList(longListDisk);
- checkData(longListDisk, 0, SAMPLE_SIZE);
- // temporary file channel doesn't contain the header
- final long originalFileSize = longListDisk.getCurrentFileChannel().size() + longListDisk.currentFileHeaderSize;
-
- longListDisk.updateValidRange(0, HALF_SAMPLE_SIZE - 1);
-
- // half-empty
- checkEmptyFromIndex(longListDisk, HALF_SAMPLE_SIZE);
- // half-full
- checkData(longListDisk, 0, HALF_SAMPLE_SIZE - 1);
-
- final Path shrunkListFile = testDirectory.resolve("testShrinkList_maxValidIndex.ll");
- if (Files.exists(shrunkListFile)) {
- Files.delete(shrunkListFile);
- }
- // if we write to the same file, it doesn't shrink after the min valid index update
- longListDisk.writeToFile(shrunkListFile);
- assertEquals(HALF_SAMPLE_SIZE * Long.BYTES, originalFileSize - Files.size(shrunkListFile));
-
- try (final LongListDisk loadedList = new LongListDisk(shrunkListFile, 0, CONFIGURATION)) {
- for (int i = 0; i < SAMPLE_SIZE; i++) {
- assertEquals(longListDisk.get(i), loadedList.get(i), "Unexpected value in a loaded longListDisk");
- }
- }
+ @Override
+ protected LongListDisk createLongListFromFile(Path file) throws IOException {
+ return new LongListDisk(file, CONFIGURATION);
}
- @Test
- void testReuseOfChunks_minValidIndex() throws IOException {
- longListDisk = new LongListDisk(100, SAMPLE_SIZE * 2, 0, CONFIGURATION);
- populateList(longListDisk);
- checkData(longListDisk, 0, SAMPLE_SIZE);
- // temporary file channel doesn't contain the header
- final long originalChannelSize = longListDisk.getCurrentFileChannel().size();
-
- // freeing up some chunks
- longListDisk.updateValidRange(HALF_SAMPLE_SIZE, SAMPLE_SIZE * 2 - 1);
-
- // using the freed up chunks
- for (int i = SAMPLE_SIZE; i < SAMPLE_SIZE + HALF_SAMPLE_SIZE; i++) {
- longListDisk.put(i, i + 100);
- }
-
- // a longListDisk should have the same size as before because it has the same number of entries
- assertEquals(originalChannelSize, longListDisk.getCurrentFileChannel().size());
-
- checkEmptyUpToIndex(longListDisk, HALF_SAMPLE_SIZE);
- checkData(longListDisk, HALF_SAMPLE_SIZE, SAMPLE_SIZE + HALF_SAMPLE_SIZE);
- }
-
- @Test
- void testReuseOfChunks_maxValidIndex() throws IOException {
- longListDisk = new LongListDisk(100, SAMPLE_SIZE * 2, 0, CONFIGURATION);
- populateList(longListDisk);
- checkData(longListDisk, 0, SAMPLE_SIZE);
- // temporary file channel doesn't contain the header
- final long originalChannelSize = longListDisk.getCurrentFileChannel().size();
-
- // freeing up some chunks
- longListDisk.updateValidRange(0, HALF_SAMPLE_SIZE);
-
- // using the freed up chunks
- longListDisk.updateValidRange(0, SAMPLE_SIZE - 1);
- for (int i = HALF_SAMPLE_SIZE; i < SAMPLE_SIZE; i++) {
- longListDisk.put(i, i + 100);
- }
-
- // a longListDisk should have the same size as before because it has the same number of entries
- assertEquals(originalChannelSize, longListDisk.getCurrentFileChannel().size());
-
- checkData(longListDisk, 0, SAMPLE_SIZE);
- }
-
- @Test
- void testReallocateThreadLocalBufferWhenMemoryChunkSizeChanges() throws IOException {
- // Create two long lists with different memory chunk sizes
- var largeMemoryChunkList = new LongListDisk(100, SAMPLE_SIZE * 2, 0, CONFIGURATION);
- var smallMemoryChunkList = new LongListDisk(10, SAMPLE_SIZE * 2, 0, CONFIGURATION);
-
- // Populate both long lists with sample data and validate
- populateList(largeMemoryChunkList);
- checkData(largeMemoryChunkList, 0, SAMPLE_SIZE);
- populateList(smallMemoryChunkList);
- checkData(smallMemoryChunkList, 0, SAMPLE_SIZE);
-
- // Capture the original file channel sizes before closing chunks
- final long originalLargeListChannelSize =
- largeMemoryChunkList.getCurrentFileChannel().size();
- final long originalSmallListChannelSize =
- smallMemoryChunkList.getCurrentFileChannel().size();
-
- // Close all chunks in long lists
- for (int i = 0; i < largeMemoryChunkList.chunkList.length(); i++) {
- final Long chunk = largeMemoryChunkList.chunkList.get(i);
- if (chunk != null) {
- largeMemoryChunkList.closeChunk(chunk);
- }
- }
- for (int i = 0; i < smallMemoryChunkList.chunkList.length(); i++) {
- final Long chunk = smallMemoryChunkList.chunkList.get(i);
- if (chunk != null) {
- smallMemoryChunkList.closeChunk(chunk);
- }
- }
-
- // Ensure that file channel sizes have not inadvertently grown
- assertEquals(
- originalLargeListChannelSize,
- largeMemoryChunkList.getCurrentFileChannel().size());
- assertEquals(
- originalSmallListChannelSize,
- smallMemoryChunkList.getCurrentFileChannel().size());
-
- // Tear down
- largeMemoryChunkList.close();
- largeMemoryChunkList.resetTransferBuffer();
- smallMemoryChunkList.close();
- smallMemoryChunkList.resetTransferBuffer();
- }
-
- @Test
- void testBigIndex() throws IOException {
- try (LongListDisk list = new LongListDisk(CONFIGURATION)) {
- long bigIndex = Integer.MAX_VALUE + 1L;
- list.updateValidRange(bigIndex, bigIndex);
- list.put(bigIndex, 1);
-
- assertEquals(1, list.get(bigIndex));
- final Path file = testDirectory.resolve("LongListLargeIndex.ll");
- if (Files.exists(file)) {
- Files.delete(file);
- }
- list.writeToFile(file);
- try (LongListDisk listFromFile = new LongListDisk(file, CONFIGURATION)) {
- assertEquals(1, listFromFile.get(bigIndex));
- }
- }
- }
-
- @AfterEach
- public void tearDown() {
- if (longListDisk != null) {
- longListDisk.close();
- longListDisk.resetTransferBuffer();
- }
- }
-
- private static void checkData(final LongList longList) {
- checkData(longList, 0, LongListDiskTest.SAMPLE_SIZE);
- }
-
- private static void checkData(final LongList longList, final int startIndex, final int endIndex) {
- for (int i = startIndex; i < endIndex; i++) {
- assertEquals(i + 100, longList.get(i, -1), "Unexpected value from longList.get(" + i + ")");
- }
- }
-
- private static T populateList(T longList) {
- return populateList(longList, SAMPLE_SIZE);
+ /**
+ * Provides a stream of writer-reader pairs specifically for the {@link LongListDisk} implementation.
+ * The writer is always {@link LongListDisk}, and it is paired with three reader implementations
+ * (heap, off-heap, and disk-based). This allows for testing whether data written by the
+ * {@link LongListDisk} can be correctly read back by all supported long list implementations.
+ *
+ * This method builds on {@link AbstractLongListTest#longListWriterBasedPairsProvider} to generate
+ * the specific writer-reader combinations for the {@link LongListDisk} implementation.
+ *
+ * @return a stream of argument pairs, each containing a {@link LongListDisk} writer
+ * and one of the supported reader implementations
+ */
+ static Stream longListWriterReaderPairsProvider() {
+ return longListWriterBasedPairsProvider(diskWriterFactory);
}
- private static T populateList(T longList, int sampleSize) {
- longList.updateValidRange(0, sampleSize - 1);
- for (int i = 0; i < sampleSize; i++) {
- longList.put(i, i + 100);
- }
- return longList;
+ /**
+ * Provides a stream of writer paired with two reader implementations for testing
+ * cross-compatibility.
+ *
+ * Used for {@link AbstractLongListTest#testUpdateMinToTheLowerEnd}
+ *
+ * @return a stream of arguments containing a writer and two readers.
+ */
+ static Stream longListWriterSecondReaderPairsProvider() {
+ return longListWriterSecondReaderPairsProviderBase(longListWriterReaderPairsProvider());
}
- private static void checkEmptyUpToIndex(LongList longList, int index) {
- for (int i = 0; i < index; i++) {
- assertEquals(0, longList.get(i), "Unexpected value for index " + i);
- }
+ /**
+ * Provides writer-reader pairs combined with range configurations for testing.
+ *
+ * Used for {@link AbstractLongListTest#testWriteReadRangeElement}
+ *
+ * @return a stream of arguments for range-based parameterized tests
+ */
+ static Stream longListWriterReaderRangePairsProvider() {
+ return longListWriterReaderRangePairsProviderBase(longListWriterReaderPairsProvider());
}
- private static void checkEmptyFromIndex(LongList longList, int index) {
- for (int i = index; i < SAMPLE_SIZE; i++) {
- assertEquals(0, longList.get(i), "Unexpected value for index " + i);
- }
+ /**
+ * Provides writer-reader pairs combined with chunk offset configurations (second set) for testing.
+ *
+ * Used for {@link AbstractLongListTest#testPersistListWithNonZeroMinValidIndex}
+ * and {@link AbstractLongListTest#testPersistShrunkList}
+ *
+ * @return a stream of arguments for chunk offset based parameterized tests
+ */
+ static Stream longListWriterReaderOffsetPairsProvider() {
+ return longListWriterReaderOffsetPairsProviderBase(longListWriterReaderPairsProvider());
}
}
diff --git a/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/LongListHeapTest.java b/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/LongListHeapTest.java
index 2c4972333d2b..62fc153d3b68 100644
--- a/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/LongListHeapTest.java
+++ b/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/LongListHeapTest.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2021-2024 Hedera Hashgraph, LLC
+ * Copyright (C) 2021-2025 Hedera Hashgraph, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,13 +17,20 @@
package com.swirlds.merkledb.collections;
import static com.swirlds.base.units.UnitConstants.MEBIBYTES_TO_BYTES;
+import static com.swirlds.merkledb.test.fixtures.MerkleDbTestUtils.CONFIGURATION;
-import com.swirlds.config.api.Configuration;
import java.io.IOException;
import java.nio.file.Path;
+import java.util.stream.Stream;
+import org.junit.jupiter.params.provider.Arguments;
public class LongListHeapTest extends AbstractLongListTest<LongListHeap> {
+ @Override
+ protected LongListHeap createLongList() {
+ return new LongListHeap();
+ }
+
@Override
protected LongListHeap createLongListWithChunkSizeInMb(final int chunkSizeInMb) {
final int impliedLongsPerChunk = Math.toIntExact((chunkSizeInMb * (long) MEBIBYTES_TO_BYTES) / Long.BYTES);
@@ -36,8 +43,58 @@ protected LongListHeap createFullyParameterizedLongListWith(final int numLongsPe
}
@Override
- protected LongListHeap createLongListFromFile(final Path file, final Configuration configuration)
- throws IOException {
- return new LongListHeap(file, configuration);
+ protected LongListHeap createLongListFromFile(final Path file) throws IOException {
+ return new LongListHeap(file, CONFIGURATION);
+ }
+
+ /**
+ * Provides a stream of writer-reader pairs specifically for the {@link LongListHeap} implementation.
+ * The writer is always {@link LongListHeap}, and it is paired with three reader implementations
+ * (heap, off-heap, and disk-based). This allows for testing whether data written by the
+ * {@link LongListHeap} can be correctly read back by all supported long list implementations.
+ *
+ * This method builds on {@link AbstractLongListTest#longListWriterBasedPairsProvider} to generate
+ * the specific writer-reader combinations for the {@link LongListHeap} implementation.
+ *
+ * @return a stream of argument pairs, each containing a {@link LongListHeap} writer
+ * and one of the supported reader implementations
+ */
+ static Stream<Arguments> longListWriterReaderPairsProvider() {
+ return longListWriterBasedPairsProvider(heapWriterFactory);
+ }
+
+ /**
+ * Provides a stream of a writer paired with two reader implementations for testing
+ * cross-compatibility.
+ *
+ * Used for {@link AbstractLongListTest#testUpdateMinToTheLowerEnd}
+ *
+ * @return a stream of arguments containing a writer and two readers.
+ */
+ static Stream<Arguments> longListWriterSecondReaderPairsProvider() {
+ return longListWriterSecondReaderPairsProviderBase(longListWriterReaderPairsProvider());
+ }
+
+ /**
+ * Provides writer-reader pairs combined with range configurations for testing.
+ *
+ * Used for {@link AbstractLongListTest#testWriteReadRangeElement}
+ *
+ * @return a stream of arguments for range-based parameterized tests
+ */
+ static Stream<Arguments> longListWriterReaderRangePairsProvider() {
+ return longListWriterReaderRangePairsProviderBase(longListWriterReaderPairsProvider());
+ }
+
+ /**
+ * Provides writer-reader pairs combined with chunk offset configurations (second set) for testing.
+ *
+ * Used for {@link AbstractLongListTest#testPersistListWithNonZeroMinValidIndex}
+ * and {@link AbstractLongListTest#testPersistShrunkList}
+ *
+ * @return a stream of arguments for chunk offset based parameterized tests
+ */
+ static Stream<Arguments> longListWriterReaderOffsetPairsProvider() {
+ return longListWriterReaderOffsetPairsProviderBase(longListWriterReaderPairsProvider());
}
}
diff --git a/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/LongListOffHeapTest.java b/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/LongListOffHeapTest.java
index 2a0a474f0b52..2a26b98e1760 100644
--- a/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/LongListOffHeapTest.java
+++ b/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/LongListOffHeapTest.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2021-2024 Hedera Hashgraph, LLC
+ * Copyright (C) 2021-2025 Hedera Hashgraph, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,34 +18,15 @@
import static com.swirlds.base.units.UnitConstants.MEBIBYTES_TO_BYTES;
import static com.swirlds.merkledb.collections.AbstractLongList.DEFAULT_MAX_LONGS_TO_STORE;
-import static com.swirlds.merkledb.collections.AbstractLongList.DEFAULT_NUM_LONGS_PER_CHUNK;
import static com.swirlds.merkledb.test.fixtures.MerkleDbTestUtils.CONFIGURATION;
-import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-import com.swirlds.common.test.fixtures.io.ResourceLoader;
-import com.swirlds.config.api.Configuration;
import java.io.IOException;
-import java.net.URISyntaxException;
-import java.nio.file.Files;
import java.nio.file.Path;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicLong;
-import org.junit.jupiter.api.MethodOrderer;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.TestMethodOrder;
-import org.junit.jupiter.api.io.TempDir;
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.ValueSource;
+import java.util.stream.Stream;
+import org.junit.jupiter.params.provider.Arguments;
-@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
class LongListOffHeapTest extends AbstractLongListTest<LongListOffHeap> {
- @TempDir
- Path testDirectory;
-
@Override
protected LongListOffHeap createLongList() {
return new LongListOffHeap();
@@ -63,206 +44,58 @@ protected LongListOffHeap createFullyParameterizedLongListWith(final int numLong
}
@Override
- protected LongListOffHeap createLongListFromFile(final Path file, final Configuration configuration)
- throws IOException {
- return new LongListOffHeap(file, configuration);
- }
-
- @Test
- void testCustomNumberOfLongs() throws IOException {
- try (final LongListOffHeap list =
- createFullyParameterizedLongListWith(DEFAULT_NUM_LONGS_PER_CHUNK, getSampleSize())) {
- list.updateValidRange(0, getSampleSize() - 1);
- for (int i = 0; i < getSampleSize(); i++) {
- list.put(i, i + 1);
- }
- final Path file = testDirectory.resolve("LongListOffHeapCustomLongCount.ll");
- // write longList data
- if (Files.exists(file)) {
- Files.delete(file);
- }
- list.writeToFile(file);
-
- final LongListOffHeap listFromDisk = createLongListFromFile(file, CONFIGURATION);
- assertEquals(list.dataCopy().size(), listFromDisk.dataCopy().size());
- }
- }
-
- @Test
- void testInsertAtTheEndOfTheList() {
- final LongListOffHeap list = createLongList();
- list.updateValidRange(0, DEFAULT_MAX_LONGS_TO_STORE - 1);
- assertDoesNotThrow(() -> list.put(DEFAULT_MAX_LONGS_TO_STORE - 1, 1));
- }
-
- @Test
- void testInsertAtTheEndOfTheListCustomConfigured() {
- final int maxLongs = 10;
- final LongListOffHeap list = createFullyParameterizedLongListWith(10, maxLongs);
- list.updateValidRange(0, maxLongs - 1);
- assertDoesNotThrow(() -> list.put(maxLongs - 1, 1));
- }
-
- @ParameterizedTest
- @ValueSource(ints = {0, 1, 5000, 9999, 10000}) // chunk size is 10K longs
- void testPersistListWithNonZeroMinValidIndex(final int chunkOffset) throws IOException {
- try (final LongListOffHeap list = createFullyParameterizedLongListWith(
- getSampleSize() / 100, // 100 chunks
- getSampleSize())) {
- list.updateValidRange(0, getSampleSize() - 1);
- for (int i = 1; i < getSampleSize(); i++) {
- list.put(i, i);
- }
-
- list.updateValidRange(getSampleSize() / 2 + chunkOffset, list.size() - 1);
-
- final Path file = testDirectory.resolve("LongListOffHeapHalfEmpty.ll");
- // write longList data
- if (Files.exists(file)) {
- Files.delete(file);
- }
- list.writeToFile(file);
-
- final LongListOffHeap longListFromFile = createLongListFromFile(file, CONFIGURATION);
-
- for (int i = 0; i < longListFromFile.size(); i++) {
- assertEquals(list.get(i), longListFromFile.get(i));
- }
- }
+ protected LongListOffHeap createLongListFromFile(final Path file) throws IOException {
+ return new LongListOffHeap(file, CONFIGURATION);
}
- @ParameterizedTest
- @ValueSource(ints = {0, 1, 5000, 9999, 10000}) // chunk size is 10K longs
- void testPersistShrunkList(final int chunkOffset) throws IOException {
- try (final LongListOffHeap list = createFullyParameterizedLongListWith(
- getSampleSize() / 100, // 100 chunks
- getSampleSize())) {
- list.updateValidRange(0, getSampleSize() - 1);
- for (int i = 1; i < getSampleSize(); i++) {
- list.put(i, i);
- }
-
- list.updateValidRange(0, getSampleSize() / 2 + chunkOffset);
-
- final Path file = testDirectory.resolve("LongListOffHeapHalfEmpty.ll");
- // write longList data
- if (Files.exists(file)) {
- Files.delete(file);
- }
- list.writeToFile(file);
-
- final LongListOffHeap longListFromFile = createLongListFromFile(file, CONFIGURATION);
-
- for (int i = 0; i < longListFromFile.size(); i++) {
- assertEquals(list.get(i), longListFromFile.get(i));
- }
- }
+ /**
+ * Provides a stream of writer-reader pairs specifically for the {@link LongListOffHeap} implementation.
+ * The writer is always {@link LongListOffHeap}, and it is paired with three reader implementations
+ * (heap, off-heap, and disk-based). This allows for testing whether data written by the
+ * {@link LongListOffHeap} can be correctly read back by all supported long list implementations.
+ *
+ * This method builds on {@link AbstractLongListTest#longListWriterBasedPairsProvider} to generate
+ * the specific writer-reader combinations for the {@link LongListOffHeap} implementation.
+ *
+ * @return a stream of argument pairs, each containing a {@link LongListOffHeap} writer
+ * and one of the supported reader implementations
+ */
+ static Stream<Arguments> longListWriterReaderPairsProvider() {
+ return longListWriterBasedPairsProvider(offHeapWriterFactory);
}
- @Test
- void updateListCreatedFromSnapshotPersistAndVerify() throws IOException {
- final int sampleSize = getSampleSize();
- try (final LongListOffHeap list = createFullyParameterizedLongListWith(
- sampleSize / 100, // 100 chunks, 100 longs each
- sampleSize + DEFAULT_NUM_LONGS_PER_CHUNK)) {
- list.updateValidRange(0, getSampleSize() - 1);
- for (int i = 0; i < getSampleSize(); i++) {
- list.put(i, i + 1);
- }
- final Path file = testDirectory.resolve("LongListOffHeap.ll");
- if (Files.exists(file)) {
- Files.delete(file);
- }
- // write longList data
- list.writeToFile(file);
-
- // restoring the list from the file
- try (LongListOffHeap longListFromFile = createLongListFromFile(file, CONFIGURATION)) {
-
- for (int i = 0; i < longListFromFile.size(); i++) {
- assertEquals(list.get(i), longListFromFile.get(i));
- }
- // write longList data again
- Files.delete(file);
- longListFromFile.writeToFile(file);
-
- // restoring the list from the file again
- try (LongListOffHeap longListFromFile2 = createLongListFromFile(file, CONFIGURATION)) {
- for (int i = 0; i < longListFromFile2.size(); i++) {
- assertEquals(longListFromFile.get(i), longListFromFile2.get(i));
- }
- }
- }
- }
- }
-
- @ParameterizedTest
- @ValueSource(ints = {2, 3, 4, 5, 10, 50})
- void minValidIndexRespectedInForEachTest(final int countDivider) throws InterruptedException {
- final int sampleSize = getSampleSize();
- try (final LongListOffHeap list = createFullyParameterizedLongListWith(
- sampleSize / 100, // 100 chunks, 100 longs each
- sampleSize + DEFAULT_NUM_LONGS_PER_CHUNK)) {
- list.updateValidRange(0, getSampleSize() - 1);
- for (int i = 1; i < getSampleSize(); i++) {
- list.put(i, i + 1);
- }
- final long minIndex = sampleSize / countDivider;
- list.updateValidRange(minIndex, list.size() - 1);
- final AtomicLong count = new AtomicLong(0);
- final Set<Long> keysInForEach = new HashSet<>();
- list.forEach((path, location) -> {
- count.incrementAndGet();
- keysInForEach.add(path);
- assertEquals(path + 1, location);
- });
- assertEquals(sampleSize - minIndex, count.get(), "Wrong number of valid index entries");
- assertEquals(sampleSize - minIndex, keysInForEach.size(), "Wrong number of valid index entries");
- }
+ /**
+ * Provides a stream of a writer paired with two reader implementations for testing
+ * cross-compatibility.
+ *
+ * Used for {@link AbstractLongListTest#testUpdateMinToTheLowerEnd}
+ *
+ * @return a stream of arguments containing a writer and two readers.
+ */
+ static Stream<Arguments> longListWriterSecondReaderPairsProvider() {
+ return longListWriterSecondReaderPairsProviderBase(longListWriterReaderPairsProvider());
}
- @Test
- void testFileFormatBackwardCompatibility_halfEmpty() throws URISyntaxException, IOException {
- final Path pathToList = ResourceLoader.getFile("test_data/LongListOffHeapHalfEmpty_10k_10pc_v1.ll");
- try (final LongListOffHeap longListFromFile = createLongListFromFile(pathToList, CONFIGURATION)) {
- // half-empty
- for (int i = 0; i < 5_000; i++) {
- assertEquals(0, longListFromFile.get(i));
- }
- // half-full
- for (int i = 5_000; i < 10_000; i++) {
- assertEquals(i, longListFromFile.get(i));
- }
- }
+ /**
+ * Provides writer-reader pairs combined with range configurations for testing.
+ *
+ * Used for {@link AbstractLongListTest#testWriteReadRangeElement}
+ *
+ * @return a stream of arguments for range-based parameterized tests
+ */
+ static Stream<Arguments> longListWriterReaderRangePairsProvider() {
+ return longListWriterReaderRangePairsProviderBase(longListWriterReaderPairsProvider());
}
- @Test
- void testUnsupportedVersion() throws URISyntaxException {
- final Path pathToList = ResourceLoader.getFile("test_data/LongListOffHeap_unsupported_version.ll");
- assertThrows(IOException.class, () -> {
- //noinspection EmptyTryBlock
- try (final LongListOffHeap ignored = new LongListOffHeap(pathToList, CONFIGURATION)) {
- // no op
- }
- });
- }
-
- @Test
- void testBigIndex() throws IOException {
- try (LongListOffHeap list = new LongListOffHeap()) {
- long bigIndex = Integer.MAX_VALUE + 1L;
- list.updateValidRange(bigIndex, bigIndex);
- list.put(bigIndex, 1);
-
- assertEquals(1, list.get(bigIndex));
- final Path file = testDirectory.resolve("LongListLargeIndex.ll");
- if (Files.exists(file)) {
- Files.delete(file);
- }
- list.writeToFile(file);
- try (LongListOffHeap listFromFile = new LongListOffHeap(file, CONFIGURATION)) {
- assertEquals(1, listFromFile.get(bigIndex));
- }
- }
+ /**
+ * Provides writer-reader pairs combined with chunk offset configurations (second set) for testing.
+ *
+ * Used for {@link AbstractLongListTest#testPersistListWithNonZeroMinValidIndex}
+ * and {@link AbstractLongListTest#testPersistShrunkList}
+ *
+ * @return a stream of arguments for chunk offset based parameterized tests
+ */
+ static Stream<Arguments> longListWriterReaderOffsetPairsProvider() {
+ return longListWriterReaderOffsetPairsProviderBase(longListWriterReaderPairsProvider());
}
}
diff --git a/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/LongListTest.java b/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/LongListTest.java
deleted file mode 100644
index 64cd948c1525..000000000000
--- a/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/LongListTest.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright (C) 2022-2024 Hedera Hashgraph, LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.swirlds.merkledb.collections;
-
-import static com.swirlds.merkledb.collections.LongListOffHeap.DEFAULT_RESERVED_BUFFER_LENGTH;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertNull;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyNoMoreInteractions;
-
-import java.util.Spliterator;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.function.LongConsumer;
-import java.util.stream.IntStream;
-import java.util.stream.Stream;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.MethodSource;
-
-class LongListTest {
-
- @Test
- void constructWithTooBigChunkSizeThrowsException() {
- assertThrows(
- ArithmeticException.class,
- () -> new LongListHeap((Integer.MAX_VALUE / 8) + 1, Integer.MAX_VALUE, 0),
- "Check that ArithmeticException of num longs per chuck is too big");
- assertThrows(
- IllegalArgumentException.class,
- () -> new LongListHeap(Integer.MAX_VALUE - 1, Integer.MAX_VALUE, 0),
- "Check that IllegalArgumentException of num longs per chuck is too big");
- }
-
- @Test
- void spliteratorEdgeCasesWork() {
- final LongConsumer firstConsumer = mock(LongConsumer.class);
- final LongConsumer secondConsumer = mock(LongConsumer.class);
-
- final LongListHeap list = new LongListHeap(32, 32, 0);
- list.updateValidRange(0, 3);
- for (int i = 1; i <= 3; i++) {
- list.put(i, i);
- }
-
- final LongListSpliterator subject = new LongListSpliterator(list);
-
- assertThrows(
- IllegalStateException.class,
- subject::getComparator,
- "An unordered spliterator should not be asked to provide an ordering");
-
- final Spliterator.OfLong firstSplit = subject.trySplit();
- assertNotNull(firstSplit, "firstSplit should not be null");
- assertEquals(2, subject.estimateSize(), "Splitting 4 elements should yield 2");
- final Spliterator.OfLong secondSplit = subject.trySplit();
- assertNotNull(secondSplit, "secondSplit should not be null");
- assertEquals(1, subject.estimateSize(), "Splitting 2 elements should yield 1");
- assertNull(subject.trySplit(), "Splitting 1 element should yield null");
-
- assertTrue(firstSplit.tryAdvance(firstConsumer), "First split should yield 0 first");
- verify(firstConsumer).accept(0);
- assertTrue(firstSplit.tryAdvance(firstConsumer), "First split should yield 1 second");
- verify(firstConsumer).accept(1);
- assertFalse(firstSplit.tryAdvance(firstConsumer), "First split should be exhausted after 2 yields");
-
- secondSplit.forEachRemaining(secondConsumer);
- verify(secondConsumer).accept(2);
- verifyNoMoreInteractions(secondConsumer);
- }
-
- @ParameterizedTest
- @MethodSource("provideLongLists")
- void test4089(final AbstractLongList<?> list) {
- list.updateValidRange(0, list.maxLongs - 1);
- // Issue #4089: ArrayIndexOutOfBoundsException from VirtualMap.put()
- final long maxLongs = list.maxLongs;
- final int defaultValue = -1;
- final AtomicBoolean done = new AtomicBoolean();
-
- IntStream.range(0, 2).parallel().forEach(thread -> {
- if (thread == 0) {
- // Getter
- while (!done.get()) {
- assertEquals(defaultValue, list.get(maxLongs - 2, defaultValue), "Value should be whats expected.");
- }
- } else {
- // Putter
- list.put(maxLongs - 1, 1);
- done.set(true);
- }
- });
- }
-
- static Stream provideLongLists() {
- final int numLongsPerChunk = 32;
- final int maxLongs = numLongsPerChunk * 4096;
- return Stream.of(
- new LongListHeap(numLongsPerChunk, maxLongs, 0),
- new LongListOffHeap(numLongsPerChunk, maxLongs, DEFAULT_RESERVED_BUFFER_LENGTH));
- }
-}
diff --git a/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/LongListValidRangeTest.java b/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/LongListValidRangeTest.java
index ad10dff79a40..23b34b0b0102 100644
--- a/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/LongListValidRangeTest.java
+++ b/platform-sdk/swirlds-merkledb/src/test/java/com/swirlds/merkledb/collections/LongListValidRangeTest.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2023-2024 Hedera Hashgraph, LLC
+ * Copyright (C) 2023-2025 Hedera Hashgraph, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -58,7 +58,7 @@ void testUpdateMinValidIndexNegativeValue(AbstractLongList<?> list) {
@ParameterizedTest
@MethodSource("defaultLongListProvider")
@DisplayName("Update min and max index to -1")
- void testUpdateMinMaxMinuxOne(AbstractLongList<?> list) {
+ void testUpdateMinMaxMinusOne(AbstractLongList<?> list) {
this.list = list;
assertDoesNotThrow(() -> list.updateValidRange(-1, -1));
}
diff --git a/platform-sdk/swirlds-merkledb/src/test/resources/test_data/LongListOffHeapHalfEmpty_10k_10pc_v1.ll b/platform-sdk/swirlds-merkledb/src/test/resources/test_data/LongListHalfEmpty_10k_10pc_v1.ll
similarity index 100%
rename from platform-sdk/swirlds-merkledb/src/test/resources/test_data/LongListOffHeapHalfEmpty_10k_10pc_v1.ll
rename to platform-sdk/swirlds-merkledb/src/test/resources/test_data/LongListHalfEmpty_10k_10pc_v1.ll
diff --git a/platform-sdk/swirlds-merkledb/src/test/resources/test_data/LongListOffHeap_unsupported_version.ll b/platform-sdk/swirlds-merkledb/src/test/resources/test_data/LongList_unsupported_version.ll
similarity index 100%
rename from platform-sdk/swirlds-merkledb/src/test/resources/test_data/LongListOffHeap_unsupported_version.ll
rename to platform-sdk/swirlds-merkledb/src/test/resources/test_data/LongList_unsupported_version.ll