Skip to content

Commit

Permalink
add atomic counter for recording compression results on the test world
Browse files Browse the repository at this point in the history
  • Loading branch information
douira committed Dec 16, 2023
1 parent 645fa0d commit 7553494
Show file tree
Hide file tree
Showing 2 changed files with 43 additions and 7 deletions.
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
package me.jellysquid.mods.sodium.client.render.chunk.translucent_sorting.bsp_tree;

import java.nio.IntBuffer;
import java.lang.Math;

import it.unimi.dsi.fastutil.ints.IntArrayList;
import it.unimi.dsi.fastutil.ints.IntConsumer;
Expand Down Expand Up @@ -91,8 +90,12 @@ static int[] compressIndexes(IntArrayList indexes) {
* 6x5b, 8x4b, 10x3b, 16x2b, 32x1b
*/
static int[] compressIndexes(IntArrayList indexes, boolean doSort) {
// TimingRecorder.incrementBy(Counter.COMPRESSION_CANDIDATES, 1);
// TimingRecorder.incrementBy(Counter.UNCOMPRESSED_SIZE, indexes.size());

// bail on short lists
if (indexes.size() < INDEX_COMPRESSION_MIN_LENGTH || indexes.size() > 1 << 10) {
// TimingRecorder.incrementBy(Counter.COMPRESSED_SIZE, indexes.size());
return indexes.toIntArray();
}

Expand Down Expand Up @@ -125,6 +128,7 @@ static int[] compressIndexes(IntArrayList indexes, boolean doSort) {
// stop if the first index is too large
int firstIndex = workingList.getInt(0);
if (firstIndex > 1 << 17) {
// TimingRecorder.incrementBy(Counter.COMPRESSED_SIZE, indexes.size());
return indexes.toIntArray();
}

Expand All @@ -140,13 +144,17 @@ static int[] compressIndexes(IntArrayList indexes, boolean doSort) {
compressed[1] = minDelta;

// System.out.println(
// "Densely compressed " + indexes.size() + " indexes to 2 ints, compression ratio " +
// (indexes.size() / 2));
// "Densely compressed " + indexes.size() + " indexes to 2 ints, compression
// ratio " +
// (indexes.size() / 2));
// TimingRecorder.incrementBy(Counter.COMPRESSION_SUCCESS, 1);
// TimingRecorder.incrementBy(Counter.COMPRESSED_SIZE, 2);
return compressed;
}

// stop if the width is too large (and compression would make no sense)
if (deltaRangeWidth > 16) {
// TimingRecorder.incrementBy(Counter.COMPRESSED_SIZE, indexes.size());
return indexes.toIntArray();
}

Expand Down Expand Up @@ -187,8 +195,11 @@ static int[] compressIndexes(IntArrayList indexes, boolean doSort) {
compressed[outputIndex++] = gatherInt;
}

// System.out.println("Compressed " + indexes.size() + " indexes to " + size + " ints, compression ratio "
// + (indexes.size() / size));
// System.out.println("Compressed " + indexes.size() + " indexes to " + size + "
// ints, compression ratio "
// + (indexes.size() / size));
// TimingRecorder.incrementBy(Counter.COMPRESSION_SUCCESS, 1);
// TimingRecorder.incrementBy(Counter.COMPRESSED_SIZE, size);
return compressed;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,15 @@
import it.unimi.dsi.fastutil.objects.ReferenceArrayList;

/**
* Compression results on 1992 sections:
* compression candidates 55084, compression performed 1202 (ratio: 2.1%)
* uncompressed size 397665, compressed size 170944 (ratio: 42.9%)
 * Removing the compression minimum size results in a total compression ratio of
* 34% and a 92% success rate. This isn't much of an improvement, it seems the
 * large candidates make up most of the compressible data. Increasing the
* minimum size to 16 lowers the success rate to 3.4% while the total
* compression ratio is 39%.
*
* test scenario: test world, 1991 events, total 538121 quads, 32 rd, 15 chunk
* builder threads
*
Expand Down Expand Up @@ -39,8 +48,14 @@ static record TimedEvent(int size, long ns) {

public static enum Counter {
UNIQUE_TRIGGERS,

QUADS,
BSP_SECTIONS
BSP_SECTIONS,

COMPRESSION_CANDIDATES,
COMPRESSION_SUCCESS,
COMPRESSED_SIZE,
UNCOMPRESSED_SIZE
}

private ReferenceArrayList<TimedEvent> events = new ReferenceArrayList<>(1000);
Expand Down Expand Up @@ -153,10 +168,20 @@ public static void resetAll() {
&& counters.containsKey(Counter.QUADS)
&& counters.containsKey(Counter.BSP_SECTIONS)) {
System.out.println("Triggers per quad: " +
((double)getCounter(Counter.UNIQUE_TRIGGERS).get() / getCounter(Counter.QUADS).get()));
((double) getCounter(Counter.UNIQUE_TRIGGERS).get() / getCounter(Counter.QUADS).get()));
System.out.println("Triggers per section: " +
(getCounter(Counter.UNIQUE_TRIGGERS).get() / getCounter(Counter.BSP_SECTIONS).get()));
}
if (counters.containsKey(Counter.COMPRESSION_CANDIDATES)
&& counters.containsKey(Counter.COMPRESSION_SUCCESS)
&& counters.containsKey(Counter.COMPRESSED_SIZE)
&& counters.containsKey(Counter.UNCOMPRESSED_SIZE)) {
System.out.println("Compressed size ratio: " +
((double) getCounter(Counter.COMPRESSED_SIZE).get() / getCounter(Counter.UNCOMPRESSED_SIZE).get()));
System.out.println("Compression success ratio: " +
((double) getCounter(Counter.COMPRESSION_SUCCESS).get()
/ getCounter(Counter.COMPRESSION_CANDIDATES).get()));
}

counters.clear();
}
Expand Down

0 comments on commit 7553494

Please sign in to comment.