[ISSUE-283][FEATURE] Support snappy compression/decompression #304

Merged (2 commits) on Nov 6, 2022
5 changes: 5 additions & 0 deletions common/pom.xml
@@ -99,6 +99,11 @@
<artifactId>zstd-jni</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.xerial.snappy</groupId>
<artifactId>snappy-java</artifactId>
<scope>provided</scope>
</dependency>
</dependencies>

<build>
@@ -31,6 +31,8 @@ public static Codec newInstance(RssConf rssConf) {
switch (type) {
case ZSTD:
return new ZstdCodec(rssConf.get(ZSTD_COMPRESSION_LEVEL));
case SNAPPY:
return new SnappyCodec();
case NOOP:
return new NoOpCodec();
case LZ4:
@@ -39,13 +41,24 @@ public static Codec newInstance(RssConf rssConf) {
}
}

/**
 * Decompress data from the source buffer into the destination buffer.
 *
 * @param src the buffer holding the compressed data
 * @param uncompressedLen the expected length of the decompressed data
 * @param dest the buffer that the decompressed data is written into
 * @param destOffset the offset in dest at which the decompressed data is written
 */
public abstract void decompress(ByteBuffer src, int uncompressedLen, ByteBuffer dest, int destOffset);

/**
* Compress bytes into a byte array.
*/
public abstract byte[] compress(byte[] src);

public enum Type {
LZ4,
ZSTD,
NOOP,
SNAPPY,
}
}
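A minimal round-trip sketch of the new codec with heap buffers (the wrapper class and payload below are made up for illustration; in practice a client obtains the codec through `Codec.newInstance` with `rss.client.io.compression.codec` set to `SNAPPY`):

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import org.apache.uniffle.common.compression.SnappyCodec;

public class SnappyRoundTripSketch {
  public static void main(String[] args) {
    byte[] original = "hello uniffle".getBytes(StandardCharsets.UTF_8);

    SnappyCodec codec = new SnappyCodec();
    byte[] compressed = codec.compress(original);

    // src and dest are both heap buffers, so the byte[]-based Snappy path is taken.
    ByteBuffer src = ByteBuffer.wrap(compressed);
    ByteBuffer dest = ByteBuffer.allocate(original.length);
    codec.decompress(src, original.length, dest, 0);

    System.out.println(new String(dest.array(), StandardCharsets.UTF_8)); // "hello uniffle"
  }
}
```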
@@ -0,0 +1,67 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.uniffle.common.compression;

import java.io.IOException;
import java.nio.ByteBuffer;

import org.xerial.snappy.Snappy;

import org.apache.uniffle.common.exception.RssException;

public class SnappyCodec extends Codec {
@Override
public void decompress(ByteBuffer src, int uncompressedLen, ByteBuffer dest, int destOffset) {
try {
if (!src.isDirect() && !dest.isDirect()) {
int size = Snappy.uncompress(src.array(), src.position(), src.limit() - src.position(), dest.array(),
destOffset);
if (size != uncompressedLen) {
throw new RssException(
    "Decompressed data size " + size + " does not equal the expected uncompressed size " + uncompressedLen);
}
return;
}
if (src.isDirect() && dest.isDirect()) {
if (destOffset != 0) {
throw new RssException(
"Snappy decompression does not support non-zero offset for destination direct ByteBuffer");
}
int size = Snappy.uncompress(src, dest);
if (size != uncompressedLen) {
throw new RssException(
    "Decompressed data size " + size + " does not equal the expected uncompressed size " + uncompressedLen);
}
return;
}
} catch (IOException e) {
throw new RssException("Failed to uncompress by Snappy", e);
}

throw new IllegalStateException("Snappy decompression requires src and dest ByteBuffers to be of the same type (both heap or both direct).");
}

@Override
public byte[] compress(byte[] src) {
try {
return Snappy.compress(src);
} catch (IOException e) {
throw new RssException("Failed to uncompress by Snappy", e);
}
}
}
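For the direct-buffer branch, a rough sketch under the same caveats (hypothetical wrapper class and sizes): both buffers must be direct and `destOffset` must be 0; mixing a direct buffer with a heap buffer falls through to the `IllegalStateException` at the end of `decompress`.

```java
import java.nio.ByteBuffer;

import org.apache.uniffle.common.compression.SnappyCodec;

public class SnappyDirectBufferSketch {
  public static void main(String[] args) {
    byte[] block = new byte[64 * 1024]; // pretend shuffle block payload
    SnappyCodec codec = new SnappyCodec();
    byte[] compressed = codec.compress(block);

    // Copy the compressed bytes into a direct buffer; with both src and dest direct,
    // decompress() delegates to Snappy.uncompress(ByteBuffer, ByteBuffer).
    ByteBuffer src = ByteBuffer.allocateDirect(compressed.length);
    src.put(compressed);
    src.flip();

    ByteBuffer dest = ByteBuffer.allocateDirect(block.length);
    codec.decompress(src, block.length, dest, 0); // destOffset must be 0 on this path
  }
}
```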
@@ -29,7 +29,7 @@ public class RssClientConf {
.enumType(Codec.Type.class)
.defaultValue(LZ4)
.withDescription("The compression codec is used to compress the shuffle data. "
+ "Default codec is `LZ4`, `ZSTD` also can be used.");
+ "Default codec is `LZ4`. Other options are`ZSTD` and `SNAPPY`.");

public static final ConfigOption<Integer> ZSTD_COMPRESSION_LEVEL = ConfigOptions
.key("rss.client.io.compression.zstd.level")
@@ -35,7 +35,7 @@ public class CompressionTest {

static List<Arguments> testCompression() {
int[] sizes = {1, 1024, 128 * 1024, 512 * 1024, 1024 * 1024, 4 * 1024 * 1024};
Codec.Type[] types = {Codec.Type.ZSTD, Codec.Type.LZ4};
Codec.Type[] types = {Codec.Type.ZSTD, Codec.Type.LZ4, Codec.Type.SNAPPY};

List<Arguments> arguments = new ArrayList<>();
for (int size : sizes) {
2 changes: 1 addition & 1 deletion docs/client_guide.md
@@ -100,7 +100,7 @@ These configurations are shared by all types of clients.
|<client_type>.rss.client.assignment.tags|-|The comma-separated list of tags for deciding assignment shuffle servers. Notice that the SHUFFLE_SERVER_VERSION will always be used as an assignment tag whether this conf is set or not|
|<client_type>.rss.client.data.commit.pool.size|The number of assigned shuffle servers|The thread pool size for sending commit to shuffle servers|
|<client_type>.rss.client.assignment.shuffle.nodes.max|-1|The number of required assignment shuffle servers. If it is less than or equal to 0, or greater than the coordinator's config of "rss.coordinator.shuffle.nodes.max", the value of "rss.coordinator.shuffle.nodes.max" is used by default|
|<client_type>.rss.client.io.compression.codec|lz4|The compression codec is used to compress the shuffle data. Default codec is `lz4`, `zstd` also can be used.|
|<client_type>.rss.client.io.compression.codec|lz4|The compression codec used to compress the shuffle data. Default codec is `lz4`. Other options are `ZSTD` and `SNAPPY`.|
|<client_type>.rss.client.io.compression.zstd.level|3|The zstd compression level, the default level is 3|
|<client_type>.rss.client.shuffle.data.distribution.type|NORMAL|The type of partition shuffle data distribution, including normal and local_order. The default value is normal. Now this config is only valid in Spark3.x|
Notice:
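For the `<client_type>.rss.client.io.compression.codec` row above, a Spark job would presumably set `spark.rss.client.io.compression.codec=SNAPPY`, assuming the usual `spark.` prefix stands in for the `<client_type>` placeholder.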
7 changes: 7 additions & 0 deletions pom.xml
@@ -76,6 +76,7 @@
<spotbugs-maven-plugin.version>4.7.0.0</spotbugs-maven-plugin.version>
<system-rules.version>1.19.0</system-rules.version>
<zstd-jni.version>1.5.2-3</zstd-jni.version>
<snappy-java.version>1.1.8.4</snappy-java.version>
<test.redirectToFile>true</test.redirectToFile>
<trimStackTrace>false</trimStackTrace>
</properties>
@@ -607,6 +608,12 @@
<artifactId>zstd-jni</artifactId>
<version>${zstd-jni.version}</version>
</dependency>

<dependency>
<groupId>org.xerial.snappy</groupId>
<artifactId>snappy-java</artifactId>
<version>${snappy-java.version}</version>
</dependency>
</dependencies>
</dependencyManagement>
