ARROW-10880: [Java] Support compressing RecordBatch IPC buffers by LZ4 #8949
Changes from all commits
4d27ba2
47eb6dd
4b260cd
ed66aa4
7e274a6
b5143a1
0ddfa16
9394c0f
04d60a7
d4a6807
@@ -0,0 +1,51 @@ (new file: pom.xml for the arrow-compression module)
<?xml version="1.0"?>
<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor
  license agreements. See the NOTICE file distributed with this work for additional
  information regarding copyright ownership. The ASF licenses this file to
  You under the Apache License, Version 2.0 (the "License"); you may not use
  this file except in compliance with the License. You may obtain a copy of
  the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required
  by applicable law or agreed to in writing, software distributed under the
  License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
  OF ANY KIND, either express or implied. See the License for the specific
  language governing permissions and limitations under the License. -->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <parent>
    <groupId>org.apache.arrow</groupId>
    <artifactId>arrow-java-root</artifactId>
    <version>4.0.0-SNAPSHOT</version>
  </parent>
  <artifactId>arrow-compression</artifactId>
  <name>Arrow Compression</name>
  <description>(Experimental/Contrib) A library for working with the compression/decompression of Arrow data.</description>

  <dependencies>
    <dependency>
      <groupId>org.apache.arrow</groupId>
      <artifactId>arrow-vector</artifactId>
      <version>${project.version}</version>
      <classifier>${arrow.vector.classifier}</classifier>
    </dependency>
    <dependency>
      <groupId>org.apache.arrow</groupId>
      <artifactId>arrow-memory-core</artifactId>
      <version>${project.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.arrow</groupId>
      <artifactId>arrow-memory-unsafe</artifactId>
      <version>${project.version}</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.commons</groupId>
      <artifactId>commons-compress</artifactId>
      <version>1.20</version>
    </dependency>
    <dependency>
      <groupId>io.netty</groupId>
      <artifactId>netty-common</artifactId>
    </dependency>
  </dependencies>
</project>
@@ -0,0 +1,39 @@ (new file: CommonsCompressionFactory.java)
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.arrow.compression;

import org.apache.arrow.vector.compression.CompressionCodec;
import org.apache.arrow.vector.compression.CompressionUtil;

/**
 * A factory implementation based on Apache Commons library.
 */
public class CommonsCompressionFactory implements CompressionCodec.Factory {

  public static final CommonsCompressionFactory INSTANCE = new CommonsCompressionFactory();

  @Override
  public CompressionCodec createCodec(CompressionUtil.CodecType codecType) {
    switch (codecType) {
      case LZ4_FRAME:
        return new Lz4CompressionCodec();
      default:
        throw new IllegalArgumentException("Compression type not supported: " + codecType);
    }
  }
}
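For context, a minimal usage sketch of the factory (not part of this diff): it obtains the LZ4 frame codec and round-trips a small buffer. The class name and payload are illustrative assumptions, as is the use of RootAllocator; only createCodec, compress, and decompress come from the code in this PR.

import org.apache.arrow.compression.CommonsCompressionFactory;
import org.apache.arrow.memory.ArrowBuf;
import org.apache.arrow.memory.BufferAllocator;
import org.apache.arrow.memory.RootAllocator;
import org.apache.arrow.vector.compression.CompressionCodec;
import org.apache.arrow.vector.compression.CompressionUtil;

public class CompressionFactorySketch {
  public static void main(String[] args) throws Exception {
    try (BufferAllocator allocator = new RootAllocator()) {
      // Ask the factory for the LZ4 frame codec; unsupported codec types throw IllegalArgumentException.
      CompressionCodec codec = CommonsCompressionFactory.INSTANCE.createCodec(CompressionUtil.CodecType.LZ4_FRAME);

      byte[] data = new byte[1024]; // arbitrary illustration payload
      ArrowBuf raw = allocator.buffer(data.length);
      raw.setBytes(0, data);
      raw.writerIndex(data.length);

      // compress() takes ownership of the input buffer and closes it.
      ArrowBuf compressed = codec.compress(allocator, raw);
      // decompress() likewise closes its input and returns a newly allocated buffer.
      ArrowBuf decompressed = codec.decompress(allocator, compressed);
      System.out.println("round-tripped " + decompressed.writerIndex() + " bytes");
      decompressed.close();
    }
  }
}

Note that, as written in the PR, both compress and decompress close the buffer passed to them, so the caller only releases the buffer it receives back.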
@@ -0,0 +1,157 @@ (new file: Lz4CompressionCodec.java)
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.arrow.compression;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import org.apache.arrow.memory.ArrowBuf;
import org.apache.arrow.memory.BufferAllocator;
import org.apache.arrow.memory.util.MemoryUtil;
import org.apache.arrow.util.Preconditions;
import org.apache.arrow.vector.compression.CompressionCodec;
import org.apache.arrow.vector.compression.CompressionUtil;
import org.apache.commons.compress.compressors.lz4.FramedLZ4CompressorInputStream;
import org.apache.commons.compress.compressors.lz4.FramedLZ4CompressorOutputStream;
import org.apache.commons.compress.utils.IOUtils;

import io.netty.util.internal.PlatformDependent;
Review comment: Ahh, this is where netty is used. We don't have an arrow wrapper for it?
Reply: I guess not for now. Maybe we can have one in the future, so we can remove the dependency on Netty (and other dependencies on Netty as well).

/**
 * Compression codec for the LZ4 algorithm.
 */
public class Lz4CompressionCodec implements CompressionCodec {
Review comment: If you don't want to do it for this PR that is OK, but I think we should likely separate the functionality here into two parts: Arrow-specific, and core compression/decompression. The Arrow-specific parts (parsing/writing lengths into buffers) belong in the vector module. That way it would hopefully be easier to substitute different implementations.
Reply: Good suggestion. I have opened ARROW-11899 to track it.

  @Override
  public ArrowBuf compress(BufferAllocator allocator, ArrowBuf uncompressedBuffer) {
    Preconditions.checkArgument(uncompressedBuffer.writerIndex() <= Integer.MAX_VALUE,
        "The uncompressed buffer size exceeds the integer limit");

    if (uncompressedBuffer.writerIndex() == 0L) {
      // shortcut for empty buffer
      ArrowBuf compressedBuffer = allocator.buffer(CompressionUtil.SIZE_OF_UNCOMPRESSED_LENGTH);
      compressedBuffer.setLong(0, 0);
      compressedBuffer.writerIndex(CompressionUtil.SIZE_OF_UNCOMPRESSED_LENGTH);
      uncompressedBuffer.close();
      return compressedBuffer;
    }

    try {
      ArrowBuf compressedBuffer = doCompress(allocator, uncompressedBuffer);
      long compressedLength = compressedBuffer.writerIndex() - CompressionUtil.SIZE_OF_UNCOMPRESSED_LENGTH;
      if (compressedLength > uncompressedBuffer.writerIndex()) {
        // compressed buffer is larger, send the raw buffer
        compressedBuffer.close();
        compressedBuffer = CompressionUtil.packageRawBuffer(allocator, uncompressedBuffer);
      }

      uncompressedBuffer.close();
      return compressedBuffer;
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  private ArrowBuf doCompress(BufferAllocator allocator, ArrowBuf uncompressedBuffer) throws IOException {
    byte[] inBytes = new byte[(int) uncompressedBuffer.writerIndex()];
Review comment: We should open up a follow-up issue to see if we can find a good library that will work with native memory.
Reply: Sure. I have opened ARROW-11901 to track it. I also want to share some thoughts briefly: to use the native memory directly, we need a way to wrap it, and we need some third party library to achieve that. We also need to evaluate the performance thereafter, because the Commons-compress library also uses on-heap data extensively, so the copy between on-heap and off-heap data can be difficult to avoid.
    PlatformDependent.copyMemory(uncompressedBuffer.memoryAddress(), inBytes, 0, uncompressedBuffer.writerIndex());
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    try (InputStream in = new ByteArrayInputStream(inBytes);
         OutputStream out = new FramedLZ4CompressorOutputStream(baos)) {
      IOUtils.copy(in, out);
    }

    byte[] outBytes = baos.toByteArray();

    ArrowBuf compressedBuffer = allocator.buffer(CompressionUtil.SIZE_OF_UNCOMPRESSED_LENGTH + outBytes.length);

    long uncompressedLength = uncompressedBuffer.writerIndex();
    if (!MemoryUtil.LITTLE_ENDIAN) {
      uncompressedLength = Long.reverseBytes(uncompressedLength);
    }
    // first 8 bytes reserved for uncompressed length, according to the specification
    compressedBuffer.setLong(0, uncompressedLength);

    PlatformDependent.copyMemory(
        outBytes, 0, compressedBuffer.memoryAddress() + CompressionUtil.SIZE_OF_UNCOMPRESSED_LENGTH, outBytes.length);
    compressedBuffer.writerIndex(CompressionUtil.SIZE_OF_UNCOMPRESSED_LENGTH + outBytes.length);
    return compressedBuffer;
  }

  @Override
  public ArrowBuf decompress(BufferAllocator allocator, ArrowBuf compressedBuffer) {
    Preconditions.checkArgument(compressedBuffer.writerIndex() <= Integer.MAX_VALUE,
        "The compressed buffer size exceeds the integer limit");

    Preconditions.checkArgument(compressedBuffer.writerIndex() >= CompressionUtil.SIZE_OF_UNCOMPRESSED_LENGTH,
        "Not enough data to decompress.");

Review comment: When the compressedBuffer is empty, the task will fail at this check. Can we remove the check and directly return the compressedBuffer when it is empty?
Reply: According to the design, this should not happen: even if the buffer is empty, a header is attached containing the buffer length. We have a test case for this scenario.

    long decompressedLength = compressedBuffer.getLong(0);
Review comment: If we call compress on the native side and decompress the buffer on the Java side, how do we ensure it works correctly?
Reply: I think this memory layout is consistent with the C++ native implementation.
Review comment: It seems this only reserves the size but does not write the uncompressed size here.
Reply: The statement immediately above writes the size.
Review comment: I have fixed this problem. It was due to an optimization of the C++ implementation: when an array contains no nulls, its validity buffer can be empty.
Reply: Note this is allowed by the format spec.

    if (!MemoryUtil.LITTLE_ENDIAN) {
      decompressedLength = Long.reverseBytes(decompressedLength);
    }

    if (decompressedLength == 0L) {
      // shortcut for empty buffer
      compressedBuffer.close();
      return allocator.getEmpty();
    }

    if (decompressedLength == CompressionUtil.NO_COMPRESSION_LENGTH) {
      // no compression
      return CompressionUtil.extractUncompressedBuffer(compressedBuffer);
    }

    try {
      ArrowBuf decompressedBuffer = doDecompress(allocator, compressedBuffer);
      compressedBuffer.close();
Review comment: This is a known issue. We will fix it ASAP.
Reply: I also encountered this.
Reply: @garyelephant Do you have an example reproducing the problem?

      return decompressedBuffer;
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  private ArrowBuf doDecompress(BufferAllocator allocator, ArrowBuf compressedBuffer) throws IOException {
    long decompressedLength = compressedBuffer.getLong(0);
    if (!MemoryUtil.LITTLE_ENDIAN) {
      decompressedLength = Long.reverseBytes(decompressedLength);
    }

    byte[] inBytes = new byte[(int) (compressedBuffer.writerIndex() - CompressionUtil.SIZE_OF_UNCOMPRESSED_LENGTH)];
    PlatformDependent.copyMemory(
        compressedBuffer.memoryAddress() + CompressionUtil.SIZE_OF_UNCOMPRESSED_LENGTH, inBytes, 0, inBytes.length);
    ByteArrayOutputStream out = new ByteArrayOutputStream((int) decompressedLength);
    try (InputStream in = new FramedLZ4CompressorInputStream(new ByteArrayInputStream(inBytes))) {
      IOUtils.copy(in, out);
    }

    byte[] outBytes = out.toByteArray();
    ArrowBuf decompressedBuffer = allocator.buffer(outBytes.length);
    PlatformDependent.copyMemory(outBytes, 0, decompressedBuffer.memoryAddress(), outBytes.length);
    decompressedBuffer.writerIndex(decompressedLength);
    return decompressedBuffer;
  }

  @Override
  public CompressionUtil.CodecType getCodecType() {
    return CompressionUtil.CodecType.LZ4_FRAME;
  }
}
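To make the buffer layout discussed in the review threads concrete, here is a test-style sketch (not part of the PR; the class name, payload, and printouts are illustrative): it compresses a small buffer, reads back the 8-byte little-endian uncompressed-length prefix, and decompresses again. Only the Lz4CompressionCodec API above, plus RootAllocator, is assumed.

import java.nio.charset.StandardCharsets;

import org.apache.arrow.compression.Lz4CompressionCodec;
import org.apache.arrow.memory.ArrowBuf;
import org.apache.arrow.memory.BufferAllocator;
import org.apache.arrow.memory.RootAllocator;
import org.apache.arrow.memory.util.MemoryUtil;
import org.apache.arrow.vector.compression.CompressionCodec;

public class Lz4LayoutSketch {
  public static void main(String[] args) throws Exception {
    try (BufferAllocator allocator = new RootAllocator()) {
      CompressionCodec codec = new Lz4CompressionCodec();

      byte[] payload = "hello hello hello hello".getBytes(StandardCharsets.UTF_8);
      ArrowBuf raw = allocator.buffer(payload.length);
      raw.setBytes(0, payload);
      raw.writerIndex(payload.length);
      long originalLength = raw.writerIndex();

      ArrowBuf compressed = codec.compress(allocator, raw); // closes raw

      // The first 8 bytes hold the uncompressed length, stored little-endian.
      long storedLength = compressed.getLong(0);
      if (!MemoryUtil.LITTLE_ENDIAN) {
        storedLength = Long.reverseBytes(storedLength);
      }
      // For a payload this small the LZ4 frame may be larger than the input, in which
      // case compress() packages the raw bytes and stores NO_COMPRESSION_LENGTH instead.
      System.out.println("stored length prefix: " + storedLength + ", original: " + originalLength);

      ArrowBuf decompressed = codec.decompress(allocator, compressed); // closes compressed
      System.out.println("decompressed " + decompressed.writerIndex() + " bytes");
      decompressed.close();
    }
  }
}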
Review comment: Why is netty required?
Reply: The vector module requires this library. If we do not add it to the dependency list, we get an error when building with maven.
Reply: Hmm, I wonder why netty is required here though, I'll take a closer look.
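On the netty question above: the codec's only direct use of Netty is PlatformDependent.copyMemory for the heap/off-heap copies. As a hypothetical sketch (an assumption for discussion, not something proposed in the PR), those copies could be written with ArrowBuf's own getBytes/setBytes accessors instead, which would remove the direct usage; whether the netty-common dependency could then be dropped also depends on what arrow-vector itself pulls in, as the reply above notes.

import org.apache.arrow.memory.ArrowBuf;

// Hypothetical helper (assumption, not part of the PR): heap <-> off-heap copies done
// via ArrowBuf accessors rather than Netty's PlatformDependent.copyMemory.
final class BufferCopySketch {

  // Copy the readable bytes of an (off-heap) ArrowBuf into a new on-heap array.
  // Assumes the buffer fits in an int, mirroring the codec's own precondition.
  static byte[] toHeap(ArrowBuf buf) {
    byte[] bytes = new byte[(int) buf.writerIndex()];
    buf.getBytes(0, bytes);
    return bytes;
  }

  // Copy an on-heap array into an off-heap ArrowBuf starting at the given offset.
  static void toOffHeap(byte[] src, ArrowBuf dst, long dstOffset) {
    dst.setBytes(dstOffset, src);
  }

  private BufferCopySketch() {}
}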