diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java
index 5a2236c66ba11..f37864a0f5393 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java
@@ -30,7 +30,7 @@
 import org.apache.spark.sql.catalyst.util.DateTimeUtils;
 import org.apache.spark.sql.execution.vectorized.ColumnVector;
-import org.apache.spark.sql.execution.vectorized.MutableColumnVector;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
 import org.apache.spark.sql.types.DataTypes;
 import org.apache.spark.sql.types.DecimalType;
@@ -136,9 +136,9 @@ private boolean next() throws IOException {
   /**
    * Reads `total` values from this columnReader into column.
    */
-  void readBatch(int total, MutableColumnVector column) throws IOException {
+  void readBatch(int total, WritableColumnVector column) throws IOException {
     int rowId = 0;
-    MutableColumnVector dictionaryIds = null;
+    WritableColumnVector dictionaryIds = null;
     if (dictionary != null) {
       // SPARK-16334: We only maintain a single dictionary per row batch, so that it can be used to
       // decode all previous dictionary encoded pages if we ever encounter a non-dictionary encoded
@@ -223,7 +223,7 @@ void readBatch(int total, WritableColumnVector column) throws IOException {
   private void decodeDictionaryIds(
       int rowId,
       int num,
-      MutableColumnVector column,
+      WritableColumnVector column,
       ColumnVector dictionaryIds) {
     switch (descriptor.getType()) {
       case INT32:
@@ -350,13 +350,13 @@ private void decodeDictionaryIds(
    * is guaranteed that num is smaller than the number of values left in the current page.
    */
-  private void readBooleanBatch(int rowId, int num, MutableColumnVector column) throws IOException {
+  private void readBooleanBatch(int rowId, int num, WritableColumnVector column) throws IOException {
     assert(column.dataType() == DataTypes.BooleanType);
     defColumn.readBooleans(
         num, column, rowId, maxDefLevel, (VectorizedValuesReader) dataColumn);
   }

-  private void readIntBatch(int rowId, int num, MutableColumnVector column) throws IOException {
+  private void readIntBatch(int rowId, int num, WritableColumnVector column) throws IOException {
     // This is where we implement support for the valid type conversions.
     // TODO: implement remaining type conversions
     if (column.dataType() == DataTypes.IntegerType || column.dataType() == DataTypes.DateType ||
@@ -374,7 +374,7 @@ private void readIntBatch(int rowId, int num, MutableColumnVector column) throws
     }
   }

-  private void readLongBatch(int rowId, int num, MutableColumnVector column) throws IOException {
+  private void readLongBatch(int rowId, int num, WritableColumnVector column) throws IOException {
     // This is where we implement support for the valid type conversions.
     if (column.dataType() == DataTypes.LongType ||
         DecimalType.is64BitDecimalType(column.dataType())) {
@@ -393,7 +393,7 @@ private void readLongBatch(int rowId, int num, MutableColumnVector column) throw
     }
   }

-  private void readFloatBatch(int rowId, int num, MutableColumnVector column) throws IOException {
+  private void readFloatBatch(int rowId, int num, WritableColumnVector column) throws IOException {
     // This is where we implement support for the valid type conversions.
     // TODO: support implicit cast to double?
     if (column.dataType() == DataTypes.FloatType) {
@@ -404,7 +404,7 @@ private void readFloatBatch(int rowId, int num, MutableColumnVector column) thro
     }
   }

-  private void readDoubleBatch(int rowId, int num, MutableColumnVector column) throws IOException {
+  private void readDoubleBatch(int rowId, int num, WritableColumnVector column) throws IOException {
     // This is where we implement support for the valid type conversions.
     // TODO: implement remaining type conversions
     if (column.dataType() == DataTypes.DoubleType) {
@@ -415,7 +415,7 @@ private void readDoubleBatch(int rowId, int num, MutableColumnVector column) thr
     }
   }

-  private void readBinaryBatch(int rowId, int num, MutableColumnVector column) throws IOException {
+  private void readBinaryBatch(int rowId, int num, WritableColumnVector column) throws IOException {
     // This is where we implement support for the valid type conversions.
     // TODO: implement remaining type conversions
     VectorizedValuesReader data = (VectorizedValuesReader) dataColumn;
@@ -439,7 +439,7 @@ private void readBinaryBatch(int rowId, int num, MutableColumnVector column) thr
   private void readFixedLenByteArrayBatch(
       int rowId,
       int num,
-      MutableColumnVector column,
+      WritableColumnVector column,
       int arrayLen) throws IOException {
     VectorizedValuesReader data = (VectorizedValuesReader) dataColumn;
     // This is where we implement support for the valid type conversions.
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java
index 910f004592477..0cacf0c9c93a5 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java
@@ -31,7 +31,7 @@
 import org.apache.spark.sql.catalyst.InternalRow;
 import org.apache.spark.sql.execution.vectorized.ColumnVectorUtils;
 import org.apache.spark.sql.execution.vectorized.ColumnarBatch;
-import org.apache.spark.sql.execution.vectorized.MutableColumnVector;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
 import org.apache.spark.sql.execution.vectorized.OffHeapColumnVector;
 import org.apache.spark.sql.execution.vectorized.OnHeapColumnVector;
 import org.apache.spark.sql.types.StructField;
@@ -93,7 +93,7 @@ public class VectorizedParquetRecordReader extends SpecificParquetRecordReaderBa
    */
   private ColumnarBatch columnarBatch;

-  private MutableColumnVector[] columnVectors;
+  private WritableColumnVector[] columnVectors;

   /**
    * If true, this class returns batches instead of rows.
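
For context on how these `columnVectors` come into being: a minimal sketch (not part of this patch) of the allocation pattern the rename preserves, mirroring the `allocate()` helpers in the Scala test diffs further down. The `VectorAllocation` class name is hypothetical; the constructors are those exercised by the tests below.

```java
import org.apache.spark.memory.MemoryMode;
import org.apache.spark.sql.execution.vectorized.OffHeapColumnVector;
import org.apache.spark.sql.execution.vectorized.OnHeapColumnVector;
import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
import org.apache.spark.sql.types.DataType;

final class VectorAllocation {
  /** Returns a vector backed by unsafe off-heap memory or by JVM arrays. */
  static WritableColumnVector allocate(int capacity, DataType dt, MemoryMode memMode) {
    return (memMode == MemoryMode.OFF_HEAP)
        ? new OffHeapColumnVector(capacity, dt)
        : new OnHeapColumnVector(capacity, dt);
  }
}
```
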
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedPlainValuesReader.java b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedPlainValuesReader.java
index 78c86680bebbc..5b75f719339fb 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedPlainValuesReader.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedPlainValuesReader.java
@@ -20,7 +20,7 @@
 import java.nio.ByteBuffer;
 import java.nio.ByteOrder;

-import org.apache.spark.sql.execution.vectorized.MutableColumnVector;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
 import org.apache.spark.unsafe.Platform;

 import org.apache.parquet.column.values.ValuesReader;
@@ -56,7 +56,7 @@ public void skip() {
   }

   @Override
-  public final void readBooleans(int total, MutableColumnVector c, int rowId) {
+  public final void readBooleans(int total, WritableColumnVector c, int rowId) {
     // TODO: properly vectorize this
     for (int i = 0; i < total; i++) {
       c.putBoolean(rowId + i, readBoolean());
@@ -64,31 +64,31 @@ public final void readBooleans(int total, MutableColumnVector c, int rowId) {
   }

   @Override
-  public final void readIntegers(int total, MutableColumnVector c, int rowId) {
+  public final void readIntegers(int total, WritableColumnVector c, int rowId) {
     c.putIntsLittleEndian(rowId, total, buffer, offset - Platform.BYTE_ARRAY_OFFSET);
     offset += 4 * total;
   }

   @Override
-  public final void readLongs(int total, MutableColumnVector c, int rowId) {
+  public final void readLongs(int total, WritableColumnVector c, int rowId) {
     c.putLongsLittleEndian(rowId, total, buffer, offset - Platform.BYTE_ARRAY_OFFSET);
     offset += 8 * total;
   }

   @Override
-  public final void readFloats(int total, MutableColumnVector c, int rowId) {
+  public final void readFloats(int total, WritableColumnVector c, int rowId) {
     c.putFloats(rowId, total, buffer, offset - Platform.BYTE_ARRAY_OFFSET);
     offset += 4 * total;
   }

   @Override
-  public final void readDoubles(int total, MutableColumnVector c, int rowId) {
+  public final void readDoubles(int total, WritableColumnVector c, int rowId) {
     c.putDoubles(rowId, total, buffer, offset - Platform.BYTE_ARRAY_OFFSET);
     offset += 8 * total;
   }

   @Override
-  public final void readBytes(int total, MutableColumnVector c, int rowId) {
+  public final void readBytes(int total, WritableColumnVector c, int rowId) {
     for (int i = 0; i < total; i++) {
       // Bytes are stored as a 4-byte little endian int. Just read the first byte.
       // TODO: consider pushing this in ColumnVector by adding a readBytes with a stride.
@@ -159,7 +159,7 @@ public final double readDouble() {
   }

   @Override
-  public final void readBinary(int total, MutableColumnVector v, int rowId) {
+  public final void readBinary(int total, WritableColumnVector v, int rowId) {
     for (int i = 0; i < total; i++) {
       int len = readInteger();
       int start = offset;
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedRleValuesReader.java b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedRleValuesReader.java
index b79d73a227365..fc7fa70c39419 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedRleValuesReader.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedRleValuesReader.java
@@ -25,7 +25,7 @@
 import org.apache.parquet.io.ParquetDecodingException;
 import org.apache.parquet.io.api.Binary;

-import org.apache.spark.sql.execution.vectorized.MutableColumnVector;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;

 /**
  * A values reader for Parquet's run-length encoded data. This is based off of the version in
@@ -179,7 +179,7 @@ public int readInteger() {
    */
   public void readIntegers(
       int total,
-      MutableColumnVector c,
+      WritableColumnVector c,
       int rowId,
       int level,
       VectorizedValuesReader data) {
@@ -214,7 +214,7 @@ public void readIntegers(
   // TODO: can this code duplication be removed without a perf penalty?
   public void readBooleans(
       int total,
-      MutableColumnVector c,
+      WritableColumnVector c,
       int rowId,
       int level,
       VectorizedValuesReader data) {
@@ -248,7 +248,7 @@ public void readBooleans(
   public void readBytes(
       int total,
-      MutableColumnVector c,
+      WritableColumnVector c,
       int rowId,
       int level,
       VectorizedValuesReader data) {
@@ -282,7 +282,7 @@ public void readBytes(
   public void readShorts(
       int total,
-      MutableColumnVector c,
+      WritableColumnVector c,
       int rowId,
       int level,
       VectorizedValuesReader data) {
@@ -318,7 +318,7 @@ public void readShorts(
   public void readLongs(
       int total,
-      MutableColumnVector c,
+      WritableColumnVector c,
       int rowId,
       int level,
       VectorizedValuesReader data) {
@@ -352,7 +352,7 @@ public void readLongs(
   public void readFloats(
       int total,
-      MutableColumnVector c,
+      WritableColumnVector c,
       int rowId,
       int level,
       VectorizedValuesReader data) {
@@ -386,7 +386,7 @@ public void readFloats(
   public void readDoubles(
       int total,
-      MutableColumnVector c,
+      WritableColumnVector c,
       int rowId,
       int level,
       VectorizedValuesReader data) {
@@ -420,7 +420,7 @@ public void readDoubles(
   public void readBinarys(
       int total,
-      MutableColumnVector c,
+      WritableColumnVector c,
       int rowId,
       int level,
       VectorizedValuesReader data) {
@@ -458,8 +458,8 @@ public void readBinarys(
    */
   public void readIntegers(
       int total,
-      MutableColumnVector values,
-      MutableColumnVector nulls,
+      WritableColumnVector values,
+      WritableColumnVector nulls,
       int rowId,
       int level,
       VectorizedValuesReader data) {
@@ -496,7 +496,7 @@ public void readIntegers(
   // IDs. This is different than the above APIs that decodes definitions levels along with values.
   // Since this is only used to decode dictionary IDs, only decoding integers is supported.
   @Override
-  public void readIntegers(int total, MutableColumnVector c, int rowId) {
+  public void readIntegers(int total, WritableColumnVector c, int rowId) {
     int left = total;
     while (left > 0) {
       if (this.currentCount == 0) this.readNextGroup();
@@ -522,32 +522,32 @@ public byte readByte() {
   }

   @Override
-  public void readBytes(int total, MutableColumnVector c, int rowId) {
+  public void readBytes(int total, WritableColumnVector c, int rowId) {
     throw new UnsupportedOperationException("only readInts is valid.");
   }

   @Override
-  public void readLongs(int total, MutableColumnVector c, int rowId) {
+  public void readLongs(int total, WritableColumnVector c, int rowId) {
     throw new UnsupportedOperationException("only readInts is valid.");
   }

   @Override
-  public void readBinary(int total, MutableColumnVector c, int rowId) {
+  public void readBinary(int total, WritableColumnVector c, int rowId) {
     throw new UnsupportedOperationException("only readInts is valid.");
   }

   @Override
-  public void readBooleans(int total, MutableColumnVector c, int rowId) {
+  public void readBooleans(int total, WritableColumnVector c, int rowId) {
     throw new UnsupportedOperationException("only readInts is valid.");
   }

   @Override
-  public void readFloats(int total, MutableColumnVector c, int rowId) {
+  public void readFloats(int total, WritableColumnVector c, int rowId) {
     throw new UnsupportedOperationException("only readInts is valid.");
   }

   @Override
-  public void readDoubles(int total, MutableColumnVector c, int rowId) {
+  public void readDoubles(int total, WritableColumnVector c, int rowId) {
     throw new UnsupportedOperationException("only readInts is valid.");
   }
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedValuesReader.java b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedValuesReader.java
index c1b86daa815f9..57d92ae27ece8 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedValuesReader.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedValuesReader.java
@@ -17,7 +17,7 @@

 package org.apache.spark.sql.execution.datasources.parquet;

-import org.apache.spark.sql.execution.vectorized.MutableColumnVector;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;

 import org.apache.parquet.io.api.Binary;
@@ -37,11 +37,11 @@ public interface VectorizedValuesReader {
   /*
    * Reads `total` values into `c` start at `c[rowId]`
    */
-  void readBooleans(int total, MutableColumnVector c, int rowId);
-  void readBytes(int total, MutableColumnVector c, int rowId);
-  void readIntegers(int total, MutableColumnVector c, int rowId);
-  void readLongs(int total, MutableColumnVector c, int rowId);
-  void readFloats(int total, MutableColumnVector c, int rowId);
-  void readDoubles(int total, MutableColumnVector c, int rowId);
-  void readBinary(int total, MutableColumnVector c, int rowId);
+  void readBooleans(int total, WritableColumnVector c, int rowId);
+  void readBytes(int total, WritableColumnVector c, int rowId);
+  void readIntegers(int total, WritableColumnVector c, int rowId);
+  void readLongs(int total, WritableColumnVector c, int rowId);
+  void readFloats(int total, WritableColumnVector c, int rowId);
+  void readDoubles(int total, WritableColumnVector c, int rowId);
+  void readBinary(int total, WritableColumnVector c, int rowId);
 }
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnVectorUtils.java b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnVectorUtils.java
index 7030bfd235a3e..adb859ed17757 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnVectorUtils.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnVectorUtils.java
@@ -40,7 +40,7 @@ public class ColumnVectorUtils {
   /**
    * Populates the entire `col` with `row[fieldIdx]`
    */
-  public static void populate(MutableColumnVector col, InternalRow row, int fieldIdx) {
+  public static void populate(WritableColumnVector col, InternalRow row, int fieldIdx) {
     int capacity = col.capacity;
     DataType t = col.dataType();
@@ -115,7 +115,7 @@ public static Object toPrimitiveJavaArray(ColumnVector.Array array) {
     }
   }

-  private static void appendValue(MutableColumnVector dst, DataType t, Object o) {
+  private static void appendValue(WritableColumnVector dst, DataType t, Object o) {
     if (o == null) {
       if (t instanceof CalendarIntervalType) {
         dst.appendStruct(true);
@@ -165,7 +165,7 @@ private static void appendValue(MutableColumnVector dst, DataType t, Object o) {
     }
   }

-  private static void appendValue(MutableColumnVector dst, DataType t, Row src, int fieldIdx) {
+  private static void appendValue(WritableColumnVector dst, DataType t, Row src, int fieldIdx) {
     if (t instanceof ArrayType) {
       ArrayType at = (ArrayType)t;
       if (src.isNullAt(fieldIdx)) {
@@ -199,7 +199,7 @@ private static void appendValue(MutableColumnVector dst, DataType t, Row src, in
   public static ColumnarBatch toBatch(
       StructType schema, MemoryMode memMode, Iterator<Row> row) {
     int capacity = ColumnarBatch.DEFAULT_BATCH_SIZE;
-    MutableColumnVector[] columnVectors;
+    WritableColumnVector[] columnVectors;
     if (memMode == MemoryMode.OFF_HEAP) {
       columnVectors = OffHeapColumnVector.allocateColumns(capacity, schema);
     } else {
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnarBatch.java b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnarBatch.java
index 6aba225574912..91ef53e23ff37 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnarBatch.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnarBatch.java
@@ -293,68 +293,68 @@ public void update(int ordinal, Object value) {

     @Override
     public void setNullAt(int ordinal) {
-      getColumnAsMutable(ordinal).putNull(rowId);
+      getColumnAsWritable(ordinal).putNull(rowId);
     }

     @Override
     public void setBoolean(int ordinal, boolean value) {
-      MutableColumnVector column = getColumnAsMutable(ordinal);
+      WritableColumnVector column = getColumnAsWritable(ordinal);
       column.putNotNull(rowId);
       column.putBoolean(rowId, value);
     }

     @Override
     public void setByte(int ordinal, byte value) {
-      MutableColumnVector column = getColumnAsMutable(ordinal);
+      WritableColumnVector column = getColumnAsWritable(ordinal);
       column.putNotNull(rowId);
       column.putByte(rowId, value);
     }

     @Override
     public void setShort(int ordinal, short value) {
-      MutableColumnVector column = getColumnAsMutable(ordinal);
+      WritableColumnVector column = getColumnAsWritable(ordinal);
       column.putNotNull(rowId);
       column.putShort(rowId, value);
     }

     @Override
     public void setInt(int ordinal, int value) {
-      MutableColumnVector column = getColumnAsMutable(ordinal);
+      WritableColumnVector column = getColumnAsWritable(ordinal);
       column.putNotNull(rowId);
       column.putInt(rowId, value);
     }

     @Override
     public void setLong(int ordinal, long value) {
-      MutableColumnVector column = getColumnAsMutable(ordinal);
+      WritableColumnVector column = getColumnAsWritable(ordinal);
       column.putNotNull(rowId);
       column.putLong(rowId, value);
     }

     @Override
     public void setFloat(int ordinal, float value) {
-      MutableColumnVector column = getColumnAsMutable(ordinal);
+      WritableColumnVector column = getColumnAsWritable(ordinal);
       column.putNotNull(rowId);
       column.putFloat(rowId, value);
     }

     @Override
     public void setDouble(int ordinal, double value) {
-      MutableColumnVector column = getColumnAsMutable(ordinal);
+      WritableColumnVector column = getColumnAsWritable(ordinal);
       column.putNotNull(rowId);
       column.putDouble(rowId, value);
     }

     @Override
     public void setDecimal(int ordinal, Decimal value, int precision) {
-      MutableColumnVector column = getColumnAsMutable(ordinal);
+      WritableColumnVector column = getColumnAsWritable(ordinal);
       column.putNotNull(rowId);
       column.putDecimal(rowId, value, precision);
     }

-    private MutableColumnVector getColumnAsMutable(int ordinal) {
-      assert (columns[ordinal] instanceof MutableColumnVector);
-      MutableColumnVector column = (MutableColumnVector) columns[ordinal];
+    private WritableColumnVector getColumnAsWritable(int ordinal) {
+      assert (columns[ordinal] instanceof WritableColumnVector);
+      WritableColumnVector column = (WritableColumnVector) columns[ordinal];
       assert (!column.isConstant);
       return column;
     }
@@ -401,8 +401,8 @@ public void remove() {
    */
   public void reset() {
     for (int i = 0; i < numCols(); ++i) {
-      if (columns[i] instanceof MutableColumnVector) {
-        ((MutableColumnVector) columns[i]).reset();
+      if (columns[i] instanceof WritableColumnVector) {
+        ((WritableColumnVector) columns[i]).reset();
       }
     }
     if (this.numRowsFiltered > 0) {
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/OffHeapColumnVector.java b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/OffHeapColumnVector.java
index 0fe5e99bb4053..0ec256967bed7 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/OffHeapColumnVector.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/OffHeapColumnVector.java
@@ -25,7 +25,7 @@
 /**
  * Column data backed using offheap memory.
  */
-public final class OffHeapColumnVector extends MutableColumnVector {
+public final class OffHeapColumnVector extends WritableColumnVector {

   private static final boolean bigEndianPlatform =
     ByteOrder.nativeOrder().equals(ByteOrder.BIG_ENDIAN);
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/OnHeapColumnVector.java b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/OnHeapColumnVector.java
index 69f6b71e67233..288a15f74abcf 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/OnHeapColumnVector.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/OnHeapColumnVector.java
@@ -27,7 +27,7 @@
  * A column backed by an in memory JVM array. This stores the NULLs as a byte per value
 * and a java array for the values.
  */
-public final class OnHeapColumnVector extends MutableColumnVector {
+public final class OnHeapColumnVector extends WritableColumnVector {

   private static final boolean bigEndianPlatform =
     ByteOrder.nativeOrder().equals(ByteOrder.BIG_ENDIAN);
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/MutableColumnVector.java b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/WritableColumnVector.java
similarity index 95%
rename from sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/MutableColumnVector.java
rename to sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/WritableColumnVector.java
index 6944b23c13989..8ef74f2f748cb 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/MutableColumnVector.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/WritableColumnVector.java
@@ -38,7 +38,7 @@
 * A ColumnVector should be considered immutable once originally created. In other words, it is not
 * valid to call put APIs after reads until reset() is called.
 */
-public abstract class MutableColumnVector extends ColumnVector {
+public abstract class WritableColumnVector extends ColumnVector {

   /**
    * Resets this column for writing. The currently stored values are no longer accessible.
@@ -48,7 +48,7 @@ public void reset() {
     if (childColumns != null) {
       for (ColumnVector c: childColumns) {
-        ((MutableColumnVector) c).reset();
+        ((WritableColumnVector) c).reset();
       }
     }
     numNulls = 0;
@@ -511,9 +511,9 @@ public final int appendStruct(boolean isNull) {
       appendNull();
       for (ColumnVector c: childColumns) {
         if (c.type instanceof StructType) {
-          ((MutableColumnVector) c).appendStruct(true);
+          ((WritableColumnVector) c).appendStruct(true);
         } else {
-          ((MutableColumnVector) c).appendNull();
+          ((WritableColumnVector) c).appendNull();
         }
       }
     } else {
@@ -526,14 +526,14 @@ public final int appendStruct(boolean isNull) {
    * Returns the data for the underlying array.
    */
   @Override
-  public MutableColumnVector arrayData() { return (MutableColumnVector) childColumns[0]; }
+  public WritableColumnVector arrayData() { return (WritableColumnVector) childColumns[0]; }

   /**
    * Returns the ordinal's child data column.
    */
   @Override
-  public MutableColumnVector getChildColumn(int ordinal) {
-    return (MutableColumnVector) childColumns[ordinal];
+  public WritableColumnVector getChildColumn(int ordinal) {
+    return (WritableColumnVector) childColumns[ordinal];
   }

   /**
@@ -578,8 +578,8 @@ public void setDictionary(Dictionary dictionary) {
   /**
    * Reserve a integer column for ids of dictionary.
    */
-  public MutableColumnVector reserveDictionaryIds(int capacity) {
-    MutableColumnVector dictionaryIds = (MutableColumnVector) this.dictionaryIds;
+  public WritableColumnVector reserveDictionaryIds(int capacity) {
+    WritableColumnVector dictionaryIds = (WritableColumnVector) this.dictionaryIds;
     if (dictionaryIds == null) {
       dictionaryIds = reserveNewColumn(capacity, DataTypes.IntegerType);
       this.dictionaryIds = dictionaryIds;
@@ -594,14 +594,14 @@ public MutableColumnVector reserveDictionaryIds(int capacity) {
    * Returns the underlying integer column for ids of dictionary.
    */
   @Override
-  public MutableColumnVector getDictionaryIds() {
-    return (MutableColumnVector) dictionaryIds;
+  public WritableColumnVector getDictionaryIds() {
+    return (WritableColumnVector) dictionaryIds;
   }

   /**
    * Reserve a new column.
    */
-  protected abstract MutableColumnVector reserveNewColumn(int capacity, DataType type);
+  protected abstract WritableColumnVector reserveNewColumn(int capacity, DataType type);

   /**
    * Initialize child columns.
@@ -647,7 +647,7 @@ protected void initialize() {
    * Sets up the common state and also handles creating the child columns if this is a nested
    * type.
    */
-  protected MutableColumnVector(int capacity, DataType type) {
+  protected WritableColumnVector(int capacity, DataType type) {
     super(capacity, type);
   }
 }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchBenchmark.scala
index 4b6a31135fce9..1331f157363b0 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchBenchmark.scala
@@ -24,9 +24,9 @@ import scala.util.Random
 import org.apache.spark.memory.MemoryMode
 import org.apache.spark.sql.catalyst.expressions.UnsafeRow
 import org.apache.spark.sql.execution.vectorized.ColumnVector
-import org.apache.spark.sql.execution.vectorized.MutableColumnVector
 import org.apache.spark.sql.execution.vectorized.OffHeapColumnVector
 import org.apache.spark.sql.execution.vectorized.OnHeapColumnVector
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector
 import org.apache.spark.sql.types.{BinaryType, DataType, IntegerType}
 import org.apache.spark.unsafe.Platform
 import org.apache.spark.util.Benchmark
@@ -37,7 +37,7 @@ import org.apache.spark.util.collection.BitSet
  */
 object ColumnarBatchBenchmark {

-  def allocate(capacity: Int, dt: DataType, memMode: MemoryMode): MutableColumnVector = {
+  def allocate(capacity: Int, dt: DataType, memMode: MemoryMode): WritableColumnVector = {
     if (memMode == MemoryMode.OFF_HEAP) {
       new OffHeapColumnVector(capacity, dt)
     } else {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchSuite.scala
index 857ea9727d53e..08ccbd628cf8f 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchSuite.scala
@@ -35,7 +35,7 @@ import org.apache.spark.unsafe.types.CalendarInterval

 class ColumnarBatchSuite extends SparkFunSuite {

-  def allocate(capacity: Int, dt: DataType, memMode: MemoryMode): MutableColumnVector = {
+  def allocate(capacity: Int, dt: DataType, memMode: MemoryMode): WritableColumnVector = {
     if (memMode == MemoryMode.OFF_HEAP) {
       new OffHeapColumnVector(capacity, dt)
     } else {
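
As a usage note: the write-once contract spelled out in the renamed class's javadoc (no put calls after reads until `reset()`) looks roughly like this in practice. This is a minimal sketch, not part of the patch; `putInt`, `putNull`, and `reset` appear in this diff, while `getInt` and `isNullAt` are assumed to come from the `ColumnVector` base class.

```java
import org.apache.spark.sql.execution.vectorized.OnHeapColumnVector;
import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
import org.apache.spark.sql.types.DataTypes;

final class WritableColumnVectorExample {
  public static void main(String[] args) {
    WritableColumnVector col = new OnHeapColumnVector(3, DataTypes.IntegerType);

    // Write phase: fill rows 0..2.
    col.putInt(0, 42);
    col.putNull(1);
    col.putInt(2, 7);

    // Read phase: the vector should now be treated as immutable.
    System.out.println(col.getInt(0));   // 42
    System.out.println(col.isNullAt(1)); // true

    // Before writing the next batch, discard the stored values.
    col.reset();
  }
}
```
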