Added options to fine-tune settings for bulk operations (#43509)
* Added option to override configuration for `minTargetBulkBatchSize`

* Added changelogs

* Update PartitionScopeThresholds.java
FabianMeiswinkel authored Dec 26, 2024
1 parent f908324 commit 472dce0
Showing 13 changed files with 141 additions and 32 deletions.
1 change: 1 addition & 0 deletions sdk/cosmos/azure-cosmos-spark_3-1_2-12/CHANGELOG.md
@@ -9,6 +9,7 @@
#### Bugs Fixed

#### Other Changes
* Added options to fine-tune settings for bulk operations. - [PR 43509](https://github.com/Azure/azure-sdk-for-java/pull/43509)

### 4.35.0 (2024-11-27)

1 change: 1 addition & 0 deletions sdk/cosmos/azure-cosmos-spark_3-2_2-12/CHANGELOG.md
@@ -9,6 +9,7 @@
#### Bugs Fixed

#### Other Changes
* Added options to fine-tune settings for bulk operations. - [PR 43509](https://github.com/Azure/azure-sdk-for-java/pull/43509)

### 4.35.0 (2024-11-27)

1 change: 1 addition & 0 deletions sdk/cosmos/azure-cosmos-spark_3-3_2-12/CHANGELOG.md
@@ -9,6 +9,7 @@
#### Bugs Fixed

#### Other Changes
* Added options to fine-tune settings for bulk operations. - [PR 43509](https://github.com/Azure/azure-sdk-for-java/pull/43509)

### 4.35.0 (2024-11-27)

1 change: 1 addition & 0 deletions sdk/cosmos/azure-cosmos-spark_3-4_2-12/CHANGELOG.md
@@ -9,6 +9,7 @@
#### Bugs Fixed

#### Other Changes
* Added options to fine-tune settings for bulk operations. - [PR 43509](https://github.com/Azure/azure-sdk-for-java/pull/43509)

### 4.35.0 (2024-11-27)

1 change: 1 addition & 0 deletions sdk/cosmos/azure-cosmos-spark_3-5_2-12/CHANGELOG.md
@@ -9,6 +9,7 @@
#### Bugs Fixed

#### Other Changes
* Added options to fine-tune settings for bulk operations. - [PR 43509](https://github.com/Azure/azure-sdk-for-java/pull/43509)

### 4.35.0 (2024-11-27)

@@ -89,6 +89,7 @@ private[spark] object CosmosConfigNames {
val WriteBulkEnabled = "spark.cosmos.write.bulk.enabled"
val WriteBulkMaxPendingOperations = "spark.cosmos.write.bulk.maxPendingOperations"
val WriteBulkMaxBatchSize = "spark.cosmos.write.bulk.maxBatchSize"
val WriteBulkMinTargetBatchSize = "spark.cosmos.write.bulk.minTargetBatchSize"
val WriteBulkMaxConcurrentPartitions = "spark.cosmos.write.bulk.maxConcurrentCosmosPartitions"
val WriteBulkPayloadSizeInBytes = "spark.cosmos.write.bulk.targetedPayloadSizeInBytes"
val WriteBulkInitialBatchSize = "spark.cosmos.write.bulk.initialBatchSize"
@@ -195,6 +196,7 @@ private[spark] object CosmosConfigNames {
WriteBulkPayloadSizeInBytes,
WriteBulkInitialBatchSize,
WriteBulkMaxBatchSize,
WriteBulkMinTargetBatchSize,
WritePointMaxConcurrency,
WritePatchDefaultOperationType,
WritePatchColumnConfigs,
@@ -1162,6 +1164,7 @@ private case class CosmosWriteConfig(itemWriteStrategy: ItemWriteStrategy,
maxMicroBatchPayloadSizeInBytes: Option[Int] = None,
initialMicroBatchSize: Option[Int] = None,
maxMicroBatchSize: Option[Int] = None,
minTargetMicroBatchSize: Option[Int] = None,
flushCloseIntervalInSeconds: Int = 60,
maxNoProgressIntervalInSeconds: Int = 180,
maxRetryNoProgressIntervalInSeconds: Int = 45 * 60,
@@ -1207,6 +1210,15 @@ private object CosmosWriteConfig {
"too many RUs and you cannot enable thoughput control. NOTE: using throuhgput control is preferred and will." +
"result in better throughput while still limiting the RU/s used.")

private val minTargetMicroBatchSize = CosmosConfigEntry[Int](key = CosmosConfigNames.WriteBulkMinTargetBatchSize,
defaultValue = Option.apply(Configs.getMinTargetBulkMicroBatchSize),
mandatory = false,
parseFromStringFunction = minTargetBatchSizeString => Math.min(minTargetBatchSizeString.toInt, Configs.getMinTargetBulkMicroBatchSize),
helpMessage = "Cosmos DB min. target bulk micro batch size - a micro batch will be flushed to the backend " +
"when the number of documents enqueued exceeds the target micro batch size. The target micro batch size is " +
"calculated based on the throttling rate. This setting can be used to force the target batch size to have " +
" at least a certain size. NOTE: This should only be modified in rare edge cases.")

private val bulkMaxPendingOperations = CosmosConfigEntry[Int](key = CosmosConfigNames.WriteBulkMaxPendingOperations,
mandatory = false,
parseFromStringFunction = bulkMaxConcurrencyAsString => bulkMaxConcurrencyAsString.toInt,
@@ -1445,6 +1457,7 @@
val microBatchPayloadSizeInBytesOpt = CosmosConfigEntry.parse(cfg, microBatchPayloadSizeInBytes)
val initialBatchSizeOpt = CosmosConfigEntry.parse(cfg, initialMicroBatchSize)
val maxBatchSizeOpt = CosmosConfigEntry.parse(cfg, maxMicroBatchSize)
val minTargetBatchSizeOpt = CosmosConfigEntry.parse(cfg, minTargetMicroBatchSize)
val writeRetryCommitInterceptor = CosmosConfigEntry
.parse(cfg, writeOnRetryCommitInterceptor).flatten

@@ -1477,6 +1490,7 @@
maxMicroBatchPayloadSizeInBytes = microBatchPayloadSizeInBytesOpt,
initialMicroBatchSize = initialBatchSizeOpt,
maxMicroBatchSize = maxBatchSizeOpt,
minTargetMicroBatchSize = minTargetBatchSizeOpt,
flushCloseIntervalInSeconds = CosmosConfigEntry.parse(cfg, flushCloseIntervalInSeconds).get,
maxNoProgressIntervalInSeconds = CosmosConfigEntry.parse(cfg, maxNoProgressIntervalInSeconds).get,
maxRetryNoProgressIntervalInSeconds = CosmosConfigEntry.parse(cfg, maxRetryNoProgressIntervalInSeconds).get,
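For illustration, a minimal sketch of a Spark write that sets the new `spark.cosmos.write.bulk.minTargetBatchSize` option. This code is not part of the commit: the `cosmos.oltp` data source name reflects the connector's usual usage, and the endpoint, key, database, container, and input path are placeholders. Note that, as parsed above, the effective value is the smaller of the Spark option and the JVM-level `COSMOS.MIN_TARGET_BULK_MICRO_BATCH_SIZE` setting.

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

// Sketch only: write a DataFrame through the Cosmos DB Spark connector with
// bulk mode enabled and a floor on the dynamically computed target batch size.
public final class BulkWriteExample {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder().appName("bulk-write-example").getOrCreate();
        Dataset<Row> df = spark.read().parquet("/tmp/input"); // placeholder input

        df.write()
            .format("cosmos.oltp")
            .option("spark.cosmos.accountEndpoint", "<endpoint>")
            .option("spark.cosmos.accountKey", "<key>")
            .option("spark.cosmos.database", "<database>")
            .option("spark.cosmos.container", "<container>")
            .option("spark.cosmos.write.bulk.enabled", "true")
            // Rare edge case per the help message above: keep the target
            // micro batch size at or above 10.
            .option("spark.cosmos.write.bulk.minTargetBatchSize", "10")
            .mode("append")
            .save();

        spark.stop();
    }
}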
@@ -34,18 +34,20 @@ class SparkE2EWriteITest
itemWriteStrategy: ItemWriteStrategy,
hasId: Boolean = true,
initialBatchSize: Option[Int] = None,
maxBatchSize: Option[Int] = None)
maxBatchSize: Option[Int] = None,
minTargetBatchSize: Option[Int] = None)

private val upsertParameterTest = Seq(
UpsertParameterTest(bulkEnabled = true, itemWriteStrategy = ItemWriteStrategy.ItemOverwrite, initialBatchSize = None, maxBatchSize = None),
UpsertParameterTest(bulkEnabled = true, itemWriteStrategy = ItemWriteStrategy.ItemOverwrite, initialBatchSize = Some(1), maxBatchSize = None),
UpsertParameterTest(bulkEnabled = true, itemWriteStrategy = ItemWriteStrategy.ItemOverwrite, initialBatchSize = Some(1), maxBatchSize = Some(5)),
UpsertParameterTest(bulkEnabled = false, itemWriteStrategy = ItemWriteStrategy.ItemOverwrite, initialBatchSize = None, maxBatchSize = None),
UpsertParameterTest(bulkEnabled = false, itemWriteStrategy = ItemWriteStrategy.ItemAppend, initialBatchSize = None, maxBatchSize = None)
UpsertParameterTest(bulkEnabled = true, itemWriteStrategy = ItemWriteStrategy.ItemOverwrite, initialBatchSize = None, maxBatchSize = None, minTargetBatchSize = None),
UpsertParameterTest(bulkEnabled = true, itemWriteStrategy = ItemWriteStrategy.ItemOverwrite, initialBatchSize = Some(1), maxBatchSize = None, minTargetBatchSize = None),
UpsertParameterTest(bulkEnabled = true, itemWriteStrategy = ItemWriteStrategy.ItemOverwrite, initialBatchSize = Some(1), maxBatchSize = Some(5), minTargetBatchSize = None),
UpsertParameterTest(bulkEnabled = true, itemWriteStrategy = ItemWriteStrategy.ItemOverwrite, initialBatchSize = Some(1), maxBatchSize = Some(5), minTargetBatchSize = Some(2)),
UpsertParameterTest(bulkEnabled = false, itemWriteStrategy = ItemWriteStrategy.ItemOverwrite, initialBatchSize = None, maxBatchSize = None, minTargetBatchSize = None),
UpsertParameterTest(bulkEnabled = false, itemWriteStrategy = ItemWriteStrategy.ItemAppend, initialBatchSize = None, maxBatchSize = None, minTargetBatchSize = None)
)

for (UpsertParameterTest(bulkEnabled, itemWriteStrategy, hasId, initialBatchSize, maxBatchSize) <- upsertParameterTest) {
it should s"support upserts with bulkEnabled = $bulkEnabled itemWriteStrategy = $itemWriteStrategy hasId = $hasId initialBatchSize = $initialBatchSize, maxBatchSize = $maxBatchSize" in {
for (UpsertParameterTest(bulkEnabled, itemWriteStrategy, hasId, initialBatchSize, maxBatchSize, minTargetBatchSize) <- upsertParameterTest) {
it should s"support upserts with bulkEnabled = $bulkEnabled itemWriteStrategy = $itemWriteStrategy hasId = $hasId initialBatchSize = $initialBatchSize, maxBatchSize = $maxBatchSize, minTargetBatchSize = $minTargetBatchSize" in {
val cosmosEndpoint = TestConfigurations.HOST
val cosmosMasterKey = TestConfigurations.MASTER_KEY

@@ -90,6 +92,18 @@ class SparkE2EWriteITest
case None =>
}

minTargetBatchSize match {
case Some(customMinTargetBatchSize) =>
configMapBuilder += (
"spark.cosmos.write.bulk.minTargetBatchSize" -> customMinTargetBatchSize.toString,
)

configOverrideMapBuilder += (
"spark.cosmos.write.bulk.minTargetBatchSize" -> customMinTargetBatchSize.toString,
)
case None =>
}

val cfg = configMapBuilder.toMap

val cfgOverwrite = configOverrideMapBuilder.toMap
@@ -41,7 +41,7 @@ public void neverThrottledShouldResultInMaxBatchSize() {
}

@Test(groups = { "unit" })
public void alwaysThrottledShouldResultInBatSizeOfOne() {
public void alwaysThrottledShouldResultInBatchSizeOfOne() {
String pkRangeId = UUID.randomUUID().toString();
PartitionScopeThresholds thresholds =
new PartitionScopeThresholds(pkRangeId, new CosmosBulkExecutionOptionsImpl());
@@ -71,5 +71,13 @@ public void initialTargetMicroBatchSize() {
bulkOperations.setMaxMicroBatchSize(maxBatchSize);
thresholds = new PartitionScopeThresholds(pkRangeId, bulkOperations);
assertThat(thresholds.getTargetMicroBatchSizeSnapshot()).isEqualTo(maxBatchSize);

// initial targetBatchSize should be at least minTargetBatchSize
int minTargetBatchSize = 5;
bulkOperations = new CosmosBulkExecutionOptionsImpl();
bulkOperations.setInitialMicroBatchSize(1);
bulkOperations.setMinTargetMicroBatchSize(minTargetBatchSize);
thresholds = new PartitionScopeThresholds(pkRangeId, bulkOperations);
assertThat(thresholds.getTargetMicroBatchSizeSnapshot()).isEqualTo(minTargetBatchSize);
}
}
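The new assertions indicate the initial target micro batch size is clamped from below by `minTargetMicroBatchSize`. A hypothetical sketch of that clamping, mirroring the values the test uses (the actual `PartitionScopeThresholds` implementation may differ in detail):

// Hypothetical illustration, not the shipped implementation.
public final class MinTargetClampSketch {
    static int initialTargetBatchSize(int initial, int minTarget, int max) {
        // Cap the requested initial size at the maximum, then enforce the floor.
        return Math.max(minTarget, Math.min(initial, max));
    }

    public static void main(String[] args) {
        // Mirrors the test above: initialMicroBatchSize = 1, minTargetMicroBatchSize = 5.
        System.out.println(initialTargetBatchSize(1, 5, 100)); // prints 5
    }
}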
4 changes: 2 additions & 2 deletions sdk/cosmos/azure-cosmos/CHANGELOG.md
@@ -12,8 +12,8 @@
* Added support to enable http2 for gateway mode with system property `COSMOS.HTTP2_ENABLED` and system variable `COSMOS_HTTP2_ENABLED`. - [PR 42947](https://github.com/Azure/azure-sdk-for-java/pull/42947)
* Added support to allow changing http2 max connection pool size with system property `COSMOS.HTTP2_MAX_CONNECTION_POOL_SIZE` and system variable `COSMOS_HTTP2_MAX_CONNECTION_POOL_SIZE`. - [PR 42947](https://github.com/Azure/azure-sdk-for-java/pull/42947)
* Added support to allow changing http2 min connection pool size with system property `COSMOS.HTTP2_MIN_CONNECTION_POOL_SIZE` and system variable `COSMOS_HTTP2_MIN_CONNECTION_POOL_SIZE`. - [PR 42947](https://github.com/Azure/azure-sdk-for-java/pull/42947)
* Added support to allow changing http2 max concurrent streams with system property `COSMOS.HTTP2_MAX_CONCURRENT_STREAMS` and system variable `COSMOS_HTTP2_MAX_CONCURRENT_STREAMS`. - [PR 42947](https://github.com/Azure/azure-sdk-for-java/pull/42947)

* Added options to fine-tune settings for bulk operations. - [PR 43509](https://github.com/Azure/azure-sdk-for-java/pull/43509)
### 4.65.0 (2024-11-19)

#### Features Added
@@ -208,6 +208,19 @@ public class Configs {
public static final String PREVENT_INVALID_ID_CHARS_VARIABLE = "COSMOS_PREVENT_INVALID_ID_CHARS";
public static final boolean DEFAULT_PREVENT_INVALID_ID_CHARS = false;

// Bulk default settings
public static final String MIN_TARGET_BULK_MICRO_BATCH_SIZE = "COSMOS.MIN_TARGET_BULK_MICRO_BATCH_SIZE";
public static final String MIN_TARGET_BULK_MICRO_BATCH_SIZE_VARIABLE = "COSMOS_MIN_TARGET_BULK_MICRO_BATCH_SIZE";
public static final int DEFAULT_MIN_TARGET_BULK_MICRO_BATCH_SIZE = 1;

public static final String MAX_BULK_MICRO_BATCH_CONCURRENCY = "COSMOS.MAX_BULK_MICRO_BATCH_CONCURRENCY";
public static final String MAX_BULK_MICRO_BATCH_CONCURRENCY_VARIABLE = "COSMOS_MAX_BULK_MICRO_BATCH_CONCURRENCY";
public static final int DEFAULT_MAX_BULK_MICRO_BATCH_CONCURRENCY = 1;

public static final String MAX_BULK_MICRO_BATCH_FLUSH_INTERVAL_IN_MILLISECONDS = "COSMOS.MAX_BULK_MICRO_BATCH_FLUSH_INTERVAL_IN_MILLISECONDS";
public static final String MAX_BULK_MICRO_BATCH_FLUSH_INTERVAL_IN_MILLISECONDS_VARIABLE = "COSMOS_MAX_BULK_MICRO_BATCH_FLUSH_INTERVAL_IN_MILLISECONDS";
public static final int DEFAULT_MAX_BULK_MICRO_BATCH_FLUSH_INTERVAL_IN_MILLISECONDS = 1000;

// Config of CodingErrorAction on charset decoder for malformed input
public static final String CHARSET_DECODER_ERROR_ACTION_ON_MALFORMED_INPUT = "COSMOS.CHARSET_DECODER_ERROR_ACTION_ON_MALFORMED_INPUT";
public static final String DEFAULT_CHARSET_DECODER_ERROR_ACTION_ON_MALFORMED_INPUT = StringUtils.EMPTY;
@@ -495,6 +508,48 @@ public static boolean isIdValueValidationEnabled() {
return DEFAULT_PREVENT_INVALID_ID_CHARS;
}

public static int getMinTargetBulkMicroBatchSize() {
String valueFromSystemProperty = System.getProperty(MIN_TARGET_BULK_MICRO_BATCH_SIZE);
if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) {
return Integer.parseInt(valueFromSystemProperty);
}

String valueFromEnvVariable = System.getenv(MIN_TARGET_BULK_MICRO_BATCH_SIZE_VARIABLE);
if (valueFromEnvVariable != null && !valueFromEnvVariable.isEmpty()) {
return Integer.parseInt(valueFromEnvVariable);
}

return DEFAULT_MIN_TARGET_BULK_MICRO_BATCH_SIZE;
}

public static int getMaxBulkMicroBatchConcurrency() {
String valueFromSystemProperty = System.getProperty(MAX_BULK_MICRO_BATCH_CONCURRENCY);
if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) {
return Integer.parseInt(valueFromSystemProperty);
}

String valueFromEnvVariable = System.getenv(MAX_BULK_MICRO_BATCH_CONCURRENCY_VARIABLE);
if (valueFromEnvVariable != null && !valueFromEnvVariable.isEmpty()) {
return Integer.parseInt(valueFromEnvVariable);
}

return DEFAULT_MAX_BULK_MICRO_BATCH_CONCURRENCY;
}

public static int getMaxBulkMicroBatchFlushIntervalInMs() {
String valueFromSystemProperty = System.getProperty(MAX_BULK_MICRO_BATCH_FLUSH_INTERVAL_IN_MILLISECONDS);
if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) {
return Integer.parseInt(valueFromSystemProperty);
}

String valueFromEnvVariable = System.getenv(MAX_BULK_MICRO_BATCH_FLUSH_INTERVAL_IN_MILLISECONDS_VARIABLE);
if (valueFromEnvVariable != null && !valueFromEnvVariable.isEmpty()) {
return Integer.parseInt(valueFromEnvVariable);
}

return DEFAULT_MAX_BULK_MICRO_BATCH_FLUSH_INTERVAL_IN_MILLISECONDS;
}

public static int getMaxHttpRequestTimeout() {
String valueFromSystemProperty = System.getProperty(HTTP_MAX_REQUEST_TIMEOUT);
if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) {
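All three new getters follow the same precedence: JVM system property first, then environment variable, then the hard-coded default. A minimal sketch of raising the bulk defaults process-wide, using the property-name constants added above (this must run before any Cosmos client reads `Configs`):

public final class BulkConfigBootstrap {
    public static void main(String[] args) {
        // System properties take precedence over the corresponding COSMOS_* environment variables.
        System.setProperty("COSMOS.MIN_TARGET_BULK_MICRO_BATCH_SIZE", "5");
        System.setProperty("COSMOS.MAX_BULK_MICRO_BATCH_CONCURRENCY", "2");
        System.setProperty("COSMOS.MAX_BULK_MICRO_BATCH_FLUSH_INTERVAL_IN_MILLISECONDS", "500");

        // ... build the Cosmos client and run bulk operations as usual ...
    }
}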
@@ -32,15 +32,14 @@
*/
public class CosmosBulkExecutionOptionsImpl implements OverridableRequestOptions {
private int initialMicroBatchSize = BatchRequestResponseConstants.MAX_OPERATIONS_IN_DIRECT_MODE_BATCH_REQUEST;
private int maxMicroBatchConcurrency = BatchRequestResponseConstants.DEFAULT_MAX_MICRO_BATCH_CONCURRENCY;

private int maxMicroBatchConcurrency = Configs.getMaxBulkMicroBatchConcurrency();
private int minTargetMicroBatchSize = Configs.getMinTargetBulkMicroBatchSize();
private int maxMicroBatchSize = BatchRequestResponseConstants.MAX_OPERATIONS_IN_DIRECT_MODE_BATCH_REQUEST;
private double maxMicroBatchRetryRate = BatchRequestResponseConstants.DEFAULT_MAX_MICRO_BATCH_RETRY_RATE;
private double minMicroBatchRetryRate = BatchRequestResponseConstants.DEFAULT_MIN_MICRO_BATCH_RETRY_RATE;

private int maxMicroBatchPayloadSizeInBytes = BatchRequestResponseConstants.DEFAULT_MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES;
private final Duration maxMicroBatchInterval = Duration.ofMillis(
BatchRequestResponseConstants.DEFAULT_MAX_MICRO_BATCH_INTERVAL_IN_MILLISECONDS);
private final Duration maxMicroBatchInterval = Duration.ofMillis(Configs.getMaxBulkMicroBatchFlushIntervalInMs());
private final Object legacyBatchScopedContext;
private final CosmosBulkExecutionThresholdsState thresholds;
private Integer maxConcurrentCosmosPartitions = null;
@@ -60,6 +59,7 @@ public CosmosBulkExecutionOptionsImpl(CosmosBulkExecutionOptionsImpl toBeCloned)
this.initialMicroBatchSize = toBeCloned.initialMicroBatchSize;
this.maxMicroBatchConcurrency = toBeCloned.maxMicroBatchConcurrency;
this.maxMicroBatchSize = toBeCloned.maxMicroBatchSize;
this.minTargetMicroBatchSize = toBeCloned.minTargetMicroBatchSize;
this.maxMicroBatchRetryRate = toBeCloned.maxMicroBatchRetryRate;
this.minMicroBatchRetryRate = toBeCloned.minMicroBatchRetryRate;
this.maxMicroBatchPayloadSizeInBytes = toBeCloned.maxMicroBatchPayloadSizeInBytes;
@@ -130,6 +130,14 @@ public void setMaxMicroBatchSize(int maxMicroBatchSize) {
this.maxMicroBatchSize = maxMicroBatchSize;
}

public int getMinTargetMicroBatchSize() {
return minTargetMicroBatchSize;
}

public void setMinTargetMicroBatchSize(int minTargetMicroBatchSize) {
this.minTargetMicroBatchSize = minTargetMicroBatchSize;
}

public CosmosItemSerializer getCustomItemSerializer() {
return this.customSerializer;
}
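Because the defaults of `CosmosBulkExecutionOptionsImpl` now come from `Configs` rather than hard-coded constants, the JVM-level settings sketched earlier flow into every freshly constructed options object. A brief hypothetical illustration (this is an internal implementation type that application code would not normally construct directly; imports omitted):

// Assuming the system properties from the Configs sketch above were set first:
CosmosBulkExecutionOptionsImpl opts = new CosmosBulkExecutionOptionsImpl();
// opts.getMinTargetMicroBatchSize() == 5  (from COSMOS.MIN_TARGET_BULK_MICRO_BATCH_SIZE)
// opts.getMaxMicroBatchConcurrency() == 2 (from COSMOS.MAX_BULK_MICRO_BATCH_CONCURRENCY)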
@@ -12,31 +12,22 @@ public final class BatchRequestResponseConstants {
// Size limits:
public static final int DEFAULT_MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES = 220201;
public static final int MAX_OPERATIONS_IN_DIRECT_MODE_BATCH_REQUEST = 100;

public static final int DEFAULT_MAX_MICRO_BATCH_INTERVAL_IN_MILLISECONDS = 1000;
public static final int DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS = 100;
public static final int DEFAULT_MAX_MICRO_BATCH_CONCURRENCY = 1;
public static final double DEFAULT_MIN_MICRO_BATCH_RETRY_RATE = 0.1;
public static final double DEFAULT_MAX_MICRO_BATCH_RETRY_RATE = 0.2;

static final String FIELD_OPERATION_TYPE = "operationType";
static final String FIELD_RESOURCE_TYPE = "resourceType";
static final String FIELD_TIME_TO_LIVE_IN_SECONDS = "timeToLiveInSeconds";
static final String FIELD_ID = "id";
static final String FIELD_INDEXING_DIRECTIVE = "indexingDirective";
static final String FIELD_IF_MATCH = "ifMatch";
static final String FIELD_IF_NONE_MATCH = "ifNoneMatch";
static final String FIELD_PARTITION_KEY = "partitionKey";
static final String FIELD_RESOURCE_BODY = "resourceBody";
static final String FIELD_BINARY_ID = "binaryId";
static final String FIELD_EFFECTIVE_PARTITIONKEY = "effectivePartitionKey";
static final String FIELD_STATUS_CODE = "statusCode";
static final String FIELD_SUBSTATUS_CODE = "subStatusCode";
static final String FIELD_REQUEST_CHARGE = "requestCharge";
static final String FIELD_RETRY_AFTER_MILLISECONDS = "retryAfterMilliseconds";
static final String FIELD_ETAG = "eTag";
static final String FIELD_MINIMAL_RETURN_PREFERENCE = "minimalReturnPreference";
static final String FIELD_IS_CLIENTENCRYPTED = "isClientEncrypted";

// Batch supported operation type for json
public static final String OPERATION_CREATE = "Create";