Skip to content

Commit 1fff2b4

Browse files
zhichao-awsheemin32
authored and committed
[Backport] manually backport 988 to 2.x (opensearch-project#1030)
* [Enhancement] Implement pruning for neural sparse search (opensearch-project#988) * add impl Signed-off-by: zhichao-aws <zhichaog@amazon.com> * add UT Signed-off-by: zhichao-aws <zhichaog@amazon.com> * rename pruneType; UT Signed-off-by: zhichao-aws <zhichaog@amazon.com> * changelog Signed-off-by: zhichao-aws <zhichaog@amazon.com> * ut Signed-off-by: zhichao-aws <zhichaog@amazon.com> * add it Signed-off-by: zhichao-aws <zhichaog@amazon.com> * change on 2-phase Signed-off-by: zhichao-aws <zhichaog@amazon.com> * UT Signed-off-by: zhichao-aws <zhichaog@amazon.com> * it Signed-off-by: zhichao-aws <zhichaog@amazon.com> * rename Signed-off-by: zhichao-aws <zhichaog@amazon.com> * enhance: more detailed error message Signed-off-by: zhichao-aws <zhichaog@amazon.com> * refactor to prune and split Signed-off-by: zhichao-aws <zhichaog@amazon.com> * changelog Signed-off-by: zhichao-aws <zhichaog@amazon.com> * fix UT cov Signed-off-by: zhichao-aws <zhichaog@amazon.com> * address review comments Signed-off-by: zhichao-aws <zhichaog@amazon.com> * enlarge score diff range Signed-off-by: zhichao-aws <zhichaog@amazon.com> * address comments: check lowScores non null instead of flag Signed-off-by: zhichao-aws <zhichaog@amazon.com> --------- Signed-off-by: zhichao-aws <zhichaog@amazon.com> (cherry picked from commit e8fe284) * fix toList for jvm version Signed-off-by: zhichao-aws <zhichaog@amazon.com> * adapt for the gap of batch ingest between 2.x main Signed-off-by: zhichao-aws <zhichaog@amazon.com> --------- Signed-off-by: zhichao-aws <zhichaog@amazon.com>
1 parent f3a58e9 commit 1fff2b4

18 files changed

+1197
-140
lines changed

CHANGELOG.md

+1
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
1717
### Features
1818
### Enhancements
1919
- Explainability in hybrid query ([#970](https://github.com/opensearch-project/neural-search/pull/970))
20+
- Implement pruning for neural sparse ingestion pipeline and two phase search processor ([#988](https://github.com/opensearch-project/neural-search/pull/988))
2021
- Support new knn query parameter expand_nested ([#1013](https://github.com/opensearch-project/neural-search/pull/1013))
2122
### Bug Fixes
2223
- Address inconsistent scoring in hybrid query results ([#998](https://github.com/opensearch-project/neural-search/pull/998))

src/main/java/org/opensearch/neuralsearch/processor/NeuralSparseTwoPhaseProcessor.java

+41-56
Original file line numberDiff line numberDiff line change
@@ -9,23 +9,22 @@
99
import lombok.Getter;
1010
import lombok.Setter;
1111
import org.opensearch.action.search.SearchRequest;
12-
import org.opensearch.common.collect.Tuple;
1312
import org.opensearch.index.query.BoolQueryBuilder;
1413
import org.opensearch.index.query.QueryBuilder;
1514
import org.opensearch.ingest.ConfigurationUtils;
1615
import org.opensearch.neuralsearch.query.NeuralSparseQueryBuilder;
16+
import org.opensearch.neuralsearch.util.prune.PruneType;
17+
import org.opensearch.neuralsearch.util.prune.PruneUtils;
1718
import org.opensearch.search.builder.SearchSourceBuilder;
1819
import org.opensearch.search.pipeline.AbstractProcessor;
1920
import org.opensearch.search.pipeline.Processor;
2021
import org.opensearch.search.pipeline.SearchRequestProcessor;
2122
import org.opensearch.search.rescore.QueryRescorerBuilder;
2223
import org.opensearch.search.rescore.RescorerBuilder;
2324

24-
import java.util.Collections;
2525
import java.util.Locale;
2626
import java.util.Map;
2727
import java.util.Objects;
28-
import java.util.stream.Collectors;
2928

3029
/**
3130
* A SearchRequestProcessor to generate two-phase NeuralSparseQueryBuilder,
@@ -37,41 +36,37 @@ public class NeuralSparseTwoPhaseProcessor extends AbstractProcessor implements
3736

3837
public static final String TYPE = "neural_sparse_two_phase_processor";
3938
private boolean enabled;
40-
private float ratio;
39+
private float pruneRatio;
40+
private PruneType pruneType;
4141
private float windowExpansion;
4242
private int maxWindowSize;
4343
private static final String PARAMETER_KEY = "two_phase_parameter";
44-
private static final String RATIO_KEY = "prune_ratio";
4544
private static final String ENABLE_KEY = "enabled";
4645
private static final String EXPANSION_KEY = "expansion_rate";
4746
private static final String MAX_WINDOW_SIZE_KEY = "max_window_size";
4847
private static final boolean DEFAULT_ENABLED = true;
4948
private static final float DEFAULT_RATIO = 0.4f;
49+
private static final PruneType DEFAULT_PRUNE_TYPE = PruneType.MAX_RATIO;
5050
private static final float DEFAULT_WINDOW_EXPANSION = 5.0f;
5151
private static final int DEFAULT_MAX_WINDOW_SIZE = 10000;
5252
private static final int DEFAULT_BASE_QUERY_SIZE = 10;
5353
private static final int MAX_WINDOWS_SIZE_LOWER_BOUND = 50;
5454
private static final float WINDOW_EXPANSION_LOWER_BOUND = 1.0f;
55-
private static final float RATIO_LOWER_BOUND = 0f;
56-
private static final float RATIO_UPPER_BOUND = 1f;
5755

5856
protected NeuralSparseTwoPhaseProcessor(
5957
String tag,
6058
String description,
6159
boolean ignoreFailure,
6260
boolean enabled,
63-
float ratio,
61+
float pruneRatio,
62+
PruneType pruneType,
6463
float windowExpansion,
6564
int maxWindowSize
6665
) {
6766
super(tag, description, ignoreFailure);
6867
this.enabled = enabled;
69-
if (ratio < RATIO_LOWER_BOUND || ratio > RATIO_UPPER_BOUND) {
70-
throw new IllegalArgumentException(
71-
String.format(Locale.ROOT, "The two_phase_parameter.prune_ratio must be within [0, 1]. Received: %f", ratio)
72-
);
73-
}
74-
this.ratio = ratio;
68+
this.pruneRatio = pruneRatio;
69+
this.pruneType = pruneType;
7570
if (windowExpansion < WINDOW_EXPANSION_LOWER_BOUND) {
7671
throw new IllegalArgumentException(
7772
String.format(Locale.ROOT, "The two_phase_parameter.expansion_rate must >= 1.0. Received: %f", windowExpansion)
@@ -93,7 +88,7 @@ protected NeuralSparseTwoPhaseProcessor(
9388
*/
9489
@Override
9590
public SearchRequest processRequest(final SearchRequest request) {
96-
if (!enabled || ratio == 0f) {
91+
if (!enabled || pruneRatio == 0f) {
9792
return request;
9893
}
9994
QueryBuilder queryBuilder = request.source().query();
@@ -117,43 +112,6 @@ public String getType() {
117112
return TYPE;
118113
}
119114

120-
/**
121-
* Based on ratio, split a Map into two map by the value.
122-
*
123-
* @param queryTokens the queryTokens map, key is the token String, value is the score.
124-
* @param thresholdRatio The ratio that control how tokens map be split.
125-
* @return A tuple has two element, { token map whose value above threshold, token map whose value below threshold }
126-
*/
127-
public static Tuple<Map<String, Float>, Map<String, Float>> splitQueryTokensByRatioedMaxScoreAsThreshold(
128-
final Map<String, Float> queryTokens,
129-
final float thresholdRatio
130-
) {
131-
if (Objects.isNull(queryTokens)) {
132-
throw new IllegalArgumentException("Query tokens cannot be null or empty.");
133-
}
134-
float max = 0f;
135-
for (Float value : queryTokens.values()) {
136-
max = Math.max(value, max);
137-
}
138-
float threshold = max * thresholdRatio;
139-
140-
Map<Boolean, Map<String, Float>> queryTokensByScore = queryTokens.entrySet()
141-
.stream()
142-
.collect(
143-
Collectors.partitioningBy(entry -> entry.getValue() >= threshold, Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))
144-
);
145-
146-
Map<String, Float> highScoreTokens = queryTokensByScore.get(Boolean.TRUE);
147-
Map<String, Float> lowScoreTokens = queryTokensByScore.get(Boolean.FALSE);
148-
if (Objects.isNull(highScoreTokens)) {
149-
highScoreTokens = Collections.emptyMap();
150-
}
151-
if (Objects.isNull(lowScoreTokens)) {
152-
lowScoreTokens = Collections.emptyMap();
153-
}
154-
return Tuple.tuple(highScoreTokens, lowScoreTokens);
155-
}
156-
157115
private QueryBuilder getNestedQueryBuilderFromNeuralSparseQueryBuilderMap(
158116
final Multimap<NeuralSparseQueryBuilder, Float> queryBuilderFloatMap
159117
) {
@@ -201,7 +159,10 @@ private Multimap<NeuralSparseQueryBuilder, Float> collectNeuralSparseQueryBuilde
201159
* - Docs besides TopDocs: Score = HighScoreToken's score
202160
* - Final TopDocs: Score = HighScoreToken's score + LowScoreToken's score
203161
*/
204-
NeuralSparseQueryBuilder modifiedQueryBuilder = neuralSparseQueryBuilder.getCopyNeuralSparseQueryBuilderForTwoPhase(ratio);
162+
NeuralSparseQueryBuilder modifiedQueryBuilder = neuralSparseQueryBuilder.getCopyNeuralSparseQueryBuilderForTwoPhase(
163+
pruneRatio,
164+
pruneType
165+
);
205166
result.put(modifiedQueryBuilder, updatedBoost);
206167
}
207168
// We only support BoostQuery, BooleanQuery and NeuralSparseQuery now. For other compound query type which are not support now, will
@@ -248,16 +209,40 @@ public NeuralSparseTwoPhaseProcessor create(
248209
boolean enabled = ConfigurationUtils.readBooleanProperty(TYPE, tag, config, ENABLE_KEY, DEFAULT_ENABLED);
249210
Map<String, Object> twoPhaseConfigMap = ConfigurationUtils.readOptionalMap(TYPE, tag, config, PARAMETER_KEY);
250211

251-
float ratio = DEFAULT_RATIO;
212+
float pruneRatio = DEFAULT_RATIO;
252213
float windowExpansion = DEFAULT_WINDOW_EXPANSION;
253214
int maxWindowSize = DEFAULT_MAX_WINDOW_SIZE;
215+
PruneType pruneType = DEFAULT_PRUNE_TYPE;
254216
if (Objects.nonNull(twoPhaseConfigMap)) {
255-
ratio = ((Number) twoPhaseConfigMap.getOrDefault(RATIO_KEY, ratio)).floatValue();
217+
pruneRatio = ((Number) twoPhaseConfigMap.getOrDefault(PruneUtils.PRUNE_RATIO_FIELD, pruneRatio)).floatValue();
256218
windowExpansion = ((Number) twoPhaseConfigMap.getOrDefault(EXPANSION_KEY, windowExpansion)).floatValue();
257219
maxWindowSize = ((Number) twoPhaseConfigMap.getOrDefault(MAX_WINDOW_SIZE_KEY, maxWindowSize)).intValue();
220+
pruneType = PruneType.fromString(
221+
twoPhaseConfigMap.getOrDefault(PruneUtils.PRUNE_TYPE_FIELD, pruneType.getValue()).toString()
222+
);
223+
}
224+
if (!PruneUtils.isValidPruneRatio(pruneType, pruneRatio)) {
225+
throw new IllegalArgumentException(
226+
String.format(
227+
Locale.ROOT,
228+
"Illegal prune_ratio %f for prune_type: %s. %s",
229+
pruneRatio,
230+
pruneType.getValue(),
231+
PruneUtils.getValidPruneRatioDescription(pruneType)
232+
)
233+
);
258234
}
259235

260-
return new NeuralSparseTwoPhaseProcessor(tag, description, ignoreFailure, enabled, ratio, windowExpansion, maxWindowSize);
236+
return new NeuralSparseTwoPhaseProcessor(
237+
tag,
238+
description,
239+
ignoreFailure,
240+
enabled,
241+
pruneRatio,
242+
pruneType,
243+
windowExpansion,
244+
maxWindowSize
245+
);
261246
}
262247
}
263248

src/main/java/org/opensearch/neuralsearch/processor/SparseEncodingProcessor.java

+24-6
Original file line numberDiff line numberDiff line change
@@ -8,15 +8,19 @@
88
import java.util.Map;
99
import java.util.function.BiConsumer;
1010
import java.util.function.Consumer;
11+
import java.util.stream.Collectors;
1112

13+
import lombok.Getter;
1214
import org.opensearch.cluster.service.ClusterService;
1315
import org.opensearch.core.action.ActionListener;
1416
import org.opensearch.env.Environment;
1517
import org.opensearch.ingest.IngestDocument;
1618
import org.opensearch.neuralsearch.ml.MLCommonsClientAccessor;
19+
import org.opensearch.neuralsearch.util.prune.PruneType;
1720
import org.opensearch.neuralsearch.util.TokenWeightUtil;
1821

1922
import lombok.extern.log4j.Log4j2;
23+
import org.opensearch.neuralsearch.util.prune.PruneUtils;
2024

2125
/**
2226
* This processor is used for user input data text sparse encoding processing, model_id can be used to indicate which model user use,
@@ -27,18 +31,26 @@ public final class SparseEncodingProcessor extends InferenceProcessor {
2731

2832
public static final String TYPE = "sparse_encoding";
2933
public static final String LIST_TYPE_NESTED_MAP_KEY = "sparse_encoding";
34+
@Getter
35+
private final PruneType pruneType;
36+
@Getter
37+
private final float pruneRatio;
3038

3139
public SparseEncodingProcessor(
3240
String tag,
3341
String description,
3442
int batchSize,
3543
String modelId,
3644
Map<String, Object> fieldMap,
45+
PruneType pruneType,
46+
float pruneRatio,
3747
MLCommonsClientAccessor clientAccessor,
3848
Environment environment,
3949
ClusterService clusterService
4050
) {
4151
super(tag, description, batchSize, TYPE, LIST_TYPE_NESTED_MAP_KEY, modelId, fieldMap, clientAccessor, environment, clusterService);
52+
this.pruneType = pruneType;
53+
this.pruneRatio = pruneRatio;
4254
}
4355

4456
@Override
@@ -49,17 +61,23 @@ public void doExecute(
4961
BiConsumer<IngestDocument, Exception> handler
5062
) {
5163
mlCommonsClientAccessor.inferenceSentencesWithMapResult(this.modelId, inferenceList, ActionListener.wrap(resultMaps -> {
52-
setVectorFieldsToDocument(ingestDocument, ProcessMap, TokenWeightUtil.fetchListOfTokenWeightMap(resultMaps));
64+
List<Map<String, Float>> sparseVectors = TokenWeightUtil.fetchListOfTokenWeightMap(resultMaps)
65+
.stream()
66+
.map(vector -> PruneUtils.pruneSparseVector(pruneType, pruneRatio, vector))
67+
.collect(Collectors.toList());
68+
setVectorFieldsToDocument(ingestDocument, ProcessMap, sparseVectors);
5369
handler.accept(ingestDocument, null);
5470
}, e -> { handler.accept(null, e); }));
5571
}
5672

5773
@Override
5874
public void doBatchExecute(List<String> inferenceList, Consumer<List<?>> handler, Consumer<Exception> onException) {
59-
mlCommonsClientAccessor.inferenceSentencesWithMapResult(
60-
this.modelId,
61-
inferenceList,
62-
ActionListener.wrap(resultMaps -> handler.accept(TokenWeightUtil.fetchListOfTokenWeightMap(resultMaps)), onException)
63-
);
75+
mlCommonsClientAccessor.inferenceSentencesWithMapResult(this.modelId, inferenceList, ActionListener.wrap(resultMaps -> {
76+
List<Map<String, Float>> sparseVectors = TokenWeightUtil.fetchListOfTokenWeightMap(resultMaps)
77+
.stream()
78+
.map(vector -> PruneUtils.pruneSparseVector(pruneType, pruneRatio, vector))
79+
.collect(Collectors.toList());
80+
handler.accept(sparseVectors);
81+
}, onException));
6482
}
6583
}

src/main/java/org/opensearch/neuralsearch/processor/factory/SparseEncodingProcessorFactory.java

+40-2
Original file line numberDiff line numberDiff line change
@@ -6,10 +6,13 @@
66

77
import static org.opensearch.ingest.ConfigurationUtils.readMap;
88
import static org.opensearch.ingest.ConfigurationUtils.readStringProperty;
9-
import static org.opensearch.neuralsearch.processor.TextEmbeddingProcessor.TYPE;
9+
import static org.opensearch.ingest.ConfigurationUtils.readOptionalStringProperty;
10+
import static org.opensearch.ingest.ConfigurationUtils.readDoubleProperty;
1011
import static org.opensearch.neuralsearch.processor.TextEmbeddingProcessor.MODEL_ID_FIELD;
1112
import static org.opensearch.neuralsearch.processor.TextEmbeddingProcessor.FIELD_MAP_FIELD;
13+
import static org.opensearch.neuralsearch.processor.SparseEncodingProcessor.TYPE;
1214

15+
import java.util.Locale;
1316
import java.util.Map;
1417

1518
import org.opensearch.cluster.service.ClusterService;
@@ -19,6 +22,8 @@
1922
import org.opensearch.neuralsearch.processor.SparseEncodingProcessor;
2023

2124
import lombok.extern.log4j.Log4j2;
25+
import org.opensearch.neuralsearch.util.prune.PruneUtils;
26+
import org.opensearch.neuralsearch.util.prune.PruneType;
2227

2328
/**
2429
* Factory for sparse encoding ingest processor for ingestion pipeline. Instantiates processor based on user provided input.
@@ -40,7 +45,40 @@ public SparseEncodingProcessorFactory(MLCommonsClientAccessor clientAccessor, En
4045
protected AbstractBatchingProcessor newProcessor(String tag, String description, int batchSize, Map<String, Object> config) {
4146
String modelId = readStringProperty(TYPE, tag, config, MODEL_ID_FIELD);
4247
Map<String, Object> fieldMap = readMap(TYPE, tag, config, FIELD_MAP_FIELD);
48+
// if the field is missing, will return PruneType.NONE
49+
PruneType pruneType = PruneType.fromString(readOptionalStringProperty(TYPE, tag, config, PruneUtils.PRUNE_TYPE_FIELD));
50+
float pruneRatio = 0;
51+
if (pruneType != PruneType.NONE) {
52+
// if we have prune type, then prune ratio field must have value
53+
// readDoubleProperty will throw exception if value is not present
54+
pruneRatio = readDoubleProperty(TYPE, tag, config, PruneUtils.PRUNE_RATIO_FIELD).floatValue();
55+
if (!PruneUtils.isValidPruneRatio(pruneType, pruneRatio)) {
56+
throw new IllegalArgumentException(
57+
String.format(
58+
Locale.ROOT,
59+
"Illegal prune_ratio %f for prune_type: %s. %s",
60+
pruneRatio,
61+
pruneType.getValue(),
62+
PruneUtils.getValidPruneRatioDescription(pruneType)
63+
)
64+
);
65+
}
66+
} else if (config.containsKey(PruneUtils.PRUNE_RATIO_FIELD)) {
67+
// if we don't have prune type, then prune ratio field must not have value
68+
throw new IllegalArgumentException("prune_ratio field is not supported when prune_type is not provided");
69+
}
4370

44-
return new SparseEncodingProcessor(tag, description, batchSize, modelId, fieldMap, clientAccessor, environment, clusterService);
71+
return new SparseEncodingProcessor(
72+
tag,
73+
description,
74+
batchSize,
75+
modelId,
76+
fieldMap,
77+
pruneType,
78+
pruneRatio,
79+
clientAccessor,
80+
environment,
81+
clusterService
82+
);
4583
}
4684
}

src/main/java/org/opensearch/neuralsearch/query/NeuralSparseQueryBuilder.java

+13-10
Original file line numberDiff line numberDiff line change
@@ -47,8 +47,8 @@
4747
import lombok.NoArgsConstructor;
4848
import lombok.Setter;
4949
import lombok.experimental.Accessors;
50-
51-
import static org.opensearch.neuralsearch.processor.NeuralSparseTwoPhaseProcessor.splitQueryTokensByRatioedMaxScoreAsThreshold;
50+
import org.opensearch.neuralsearch.util.prune.PruneType;
51+
import org.opensearch.neuralsearch.util.prune.PruneUtils;
5252

5353
/**
5454
* SparseEncodingQueryBuilder is responsible for handling "neural_sparse" query types. It uses an ML NEURAL_SPARSE model
@@ -90,6 +90,7 @@ public class NeuralSparseQueryBuilder extends AbstractQueryBuilder<NeuralSparseQ
9090
// 2. If it's the sub query only build for two-phase, the value will be set to -1 * ratio of processor.
9191
// Then in the DoToQuery, we can use this to determine which type are this queryBuilder.
9292
private float twoPhasePruneRatio = 0F;
93+
private PruneType twoPhasePruneType = PruneType.NONE;
9394

9495
private static final Version MINIMAL_SUPPORTED_VERSION_DEFAULT_MODEL_ID = Version.V_2_13_0;
9596

@@ -129,22 +130,23 @@ public NeuralSparseQueryBuilder(StreamInput in) throws IOException {
129130

130131
/**
131132
* Copy this QueryBuilder for two phase rescorer, set the copy one's twoPhasePruneRatio to -1.
132-
* @param ratio the parameter of the NeuralSparseTwoPhaseProcessor, control how to split the queryTokens to two phase.
133+
* @param pruneRatio the parameter of the NeuralSparseTwoPhaseProcessor, control how to split the queryTokens to two phase.
133134
* @return A copy NeuralSparseQueryBuilder for twoPhase, it will be added to the rescorer.
134135
*/
135-
public NeuralSparseQueryBuilder getCopyNeuralSparseQueryBuilderForTwoPhase(float ratio) {
136-
this.twoPhasePruneRatio(ratio);
136+
public NeuralSparseQueryBuilder getCopyNeuralSparseQueryBuilderForTwoPhase(float pruneRatio, PruneType pruneType) {
137+
this.twoPhasePruneRatio(pruneRatio);
138+
this.twoPhasePruneType(pruneType);
137139
NeuralSparseQueryBuilder copy = new NeuralSparseQueryBuilder().fieldName(this.fieldName)
138140
.queryName(this.queryName)
139141
.queryText(this.queryText)
140142
.modelId(this.modelId)
141143
.maxTokenScore(this.maxTokenScore)
142-
.twoPhasePruneRatio(-1f * ratio);
144+
.twoPhasePruneRatio(-1f * pruneRatio);
143145
if (Objects.nonNull(this.queryTokensSupplier)) {
144146
Map<String, Float> tokens = queryTokensSupplier.get();
145147
// Splitting tokens based on a threshold value: tokens greater than the threshold are stored in v1,
146148
// while those less than or equal to the threshold are stored in v2.
147-
Tuple<Map<String, Float>, Map<String, Float>> splitTokens = splitQueryTokensByRatioedMaxScoreAsThreshold(tokens, ratio);
149+
Tuple<Map<String, Float>, Map<String, Float>> splitTokens = PruneUtils.splitSparseVector(pruneType, pruneRatio, tokens);
148150
this.queryTokensSupplier(() -> splitTokens.v1());
149151
copy.queryTokensSupplier(() -> splitTokens.v2());
150152
} else {
@@ -346,9 +348,10 @@ private BiConsumer<Client, ActionListener<?>> getModelInferenceAsync(SetOnce<Map
346348
ActionListener.wrap(mapResultList -> {
347349
Map<String, Float> queryTokens = TokenWeightUtil.fetchListOfTokenWeightMap(mapResultList).get(0);
348350
if (Objects.nonNull(twoPhaseSharedQueryToken)) {
349-
Tuple<Map<String, Float>, Map<String, Float>> splitQueryTokens = splitQueryTokensByRatioedMaxScoreAsThreshold(
350-
queryTokens,
351-
twoPhasePruneRatio
351+
Tuple<Map<String, Float>, Map<String, Float>> splitQueryTokens = PruneUtils.splitSparseVector(
352+
twoPhasePruneType,
353+
twoPhasePruneRatio,
354+
queryTokens
352355
);
353356
setOnce.set(splitQueryTokens.v1());
354357
twoPhaseSharedQueryToken = splitQueryTokens.v2();

0 commit comments

Comments
 (0)