Skip to content

Commit

Permalink
Fix deadlock when deleting timeseries after loading data.
Browse files Browse the repository at this point in the history
  • Loading branch information
ColinLeeo authored Jan 24, 2024
1 parent 5c6c972 commit 2072603
Show file tree
Hide file tree
Showing 4 changed files with 20 additions and 10 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -220,6 +220,15 @@ public void testLoad() throws Exception {
}
}
}

// Try deleting the timeseries after loading data; expect no deadlock.
try (Connection connection = EnvFactory.getEnv().getConnection();
Statement statement = connection.createStatement()) {
statement.execute(
String.format(
"delete timeseries %s.%s",
SchemaConfig.DEVICE_0, SchemaConfig.MEASUREMENT_00.getMeasurementId()));
}
}

@Test
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ public class MPPQueryContext {

private Filter globalTimeFilter;

private boolean acquiredLock;
private int acquiredLockNum;

public MPPQueryContext(QueryId queryId) {
this.queryId = queryId;
Expand Down Expand Up @@ -157,12 +157,12 @@ public String getSql() {
return sql;
}

public boolean getAcquiredLock() {
return acquiredLock;
/**
 * Returns how many times this query has acquired the schema-cache insert
 * lock; the executor must release the lock exactly this many times.
 */
public int getAcquiredLockNum() {
  return this.acquiredLockNum;
}

public void setAcquiredLock(boolean acuqired) {
acquiredLock = acuqired;
/**
 * Records one more acquisition of the schema-cache insert lock by this
 * query, so the matching number of releases can be performed later.
 */
public void addAcquiredLockNum() {
  acquiredLockNum += 1;
}

public void generateGlobalTimeFilter(Analysis analysis) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -162,8 +162,9 @@ public ExecutionResult execute(
execution.start();
return execution.getStatus();
} finally {
  // Release every insert lock this query acquired during schema fetch (the
  // lock may have been taken more than once, hence the counter).
  // NOTE: the null check must come BEFORE reading the lock count — calling
  // queryContext.getAcquiredLockNum() first would NPE on a null context
  // inside finally, masking the original exception.
  if (queryContext != null) {
    int lockNums = queryContext.getAcquiredLockNum();
    for (int i = 0; i < lockNums; i++) {
      DataNodeSchemaCache.getInstance().releaseInsertLock();
    }
  }
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -166,7 +166,7 @@ public void fetchAndComputeSchemaWithAutoCreate(
// The schema cache R/W and fetch operation must be locked together thus the cache clean
// operation executed by delete timeseries will be effective.
schemaCache.takeInsertLock();
context.setAcquiredLock(true);
context.addAcquiredLockNum();
schemaCache.takeReadLock();
try {
Pair<Template, PartialPath> templateSetInfo =
Expand Down Expand Up @@ -204,7 +204,7 @@ public void fetchAndComputeSchemaWithAutoCreate(
// The schema cache R/W and fetch operation must be locked together thus the cache clean
// operation executed by delete timeseries will be effective.
schemaCache.takeInsertLock();
context.setAcquiredLock(true);
context.addAcquiredLockNum();
schemaCache.takeReadLock();
try {

Expand Down Expand Up @@ -248,7 +248,7 @@ public ISchemaTree fetchSchemaListWithAutoCreate(
// The schema cache R/W and fetch operation must be locked together thus the cache clean
// operation executed by delete timeseries will be effective.
schemaCache.takeInsertLock();
context.setAcquiredLock(true);
context.addAcquiredLockNum();
schemaCache.takeReadLock();
try {
ClusterSchemaTree schemaTree = new ClusterSchemaTree();
Expand Down

0 comments on commit 2072603

Please sign in to comment.