-
Notifications
You must be signed in to change notification settings - Fork 18
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
fix: Progress bucket count query if empty (#645)
* The bucket queries continue from the end of the previous query. * A limit of 10000 buckets times 10 seconds covers around 28 hours, meaning that if there were no new events for 28 hours the queries would not progress, since no further buckets would be found. * The solution here is to append an empty bucket at the end in that case. * Use the last bucket as the continuation point. * Reintroduce clearUntil to reduce memory use.
- Loading branch information
Showing
10 changed files
with
203 additions
and
102 deletions.
There are no files selected for viewing
2 changes: 2 additions & 0 deletions
2
core/src/main/mima-filters/1.3.1.backwards.excludes/buckets.excludes
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,2 @@ | ||
# internal | ||
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.r2dbc.internal.BySliceQuery#Buckets.this") |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
96 changes: 96 additions & 0 deletions
96
core/src/test/scala/akka/persistence/r2dbc/query/BucketCountSpec.scala
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,96 @@ | ||
/* | ||
* Copyright (C) 2022 - 2024 Lightbend Inc. <https://www.lightbend.com> | ||
*/ | ||
|
||
package akka.persistence.r2dbc.query | ||
|
||
import org.scalatest.wordspec.AnyWordSpecLike | ||
|
||
import akka.actor.testkit.typed.scaladsl.LogCapturing | ||
import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit | ||
import akka.actor.typed.ActorSystem | ||
import akka.persistence.r2dbc.TestConfig | ||
import akka.persistence.r2dbc.TestData | ||
import akka.persistence.r2dbc.TestDbLifecycle | ||
import akka.persistence.r2dbc.internal.BySliceQuery.Buckets | ||
import akka.persistence.r2dbc.internal.InstantFactory | ||
|
||
/**
 * Integration test for the dialect-specific query DAO's bucket counting
 * (`countBuckets`), which groups events into 10-second buckets by slice range.
 *
 * Covers the fix for progress stalling when no new events arrive: when the
 * query window (limit * bucket duration) ends before "now" and the last
 * bucket is empty, an empty trailing bucket is appended so the query can
 * continue from there instead of repeatedly finding nothing.
 *
 * NOTE(review): requires a live database via TestDbLifecycle; both tests are
 * pending when more than one data partition is configured.
 */
class BucketCountSpec
    extends ScalaTestWithActorTestKit(TestConfig.config)
    with AnyWordSpecLike
    with TestDbLifecycle
    with TestData
    with LogCapturing {

  override def typedSystem: ActorSystem[_] = system

  // Dialect-specific DAO under test, created from the test settings.
  private val dao = settings.connectionFactorySettings.dialect.createQueryDao(r2dbcExecutorProvider)

  "BySliceQuery.Dao" should {

    "count events in 10 second buckets" in {
      pendingIfMoreThanOneDataPartition()

      val entityType = nextEntityType()
      val pid1 = nextPid(entityType)
      val pid2 = nextPid(entityType)
      val slice1 = persistenceExt.sliceForPersistenceId(pid1)
      val slice2 = persistenceExt.sliceForPersistenceId(pid2)

      // Start an hour in the past so all written events fall before "now".
      val startTime = InstantFactory.now().minusSeconds(3600)
      // Expected start of the first bucket: startTime truncated to a 10 s boundary.
      val bucketStartTime = (startTime.getEpochSecond / 10) * 10

      // One event per persistence id in each of 10 consecutive buckets
      // (seqNr starts at 1; payload identifies the writing pid).
      (0 until 10).foreach { i =>
        writeEvent(slice1, pid1, 1 + i, startTime.plusSeconds(Buckets.BucketDurationSeconds * i), s"e1-$i")
        writeEvent(slice2, pid2, 1 + i, startTime.plusSeconds(Buckets.BucketDurationSeconds * i), s"e2-$i")
      }

      val buckets =
        dao
          .countBuckets(entityType, 0, persistenceExt.numberOfSlices - 1, startTime, Buckets.Limit)
          .futureValue
      withClue(s"startTime $startTime ($bucketStartTime): ") {
        buckets.size shouldBe 10
        buckets.head.startTime shouldBe bucketStartTime
        buckets.last.startTime shouldBe (bucketStartTime + 9 * Buckets.BucketDurationSeconds)
        // Every bucket holds exactly one event from each of the two pids.
        buckets.map(_.count).toSet shouldBe Set(2)
        buckets.map(_.count).sum shouldBe (2 * 10)
      }
    }

    "append empty bucket if no events in the last bucket, limit before now" in {
      pendingIfMoreThanOneDataPartition()

      val entityType = nextEntityType()
      val pid1 = nextPid(entityType)
      val pid2 = nextPid(entityType)
      val slice1 = persistenceExt.sliceForPersistenceId(pid1)
      val slice2 = persistenceExt.sliceForPersistenceId(pid2)

      // Small limit so the query window (limit * 10 s) ends well before now,
      // leaving a gap with no events after the written buckets.
      val limit = 100
      val startTime = InstantFactory.now().minusSeconds(3600)
      val bucketStartTime = (startTime.getEpochSecond / 10) * 10

      (0 until 10).foreach { i =>
        writeEvent(slice1, pid1, 1 + i, startTime.plusSeconds(Buckets.BucketDurationSeconds * i), s"e1-$i")
        writeEvent(slice2, pid2, 1 + i, startTime.plusSeconds(Buckets.BucketDurationSeconds * i), s"e2-$i")
      }

      val buckets =
        dao
          .countBuckets(entityType, 0, persistenceExt.numberOfSlices - 1, startTime, limit)
          .futureValue
      withClue(s"startTime $startTime ($bucketStartTime): ") {
        // 10 real buckets plus the appended empty bucket that lets the
        // query progress past the event-free tail of the window.
        buckets.size shouldBe 11
        buckets.head.startTime shouldBe bucketStartTime
        // the toTimestamp of the sql query is one bucket more than fromTimestamp + (limit * BucketDurationSeconds)
        buckets.last.startTime shouldBe (bucketStartTime + (limit + 1) * Buckets.BucketDurationSeconds)
        buckets.last.count shouldBe 0
        buckets.dropRight(1).map(_.count).toSet shouldBe Set(2)
        buckets.map(_.count).sum shouldBe (2 * 10)
      }
    }

  }

}
Oops, something went wrong.