From d5bb924d11a3ab8241b961bdfa8fb96662a9fa01 Mon Sep 17 00:00:00 2001 From: Eric Zimanyi Date: Thu, 23 Jul 2020 10:35:25 -0400 Subject: [PATCH] perf(test): Speed up orca-sql tests (#3827) * perf(test): Speed up PgSQL tests When overriding setupSpec, Spock runs both the parent (super.setupSpec) and the subclass setupSpec (in that order). This means that for the PgSQL tests, we're creating a MySQL database then immediately discarding it to replace it with a PgSQL database. As creating the database spins up a container and is reasonably slow, lightly refactor this so that we don't do this extra work. * refactor(test): Push some logic down from execution repository tests There's a lot of coupling between PipelineExecutionRepositoryTck and its subclasses that causes initialization order dependencies. In order to make it easier to make changes (such as changing what fields are updated in setup vs setupSpec), refactor this so that each subclass is fully responsible for initialization and we can control the order. The main issue here is that the super class was storing and initializing the ExecutionRepository, but we can't actually create the ExecutionRepository until we've created the persistence store, which is owned by the implementations. So we were subtly depending on each subclass having already initialized its persistence store before we create the ExecutionRepository. Now it's up to each subclass to handle this order-dependence and expose a method to get the ExecutionRepository. * perf(test): Share execution repository among tests Given that we share the actual persistence between tests, there's not much benefit to creating a new ExecutionRepository for each test. Speed the tests up by re-using it between tests. * perf(test): Remove previous repository from base test The previous repository is only used in the Jedis tests; remove it from the base class so we can avoid creating it and cleaning it up after every test. 
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- .../PipelineExecutionRepositoryTck.groovy | 182 ++++++++---------- ...edisPipelineExecutionRepositorySpec.groovy | 58 ++++-- ...CleanupPollingNotificationAgentSpec.groovy | 19 +- .../SqlPipelineExecutionRepositorySpec.groovy | 36 ++-- 4 files changed, 164 insertions(+), 131 deletions(-) diff --git a/orca-core-tck/src/main/groovy/com/netflix/spinnaker/orca/pipeline/persistence/PipelineExecutionRepositoryTck.groovy b/orca-core-tck/src/main/groovy/com/netflix/spinnaker/orca/pipeline/persistence/PipelineExecutionRepositoryTck.groovy index 75f2e5b020e..5e0d65e87ef 100644 --- a/orca-core-tck/src/main/groovy/com/netflix/spinnaker/orca/pipeline/persistence/PipelineExecutionRepositoryTck.groovy +++ b/orca-core-tck/src/main/groovy/com/netflix/spinnaker/orca/pipeline/persistence/PipelineExecutionRepositoryTck.groovy @@ -45,23 +45,9 @@ import static java.time.temporal.ChronoUnit.HOURS @Subject(ExecutionRepository) @Unroll abstract class PipelineExecutionRepositoryTck extends Specification { - - @Subject - ExecutionRepository repository - - @Subject - ExecutionRepository previousRepository - def clock = Clock.fixed(Instant.now(), UTC) - void setup() { - repository = createExecutionRepository() - previousRepository = createExecutionRepositoryPrevious() - } - - abstract T createExecutionRepository() - - abstract T createExecutionRepositoryPrevious() + abstract T repository() def "can retrieve pipelines by status"() { given: @@ -75,9 +61,9 @@ abstract class PipelineExecutionRepositoryTck ext } when: - repository.store(runningExecution) - repository.store(succeededExecution) - def pipelines = repository.retrievePipelinesForPipelineConfigId( + repository().store(runningExecution) + repository().store(succeededExecution) + def pipelines = repository().retrievePipelinesForPipelineConfigId( "pipeline-1", new ExecutionCriteria(pageSize: 5, statuses: ["RUNNING", "SUCCEEDED", "TERMINAL"]) 
).subscribeOn(Schedulers.io()).toList().toBlocking().single() @@ -85,7 +71,7 @@ abstract class PipelineExecutionRepositoryTck ext pipelines*.id.sort() == [runningExecution.id, succeededExecution.id].sort() when: - pipelines = repository.retrievePipelinesForPipelineConfigId( + pipelines = repository().retrievePipelinesForPipelineConfigId( "pipeline-1", new ExecutionCriteria(pageSize: 5, statuses: ["RUNNING"]) ).subscribeOn(Schedulers.io()).toList().toBlocking().single() @@ -93,7 +79,7 @@ abstract class PipelineExecutionRepositoryTck ext pipelines*.id.sort() == [runningExecution.id].sort() when: - pipelines = repository.retrievePipelinesForPipelineConfigId( + pipelines = repository().retrievePipelinesForPipelineConfigId( "pipeline-1", new ExecutionCriteria(pageSize: 5, statuses: ["TERMINAL"]) ).subscribeOn(Schedulers.io()).toList().toBlocking().single() @@ -116,9 +102,9 @@ abstract class PipelineExecutionRepositoryTck ext } when: - repository.store(runningExecution) - repository.store(succeededExecution) - def orchestrations = repository.retrieveOrchestrationsForApplication( + repository().store(runningExecution) + repository().store(succeededExecution) + def orchestrations = repository().retrieveOrchestrationsForApplication( runningExecution.application, new ExecutionCriteria(pageSize: 5, statuses: ["RUNNING", "SUCCEEDED", "TERMINAL"]) ).subscribeOn(Schedulers.io()).toList().toBlocking().single() @@ -126,7 +112,7 @@ abstract class PipelineExecutionRepositoryTck ext orchestrations*.id.sort() == [runningExecution.id, succeededExecution.id].sort() when: - orchestrations = repository.retrieveOrchestrationsForApplication( + orchestrations = repository().retrieveOrchestrationsForApplication( runningExecution.application, new ExecutionCriteria(pageSize: 5, statuses: ["RUNNING"]) ).subscribeOn(Schedulers.io()).toList().toBlocking().single() @@ -134,7 +120,7 @@ abstract class PipelineExecutionRepositoryTck ext orchestrations*.id.sort() == [runningExecution.id].sort() when: - 
orchestrations = repository.retrieveOrchestrationsForApplication( + orchestrations = repository().retrieveOrchestrationsForApplication( runningExecution.application, new ExecutionCriteria(pageSize: 5, statuses: ["TERMINAL"]) ).subscribeOn(Schedulers.io()).toList().toBlocking().single() @@ -158,9 +144,9 @@ abstract class PipelineExecutionRepositoryTck ext } when: - repository.store(runningExecution) - repository.store(succeededExecution) - def orchestrations = repository.retrieveOrchestrationsForApplication( + repository().store(runningExecution) + repository().store(succeededExecution) + def orchestrations = repository().retrieveOrchestrationsForApplication( runningExecution.application, new ExecutionCriteria(pageSize: 5, statuses: ["RUNNING", "SUCCEEDED", "TERMINAL"]), NATURAL_ASC @@ -172,7 +158,7 @@ abstract class PipelineExecutionRepositoryTck ext orchestrations*.id == [succeededExecution.id, runningExecution.id] when: - orchestrations = repository.retrieveOrchestrationsForApplication( + orchestrations = repository().retrieveOrchestrationsForApplication( runningExecution.application, new ExecutionCriteria(pageSize: 5, statuses: ["RUNNING"]), NATURAL_ASC @@ -182,7 +168,7 @@ abstract class PipelineExecutionRepositoryTck ext orchestrations*.id == [runningExecution.id] when: - orchestrations = repository.retrieveOrchestrationsForApplication( + orchestrations = repository().retrieveOrchestrationsForApplication( runningExecution.application, new ExecutionCriteria(pageSize: 5, statuses: ["TERMINAL"]), NATURAL_ASC @@ -208,11 +194,11 @@ abstract class PipelineExecutionRepositoryTck ext startTime = config.startTime } }.forEach { - repository.store(it) + repository().store(it) } when: - def orchestrations = repository.retrieveOrchestrationsForApplication( + def orchestrations = repository().retrieveOrchestrationsForApplication( "covfefe", new ExecutionCriteria().with { startTimeCutoff = clock @@ -255,12 +241,12 @@ abstract class PipelineExecutionRepositoryTck ext } } def 
application = pipeline.application - repository.store(pipeline) + repository().store(pipeline) expect: - repository.retrieve(PIPELINE).toBlocking().first().id == pipeline.id + repository().retrieve(PIPELINE).toBlocking().first().id == pipeline.id - with(repository.retrieve(pipeline.type, pipeline.id)) { + with(repository().retrieve(pipeline.type, pipeline.id)) { id == pipeline.id application == pipeline.application name == pipeline.name @@ -277,7 +263,7 @@ abstract class PipelineExecutionRepositoryTck ext def "trying to retrieve an invalid #type.simpleName id throws an exception"() { when: - repository.retrieve(type, "invalid") + repository().retrieve(type, "invalid") then: thrown ExecutionNotFoundException @@ -288,7 +274,7 @@ abstract class PipelineExecutionRepositoryTck ext def "trying to delete a non-existent #type.simpleName id does not throw an exception"() { when: - repository.delete(type, "invalid") + repository().delete(type, "invalid") then: notThrown ExecutionNotFoundException @@ -317,33 +303,33 @@ abstract class PipelineExecutionRepositoryTck ext } and: - repository.store(pipeline) - repository.delete(PIPELINE, pipeline.id) + repository().store(pipeline) + repository().delete(PIPELINE, pipeline.id) when: - repository.retrieve(PIPELINE, pipeline.id) + repository().retrieve(PIPELINE, pipeline.id) then: thrown ExecutionNotFoundException and: - repository.retrieve(PIPELINE).toList().toBlocking().first() == [] + repository().retrieve(PIPELINE).toList().toBlocking().first() == [] } def "updateStatus sets startTime to current time if new status is RUNNING"() { given: - repository.store(execution) + repository().store(execution) expect: - with(repository.retrieve(execution.type, execution.id)) { + with(repository().retrieve(execution.type, execution.id)) { startTime == null } when: - repository.updateStatus(execution.type, execution.id, RUNNING) + repository().updateStatus(execution.type, execution.id, RUNNING) then: - with(repository.retrieve(execution.type, 
execution.id)) { + with(repository().retrieve(execution.type, execution.id)) { status == RUNNING startTime != null } @@ -359,19 +345,19 @@ abstract class PipelineExecutionRepositoryTck ext def "updateStatus sets endTime to current time if new status is #status"() { given: execution.startTime = System.currentTimeMillis() - repository.store(execution) + repository().store(execution) expect: - with(repository.retrieve(execution.type, execution.id)) { + with(repository().retrieve(execution.type, execution.id)) { startTime != null endTime == null } when: - repository.updateStatus(execution.type, execution.id, status) + repository().updateStatus(execution.type, execution.id, status) then: - with(repository.retrieve(execution.type, execution.id)) { + with(repository().retrieve(execution.type, execution.id)) { status == status endTime != null } @@ -385,19 +371,19 @@ abstract class PipelineExecutionRepositoryTck ext def "updateStatus does not set endTime if a pipeline never started"() { given: - repository.store(execution) + repository().store(execution) expect: - with(repository.retrieve(execution.type, execution.id)) { + with(repository().retrieve(execution.type, execution.id)) { startTime == null endTime == null } when: - repository.updateStatus(execution.type, execution.id, status) + repository().updateStatus(execution.type, execution.id, status) then: - with(repository.retrieve(execution.type, execution.id)) { + with(repository().retrieve(execution.type, execution.id)) { status == status endTime == null } @@ -410,19 +396,19 @@ abstract class PipelineExecutionRepositoryTck ext def "cancelling a not-yet-started execution updates the status immediately"() { given: def execution = pipeline() - repository.store(execution) + repository().store(execution) expect: - with(repository.retrieve(execution.type, execution.id)) { + with(repository().retrieve(execution.type, execution.id)) { status == NOT_STARTED } when: - repository.cancel(execution.type, execution.id) + 
repository().cancel(execution.type, execution.id) then: - with(repository.retrieve(execution.type, execution.id)) { + with(repository().retrieve(execution.type, execution.id)) { canceled status == CANCELED } @@ -431,20 +417,20 @@ abstract class PipelineExecutionRepositoryTck ext def "cancelling a running execution does not update the status immediately"() { given: def execution = pipeline() - repository.store(execution) - repository.updateStatus(execution.type, execution.id, RUNNING) + repository().store(execution) + repository().updateStatus(execution.type, execution.id, RUNNING) expect: - with(repository.retrieve(execution.type, execution.id)) { + with(repository().retrieve(execution.type, execution.id)) { status == RUNNING } when: - repository.cancel(execution.type, execution.id) + repository().cancel(execution.type, execution.id) then: - with(repository.retrieve(execution.type, execution.id)) { + with(repository().retrieve(execution.type, execution.id)) { canceled status == RUNNING } @@ -455,20 +441,20 @@ abstract class PipelineExecutionRepositoryTck ext given: def execution = pipeline() def user = "user@netflix.com" - repository.store(execution) - repository.updateStatus(execution.type, execution.id, RUNNING) + repository().store(execution) + repository().updateStatus(execution.type, execution.id, RUNNING) expect: - with(repository.retrieve(execution.type, execution.id)) { + with(repository().retrieve(execution.type, execution.id)) { status == RUNNING } when: - repository.cancel(execution.type, execution.id, user, reason) + repository().cancel(execution.type, execution.id, user, reason) then: - with(repository.retrieve(execution.type, execution.id)) { + with(repository().retrieve(execution.type, execution.id)) { canceled canceledBy == user status == RUNNING @@ -485,14 +471,14 @@ abstract class PipelineExecutionRepositoryTck ext def "pausing/resuming a running execution will set appropriate 'paused' details"() { given: def execution = pipeline() - 
repository.store(execution) - repository.updateStatus(execution.type, execution.id, RUNNING) + repository().store(execution) + repository().updateStatus(execution.type, execution.id, RUNNING) when: - repository.pause(execution.type, execution.id, "user@netflix.com") + repository().pause(execution.type, execution.id, "user@netflix.com") then: - with(repository.retrieve(execution.type, execution.id)) { + with(repository().retrieve(execution.type, execution.id)) { status == PAUSED paused.pauseTime != null paused.resumeTime == null @@ -502,10 +488,10 @@ abstract class PipelineExecutionRepositoryTck ext } when: - repository.resume(execution.type, execution.id, "another@netflix.com") + repository().resume(execution.type, execution.id, "another@netflix.com") then: - with(repository.retrieve(execution.type, execution.id)) { + with(repository().retrieve(execution.type, execution.id)) { status == RUNNING paused.pauseTime != null paused.resumeTime != null @@ -520,11 +506,11 @@ abstract class PipelineExecutionRepositoryTck ext def "should only #method a #expectedStatus execution"() { given: def execution = pipeline() - repository.store(execution) - repository.updateStatus(execution.type, execution.id, status) + repository().store(execution) + repository().updateStatus(execution.type, execution.id, status) when: - repository."${method}"(execution.type, execution.id, "user@netflix.com") + repository()."${method}"(execution.type, execution.id, "user@netflix.com") then: def e = thrown(IllegalStateException) @@ -542,20 +528,20 @@ abstract class PipelineExecutionRepositoryTck ext def "should force resume an execution regardless of status"() { given: def execution = pipeline() - repository.store(execution) - repository.updateStatus(execution.type, execution.id, RUNNING) + repository().store(execution) + repository().updateStatus(execution.type, execution.id, RUNNING) when: - repository.pause(execution.type, execution.id, "user@netflix.com") - execution = 
repository.retrieve(execution.type, execution.id) + repository().pause(execution.type, execution.id, "user@netflix.com") + execution = repository().retrieve(execution.type, execution.id) then: execution.paused.isPaused() when: - repository.updateStatus(execution.type, execution.id, status) - repository.resume(execution.type, execution.id, "user@netflix.com", true) - execution = repository.retrieve(execution.type, execution.id) + repository().updateStatus(execution.type, execution.id, status) + repository().resume(execution.type, execution.id, "user@netflix.com", true) + execution = repository().retrieve(execution.type, execution.id) then: execution.status == RUNNING @@ -570,18 +556,18 @@ abstract class PipelineExecutionRepositoryTck ext def execution = orchestration { trigger = new DefaultTrigger("manual", "covfefe") } - repository.store(execution) - repository.updateStatus(execution.type, execution.id, RUNNING) + repository().store(execution) + repository().updateStatus(execution.type, execution.id, RUNNING) when: - def result = repository.retrieveOrchestrationForCorrelationId('covfefe') + def result = repository().retrieveOrchestrationForCorrelationId('covfefe') then: result.id == execution.id when: - repository.updateStatus(execution.type, execution.id, SUCCEEDED) - repository.retrieveOrchestrationForCorrelationId('covfefe') + repository().updateStatus(execution.type, execution.id, SUCCEEDED) + repository().retrieveOrchestrationForCorrelationId('covfefe') then: thrown(ExecutionNotFoundException) @@ -592,10 +578,10 @@ abstract class PipelineExecutionRepositoryTck ext def execution = pipeline { trigger = new PipelineTrigger(pipeline()) } - repository.store(execution) + repository().store(execution) expect: - with(repository.retrieve(PIPELINE, execution.id)) { + with(repository().retrieve(PIPELINE, execution.id)) { trigger.parentExecution instanceof PipelineExecution } } @@ -606,10 +592,10 @@ abstract class PipelineExecutionRepositoryTck ext for (status in 
ExecutionStatus.values()) { pipeline { setStatus(status) - }.with { execution -> repository.store(execution) } + }.with { execution -> repository().store(execution) } orchestration { setStatus(status) - }.with { execution -> repository.store(execution) } + }.with { execution -> repository().store(execution) } } and: @@ -618,7 +604,7 @@ abstract class PipelineExecutionRepositoryTck ext .setPageSize(limit) expect: - with(repository.retrieve(type, criteria).toList().toBlocking().single()) { + with(repository().retrieve(type, criteria).toList().toBlocking().single()) { size() == expectedResults type.every { it == type } if (statuses) { @@ -654,11 +640,11 @@ abstract class PipelineExecutionRepositoryTck ext } when: - repository.store(execution1) - repository.store(execution2) - repository.store(execution3) - repository.store(execution4) - def apps = repository.retrieveAllApplicationNames(executionType, minExecutions) + repository().store(execution1) + repository().store(execution2) + repository().store(execution3) + repository().store(execution4) + def apps = repository().retrieveAllApplicationNames(executionType, minExecutions) then: apps.sort() == expectedApps.sort() diff --git a/orca-redis/src/test/groovy/com/netflix/spinnaker/orca/pipeline/persistence/jedis/JedisPipelineExecutionRepositorySpec.groovy b/orca-redis/src/test/groovy/com/netflix/spinnaker/orca/pipeline/persistence/jedis/JedisPipelineExecutionRepositorySpec.groovy index a09d3696517..979e4c50abf 100644 --- a/orca-redis/src/test/groovy/com/netflix/spinnaker/orca/pipeline/persistence/jedis/JedisPipelineExecutionRepositorySpec.groovy +++ b/orca-redis/src/test/groovy/com/netflix/spinnaker/orca/pipeline/persistence/jedis/JedisPipelineExecutionRepositorySpec.groovy @@ -19,7 +19,6 @@ import com.netflix.spectator.api.NoopRegistry import com.netflix.spectator.api.Registry import com.netflix.spinnaker.kork.jedis.EmbeddedRedis import com.netflix.spinnaker.kork.jedis.JedisClientDelegate -import 
com.netflix.spinnaker.kork.jedis.RedisClientDelegate import com.netflix.spinnaker.kork.jedis.RedisClientSelector import com.netflix.spinnaker.orca.api.pipeline.models.StageExecution import com.netflix.spinnaker.orca.pipeline.StageExecutionFactory @@ -31,6 +30,7 @@ import redis.clients.jedis.Jedis import redis.clients.jedis.util.Pool import spock.lang.AutoCleanup import spock.lang.Shared +import spock.lang.Subject import spock.lang.Unroll import java.util.concurrent.CountDownLatch @@ -56,9 +56,47 @@ class JedisPipelineExecutionRepositorySpec extends PipelineExecutionRepositoryTc @AutoCleanup("destroy") EmbeddedRedis embeddedRedisPrevious + @Shared + @Subject + RedisExecutionRepository repository + + @Override + RedisExecutionRepository repository() { + return repository + } + + @Shared + @Subject + RedisExecutionRepository previousRepository + + RedisExecutionRepository previousRepository() { + return previousRepository + } + + @Shared + Pool jedisPool + + @Shared + Pool jedisPoolPrevious + + @Shared + Jedis jedis + + @Shared + RedisClientSelector redisClientSelector + def setupSpec() { embeddedRedis = EmbeddedRedis.embed() embeddedRedisPrevious = EmbeddedRedis.embed() + jedisPool = embeddedRedis.pool + jedisPoolPrevious = embeddedRedisPrevious.pool + jedis = jedisPool.resource + redisClientSelector = new RedisClientSelector([ + new JedisClientDelegate("primaryDefault", jedisPool), + new JedisClientDelegate("previousDefault", jedisPoolPrevious) + ]) + repository = createExecutionRepository() + previousRepository = createExecutionRepositoryPrevious() } def cleanup() { @@ -66,26 +104,12 @@ class JedisPipelineExecutionRepositorySpec extends PipelineExecutionRepositoryTc embeddedRedisPrevious.jedis.withCloseable { it.flushDB() } } - Pool jedisPool = embeddedRedis.pool - Pool jedisPoolPrevious = embeddedRedisPrevious.pool - - RedisClientDelegate redisClientDelegate = new JedisClientDelegate("primaryDefault", jedisPool) - RedisClientDelegate previousRedisClientDelegate = 
new JedisClientDelegate("previousDefault", jedisPoolPrevious) - RedisClientSelector redisClientSelector = new RedisClientSelector([redisClientDelegate, previousRedisClientDelegate]) - - Registry registry = new NoopRegistry() - - @AutoCleanup - def jedis = jedisPool.resource - - @Override RedisExecutionRepository createExecutionRepository() { - return new RedisExecutionRepository(registry, redisClientSelector, 1, 50) + return new RedisExecutionRepository(new NoopRegistry(), redisClientSelector, 1, 50) } - @Override RedisExecutionRepository createExecutionRepositoryPrevious() { - return new RedisExecutionRepository(registry, new RedisClientSelector([new JedisClientDelegate("primaryDefault", jedisPoolPrevious)]), 1, 50) + return new RedisExecutionRepository(new NoopRegistry(), new RedisClientSelector([new JedisClientDelegate("primaryDefault", jedisPoolPrevious)]), 1, 50) } diff --git a/orca-sql/src/test/groovy/com/netflix/spinnaker/orca/sql/cleanup/OldPipelineCleanupPollingNotificationAgentSpec.groovy b/orca-sql/src/test/groovy/com/netflix/spinnaker/orca/sql/cleanup/OldPipelineCleanupPollingNotificationAgentSpec.groovy index 09b42a1a2d1..a940fda711c 100644 --- a/orca-sql/src/test/groovy/com/netflix/spinnaker/orca/sql/cleanup/OldPipelineCleanupPollingNotificationAgentSpec.groovy +++ b/orca-sql/src/test/groovy/com/netflix/spinnaker/orca/sql/cleanup/OldPipelineCleanupPollingNotificationAgentSpec.groovy @@ -43,13 +43,15 @@ import static com.netflix.spinnaker.orca.api.pipeline.models.ExecutionType.PIPEL import static com.netflix.spinnaker.kork.sql.test.SqlTestUtil.initTcMysqlDatabase import static java.time.temporal.ChronoUnit.DAYS -class OldPipelineCleanupPollingNotificationAgentSpec extends Specification { +abstract class OldPipelineCleanupPollingNotificationAgentSpec extends Specification { @Shared ObjectMapper mapper = OrcaObjectMapper.newInstance().with { registerModule(new KotlinModule()) it } + abstract SqlTestUtil.TestDatabase getDatabase() + @Shared 
@AutoCleanup("close") SqlTestUtil.TestDatabase currentDatabase @@ -75,7 +77,7 @@ class OldPipelineCleanupPollingNotificationAgentSpec extends Specification { ) def setupSpec() { - currentDatabase = initTcMysqlDatabase() + currentDatabase = getDatabase() executionRepository = new SqlExecutionRepository("test", currentDatabase.context, mapper, new RetryProperties(), 10, 100, "poolName", null) } @@ -146,9 +148,16 @@ class OldPipelineCleanupPollingNotificationAgentSpec extends Specification { } } +class MySqlOldPipelineCleanupPollingNotificationAgentSpec extends OldPipelineCleanupPollingNotificationAgentSpec { + @Override + SqlTestUtil.TestDatabase getDatabase() { + return initTcMysqlDatabase() + } +} + class PgOldPipelineCleanupPollingNotificationAgentSpec extends OldPipelineCleanupPollingNotificationAgentSpec { - def setupSpec() { - currentDatabase = initTcPostgresDatabase() - executionRepository = new SqlExecutionRepository("test", currentDatabase.context, mapper, new RetryProperties(), 10, 100, "poolName", null) + @Override + SqlTestUtil.TestDatabase getDatabase() { + return initTcPostgresDatabase() } } diff --git a/orca-sql/src/test/groovy/com/netflix/spinnaker/orca/sql/pipeline/persistence/SqlPipelineExecutionRepositorySpec.groovy b/orca-sql/src/test/groovy/com/netflix/spinnaker/orca/sql/pipeline/persistence/SqlPipelineExecutionRepositorySpec.groovy index c87ff255dff..e4de3c5a3d4 100644 --- a/orca-sql/src/test/groovy/com/netflix/spinnaker/orca/sql/pipeline/persistence/SqlPipelineExecutionRepositorySpec.groovy +++ b/orca-sql/src/test/groovy/com/netflix/spinnaker/orca/sql/pipeline/persistence/SqlPipelineExecutionRepositorySpec.groovy @@ -38,6 +38,7 @@ import rx.schedulers.Schedulers import de.huxhorn.sulky.ulid.ULID import spock.lang.AutoCleanup import spock.lang.Shared +import spock.lang.Subject import spock.lang.Unroll import static com.netflix.spinnaker.kork.sql.test.SqlTestUtil.cleanupDb @@ -50,7 +51,7 @@ import static 
com.netflix.spinnaker.orca.pipeline.persistence.ExecutionRepositor import static com.netflix.spinnaker.orca.test.model.ExecutionBuilder.orchestration import static com.netflix.spinnaker.orca.test.model.ExecutionBuilder.pipeline -class SqlPipelineExecutionRepositorySpec extends PipelineExecutionRepositoryTck { +abstract class SqlPipelineExecutionRepositorySpec extends PipelineExecutionRepositoryTck { @Shared ObjectMapper mapper = OrcaObjectMapper.newInstance().with { @@ -60,6 +61,15 @@ class SqlPipelineExecutionRepositorySpec extends PipelineExecutionRepositoryTck< def ulid = new ULID() + @Shared + @Subject + ExecutionRepository repository + + @Override + ExecutionRepository repository() { + return repository + } + @Shared @AutoCleanup("close") TestDatabase currentDatabase @@ -68,25 +78,21 @@ class SqlPipelineExecutionRepositorySpec extends PipelineExecutionRepositoryTck< @AutoCleanup("close") TestDatabase previousDatabase + abstract TestDatabase getDatabase() + def setupSpec() { - currentDatabase = initDualTcMysqlDatabases() + currentDatabase = getDatabase() + repository = createExecutionRepository() } def cleanup() { cleanupDb(currentDatabase.context) - cleanupDb(currentDatabase.previousContext) } - @Override ExecutionRepository createExecutionRepository() { return createExecutionRepository("test") } - @Override - ExecutionRepository createExecutionRepositoryPrevious() { - new SqlExecutionRepository("test", currentDatabase.previousContext, mapper, new RetryProperties(), 10, 100, "poolName", null) - } - ExecutionRepository createExecutionRepository(String partition, Interlink interlink = null) { return com.netflix.spinnaker.kork.telemetry.InstrumentedProxy.proxy( new DefaultRegistry(), @@ -667,8 +673,16 @@ class SqlPipelineExecutionRepositorySpec extends PipelineExecutionRepositoryTck< } } +class MySqlPipelineExecutionRepositorySpec extends SqlPipelineExecutionRepositorySpec { + @Override + TestDatabase getDatabase() { + return initDualTcMysqlDatabases() + } +} + 
class PgSqlPipelineExecutionRepositorySpec extends SqlPipelineExecutionRepositorySpec { - def setupSpec() { - currentDatabase = initDualTcPostgresDatabases() + @Override + TestDatabase getDatabase() { + return initDualTcPostgresDatabases() } }