diff --git a/database/migration/src/main/resources/changelog.xml b/database/migration/src/main/resources/changelog.xml
index 49d568766b3..cecc4e7b709 100644
--- a/database/migration/src/main/resources/changelog.xml
+++ b/database/migration/src/main/resources/changelog.xml
@@ -90,6 +90,12 @@
+
+
diff --git a/database/migration/src/main/resources/changesets/set_table_role.xml b/database/migration/src/main/resources/changesets/set_table_role.xml
new file mode 100644
index 00000000000..297cf041d95
--- /dev/null
+++ b/database/migration/src/main/resources/changesets/set_table_role.xml
@@ -0,0 +1,40 @@
+
+
+
+
+
+
+
+
+
+
+
+ SELECT count(1)
+ FROM pg_roles
+ where '${engineSharedCromwellDbRole}' != '' and pg_roles.rolname = '${engineSharedCromwellDbRole}';
+
+
+
+ ALTER TABLE "CALL_CACHING_AGGREGATION_ENTRY" OWNER TO ${engineSharedCromwellDbRole};
+ ALTER TABLE "CALL_CACHING_DETRITUS_ENTRY" OWNER TO ${engineSharedCromwellDbRole};
+ ALTER TABLE "CALL_CACHING_ENTRY" OWNER TO ${engineSharedCromwellDbRole};
+ ALTER TABLE "CALL_CACHING_HASH_ENTRY" OWNER TO ${engineSharedCromwellDbRole};
+ ALTER TABLE "CALL_CACHING_SIMPLETON_ENTRY" OWNER TO ${engineSharedCromwellDbRole};
+ ALTER TABLE "DOCKER_HASH_STORE_ENTRY" OWNER TO ${engineSharedCromwellDbRole};
+ ALTER TABLE "JOB_KEY_VALUE_ENTRY" OWNER TO ${engineSharedCromwellDbRole};
+ ALTER TABLE "JOB_STORE_ENTRY" OWNER TO ${engineSharedCromwellDbRole};
+ ALTER TABLE "JOB_STORE_SIMPLETON_ENTRY" OWNER TO ${engineSharedCromwellDbRole};
+ ALTER TABLE "SUB_WORKFLOW_STORE_ENTRY" OWNER TO ${engineSharedCromwellDbRole};
+ ALTER TABLE "WORKFLOW_STORE_ENTRY" OWNER TO ${engineSharedCromwellDbRole};
+ ALTER TABLE "databasechangelog" OWNER TO ${engineSharedCromwellDbRole};
+ ALTER TABLE "databasechangeloglock" OWNER TO ${engineSharedCromwellDbRole};
+
+
+
+
diff --git a/database/sql/src/main/scala/cromwell/database/slick/MetadataSlickDatabase.scala b/database/sql/src/main/scala/cromwell/database/slick/MetadataSlickDatabase.scala
index c0723af6795..fbab736ae26 100644
--- a/database/sql/src/main/scala/cromwell/database/slick/MetadataSlickDatabase.scala
+++ b/database/sql/src/main/scala/cromwell/database/slick/MetadataSlickDatabase.scala
@@ -14,7 +14,6 @@ import cromwell.database.sql.tables.{
MetadataEntry,
WorkflowMetadataSummaryEntry
}
-import net.ceedubs.ficus.Ficus._
import slick.basic.DatabasePublisher
import slick.jdbc.{ResultSetConcurrency, ResultSetType}
@@ -76,8 +75,6 @@ class MetadataSlickDatabase(originalDatabaseConfig: Config)
import dataAccess.driver.api._
import MetadataSlickDatabase._
- lazy val pgLargeObjectWriteRole: Option[String] = originalDatabaseConfig.as[Option[String]]("pgLargeObjectWriteRole")
-
override def existsMetadataEntries()(implicit ec: ExecutionContext): Future[Boolean] = {
val action = dataAccess.metadataEntriesExists.result
runTransaction(action)
@@ -106,8 +103,6 @@ class MetadataSlickDatabase(originalDatabaseConfig: Config)
labelMetadataKey
)
- val roleSet = pgLargeObjectWriteRole.map(role => sqlu"""SET ROLE TO "#$role"""")
-
// These entries also require a write to the summary queue.
def writeSummarizable(): Future[Unit] = if (partitioned.summarizableMetadata.isEmpty) Future.successful(())
else {
@@ -116,7 +111,7 @@ class MetadataSlickDatabase(originalDatabaseConfig: Config)
val insertMetadata = dataAccess.metadataEntryIdsAutoInc ++= batch
insertMetadata.flatMap(ids => writeSummaryQueueEntries(ids))
}
- runTransaction(DBIO.sequence(roleSet ++ insertActions)).void
+ runTransaction(DBIO.sequence(insertActions)).void
}
// Non-summarizable metadata that only needs to go to the metadata table can be written much more efficiently
@@ -124,7 +119,7 @@ class MetadataSlickDatabase(originalDatabaseConfig: Config)
def writeNonSummarizable(): Future[Unit] = if (partitioned.nonSummarizableMetadata.isEmpty) Future.successful(())
else {
val action = DBIO.sequence(
- roleSet ++ partitioned.nonSummarizableMetadata.grouped(insertBatchSize).map(dataAccess.metadataEntries ++= _)
+ partitioned.nonSummarizableMetadata.grouped(insertBatchSize).map(dataAccess.metadataEntries ++= _)
)
runLobAction(action).void
}
diff --git a/database/sql/src/main/scala/cromwell/database/slick/SlickDatabase.scala b/database/sql/src/main/scala/cromwell/database/slick/SlickDatabase.scala
index 55c408f944f..bc9b085b1f1 100644
--- a/database/sql/src/main/scala/cromwell/database/slick/SlickDatabase.scala
+++ b/database/sql/src/main/scala/cromwell/database/slick/SlickDatabase.scala
@@ -72,6 +72,19 @@ abstract class SlickDatabase(override val originalDatabaseConfig: Config) extend
// NOTE: if you want to refactor database is inner-class type: this.dataAccess.driver.backend.DatabaseFactory
val database = slickConfig.db
+ /*
+ In some cases we want to write Postgres Large Objects (corresponding to Clob/Blob in Slick)
+ with a role other than the database user we are authenticated as. This is important
+ when we want multiple login users to be able to access the records. We can SET ROLE to
+ a role granted to all of them, and they'll all be able to access the Large Objects.
+ This SO thread also has a good explanation:
+ https://dba.stackexchange.com/questions/147607/postgres-large-objects-multiple-users
+ */
+ private lazy val pgLargeObjectWriteRole: Option[String] =
+ originalDatabaseConfig.as[Option[String]]("pgLargeObjectWriteRole")
+ private lazy val roleSetCmd =
+ pgLargeObjectWriteRole.map(role => sqlu"""SET LOCAL ROLE TO "#$role"""")
+
override lazy val connectionDescription: String = databaseConfig.getString(urlKey)
SlickDatabase.log.info(s"Running with database $urlKey = $connectionDescription")
@@ -167,7 +180,17 @@ abstract class SlickDatabase(override val originalDatabaseConfig: Config) extend
isolationLevel: TransactionIsolation = TransactionIsolation.RepeatableRead,
timeout: Duration = Duration.Inf
): Future[R] =
- runActionInternal(action.transactionally.withTransactionIsolation(isolationLevel), timeout = timeout)
+ runActionInternal(withLobRole(action).transactionally.withTransactionIsolation(isolationLevel), timeout = timeout)
+
+ /*
+ If we're using Postgres and have been configured to do so, set the desired role on the transaction.
+ See comments on `roleSetCmd` above for more information.
+ */
+ private def withLobRole[R](action: DBIO[R]): DBIO[R] =
+ (dataAccess.driver, roleSetCmd) match {
+ case (PostgresProfile, Some(roleSet)) => roleSet.andThen(action)
+ case _ => action
+ }
/* Note that this is only appropriate for actions that do not involve Blob
* or Clob fields in Postgres, since large object support requires running
diff --git a/docs/Configuring.md b/docs/Configuring.md
index aa810d19a0c..076295d115c 100644
--- a/docs/Configuring.md
+++ b/docs/Configuring.md
@@ -324,6 +324,34 @@ database {
}
```
+If you want multiple database users to be able to read Cromwell's data from a PostgreSQL database, you'll need to create a
+role that all relevant users are granted, and adjust Cromwell to use this role. This is necessary because each Large Object is owned
+by, and only readable by, the role that wrote it.
+
+First, pass these options when executing Cromwell. They will ensure that Cromwell's database tables are
+owned by the role, not the initial login user.
+ * `-DengineSharedCromwellDbRole=your_role` to control the role that owns the engine tables
+ * `-DsharedCromwellDbRole=your_role` to control the role that owns the metadata tables
+
+Next, use the config key `pgLargeObjectWriteRole` to set the role that should own all large objects, as shown below.
+This config has no effect if you aren't using PostgreSQL. The configured login user can be any user that has been
+granted the shared role.
+
+```hocon
+database {
+ profile = "slick.jdbc.PostgresProfile$"
+ pgLargeObjectWriteRole = "your_role"
+ db {
+ driver = "org.postgresql.Driver"
+ url = "jdbc:postgresql://localhost:5432/cromwell"
+ user = "user"
+ password = "pass"
+ port = 5432
+ connectionTimeout = 5000
+ }
+}
+```
+
**Using Cromwell with file-based database (No server required)**
SQLite is currently not supported. However, HSQLDB does support running with a persistence file.