From 74b706340db538d62f9ce8fa9e04dab056ab060f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Sun, 19 Sep 2021 10:22:21 +0100 Subject: [PATCH] general: introduce Fake migrations, that allow to only insert and update refinery schema history table. Add fake migrations to refinery_cli Add Target migrations to refinery_cli --- refinery/tests/mysql.rs | 64 +++++++++++++++++++ refinery/tests/mysql_async.rs | 70 +++++++++++++++++++++ refinery/tests/postgres.rs | 64 +++++++++++++++++++ refinery/tests/rusqlite.rs | 75 +++++++++++++++++++--- refinery/tests/tiberius.rs | 101 +++++++++++++++++++++++++++++- refinery/tests/tokio_postgres.rs | 80 +++++++++++++++++++++++ refinery_cli/src/cli.rs | 14 ++++- refinery_cli/src/migrate.rs | 22 ++++++- refinery_core/src/runner.rs | 7 ++- refinery_core/src/traits/async.rs | 28 ++++++--- refinery_core/src/traits/sync.rs | 27 +++++--- 11 files changed, 523 insertions(+), 29 deletions(-) diff --git a/refinery/tests/mysql.rs b/refinery/tests/mysql.rs index 19610de2..6b3e3f3d 100644 --- a/refinery/tests/mysql.rs +++ b/refinery/tests/mysql.rs @@ -625,6 +625,70 @@ mod mysql { }) } + #[test] + fn doesnt_run_migrations_if_fake() { + run_test(|| { + let opts = mysql::Opts::from_url("mysql://refinery:root@localhost:3306/refinery_test") + .unwrap(); + let pool = mysql::Pool::new(opts).unwrap(); + let mut conn = pool.get_conn().unwrap(); + + let report = embedded::migrations::runner() + .set_target(Target::Fake) + .run(&mut conn) + .unwrap(); + + let applied_migrations = report.applied_migrations(); + + assert!(applied_migrations.is_empty()); + + let current = conn.get_last_applied_migration().unwrap().unwrap(); + let migrations = get_migrations(); + let mchecksum = migrations[3].checksum(); + + assert_eq!(4, current.version()); + assert_eq!(mchecksum, current.checksum()); + + let mut row = + "SELECT table_name FROM information_schema.tables WHERE table_name='persons'" + .run(conn) + .unwrap(); + assert!(row.next().is_none()); + }); + } + + #[test] + fn doesnt_run_migrations_if_fake_version() { + run_test(|| { + let opts = mysql::Opts::from_url("mysql://refinery:root@localhost:3306/refinery_test") + .unwrap(); + let pool = mysql::Pool::new(opts).unwrap(); + let mut conn = pool.get_conn().unwrap(); + + let report = embedded::migrations::runner() + .set_target(Target::FakeVersion(2)) + .run(&mut conn) + .unwrap(); + + let applied_migrations = report.applied_migrations(); + + assert!(applied_migrations.is_empty()); + + let current = conn.get_last_applied_migration().unwrap().unwrap(); + let migrations = get_migrations(); + let mchecksum = migrations[1].checksum(); + + assert_eq!(2, current.version()); + assert_eq!(mchecksum, current.checksum()); + + let mut row = + "SELECT table_name FROM information_schema.tables WHERE table_name='persons'" + .run(conn) + .unwrap(); + assert!(row.next().is_none()); + }); + } + #[test] fn migrates_from_cli() { // cli only finds .sql migration files diff --git a/refinery/tests/mysql_async.rs b/refinery/tests/mysql_async.rs index 0128c93d..a2a2a8fd 100644 --- a/refinery/tests/mysql_async.rs +++ b/refinery/tests/mysql_async.rs @@ -656,4 +656,74 @@ mod mysql_async { }) .await; } + + #[tokio::test] + async fn doesnt_run_migrations_if_fake() { + run_test(async { + let mut pool = + mysql_async::Pool::new("mysql://refinery:root@localhost:3306/refinery_test"); + let mut conn = pool.get_conn().await.unwrap(); + + let report = embedded::migrations::runner() + .set_target(Target::Fake) + .run_async(&mut pool) + .await + .unwrap(); + + 
let applied_migrations = report.applied_migrations(); + assert!(applied_migrations.is_empty()); + + let current = pool.get_last_applied_migration().await.unwrap().unwrap(); + let migrations = get_migrations(); + let mchecksum = migrations[3].checksum(); + + assert_eq!(4, current.version()); + assert_eq!(mchecksum, current.checksum()); + + let row: Vec = conn + .query( + "SELECT table_name FROM information_schema.tables WHERE table_name='persons'", + ) + .await + .unwrap(); + + assert!(row.is_empty()); + }) + .await; + } + + #[tokio::test] + async fn doesnt_run_migrations_if_fake_version() { + run_test(async { + let mut pool = + mysql_async::Pool::new("mysql://refinery:root@localhost:3306/refinery_test"); + let mut conn = pool.get_conn().await.unwrap(); + + let report = embedded::migrations::runner() + .set_target(Target::FakeVersion(2)) + .run_async(&mut pool) + .await + .unwrap(); + + let applied_migrations = report.applied_migrations(); + assert!(applied_migrations.is_empty()); + + let current = pool.get_last_applied_migration().await.unwrap().unwrap(); + let migrations = get_migrations(); + let mchecksum = migrations[1].checksum(); + + assert_eq!(2, current.version()); + assert_eq!(mchecksum, current.checksum()); + + let row: Vec = conn + .query( + "SELECT table_name FROM information_schema.tables WHERE table_name='persons'", + ) + .await + .unwrap(); + + assert!(row.is_empty()); + }) + .await; + } } diff --git a/refinery/tests/postgres.rs b/refinery/tests/postgres.rs index f7e8fcbf..4c1859b9 100644 --- a/refinery/tests/postgres.rs +++ b/refinery/tests/postgres.rs @@ -607,6 +607,70 @@ mod postgres { }) } + #[test] + fn doesnt_run_migrations_if_fake() { + run_test(|| { + let mut client = + Client::connect("postgres://postgres@localhost:5432/postgres", NoTls).unwrap(); + + let report = embedded::migrations::runner() + .set_target(Target::Fake) + .run(&mut client) + .unwrap(); + + let applied_migrations = report.applied_migrations(); + + assert!(applied_migrations.is_empty()); + + let current = client.get_last_applied_migration().unwrap().unwrap(); + let migrations = get_migrations(); + let mchecksum = migrations[3].checksum(); + + assert_eq!(4, current.version()); + assert_eq!(mchecksum, current.checksum()); + + let row = &client + .query( + "SELECT table_name FROM information_schema.tables WHERE table_name='persons'", + &[], + ) + .unwrap(); + assert!(row.is_empty()); + }); + } + + #[test] + fn doesnt_run_migrations_if_fake_version() { + run_test(|| { + let mut client = + Client::connect("postgres://postgres@localhost:5432/postgres", NoTls).unwrap(); + + let report = embedded::migrations::runner() + .set_target(Target::FakeVersion(2)) + .run(&mut client) + .unwrap(); + + let applied_migrations = report.applied_migrations(); + + assert!(applied_migrations.is_empty()); + + let current = client.get_last_applied_migration().unwrap().unwrap(); + let migrations = get_migrations(); + let mchecksum = migrations[1].checksum(); + + assert_eq!(2, current.version()); + assert_eq!(mchecksum, current.checksum()); + + let row = &client + .query( + "SELECT table_name FROM information_schema.tables WHERE table_name='persons'", + &[], + ) + .unwrap(); + assert!(row.is_empty()); + }); + } + #[test] fn migrates_from_cli() { run_test(|| { diff --git a/refinery/tests/rusqlite.rs b/refinery/tests/rusqlite.rs index ad9ae218..866e8ec5 100644 --- a/refinery/tests/rusqlite.rs +++ b/refinery/tests/rusqlite.rs @@ -11,6 +11,7 @@ mod rusqlite { error::Kind, Migrate, Migration, Runner, Target, }; + use 
refinery_core::rusqlite::Error; use refinery_core::rusqlite::{Connection, OptionalExtension}; use std::fs::{self, File}; use std::process::Command; @@ -104,7 +105,7 @@ mod rusqlite { } #[test] - fn embedded_creates_migration_table() { + fn creates_migration_table() { let mut conn = Connection::open_in_memory().unwrap(); embedded::migrations::runner().run(&mut conn).unwrap(); let table_name: String = conn @@ -118,7 +119,7 @@ mod rusqlite { } #[test] - fn embedded_creates_migration_table_grouped_transaction() { + fn creates_migration_table_grouped_transaction() { let mut conn = Connection::open_in_memory().unwrap(); embedded::migrations::runner() .set_grouped(true) @@ -135,7 +136,7 @@ mod rusqlite { } #[test] - fn embedded_applies_migration() { + fn applies_migration() { let mut conn = Connection::open_in_memory().unwrap(); embedded::migrations::runner().run(&mut conn).unwrap(); @@ -155,7 +156,7 @@ mod rusqlite { } #[test] - fn embedded_applies_migration_grouped_transaction() { + fn applies_migration_grouped_transaction() { let mut conn = Connection::open_in_memory().unwrap(); embedded::migrations::runner() @@ -178,7 +179,7 @@ mod rusqlite { } #[test] - fn embedded_updates_schema_history() { + fn updates_schema_history() { let mut conn = Connection::open_in_memory().unwrap(); embedded::migrations::runner().run(&mut conn).unwrap(); @@ -191,7 +192,7 @@ mod rusqlite { } #[test] - fn embedded_updates_schema_history_grouped_transaction() { + fn updates_schema_history_grouped_transaction() { let mut conn = Connection::open_in_memory().unwrap(); embedded::migrations::runner() @@ -207,7 +208,7 @@ mod rusqlite { } #[test] - fn embedded_updates_to_last_working_if_not_grouped() { + fn updates_to_last_working_if_not_grouped() { let mut conn = Connection::open_in_memory().unwrap(); let result = broken::migrations::runner().run(&mut conn); @@ -234,7 +235,7 @@ mod rusqlite { } #[test] - fn embedded_doesnt_update_to_last_working_if_grouped() { + fn doesnt_update_to_last_working_if_grouped() { let mut conn = Connection::open_in_memory().unwrap(); let result = broken::migrations::runner() @@ -536,6 +537,64 @@ mod rusqlite { assert_eq!(migrations[4].checksum(), applied_migration.checksum()); } + #[test] + fn doesnt_run_migrations_if_fake_version() { + let mut conn = Connection::open_in_memory().unwrap(); + + let report = embedded::migrations::runner() + .set_target(Target::FakeVersion(2)) + .run(&mut conn) + .unwrap(); + + let applied_migrations = report.applied_migrations(); + + assert!(applied_migrations.is_empty()); + + let current = conn.get_last_applied_migration().unwrap().unwrap(); + let migrations = get_migrations(); + let mchecksum = migrations[1].checksum(); + + assert_eq!(2, current.version()); + assert_eq!(mchecksum, current.checksum()); + + let err: Result = conn.query_row( + "SELECT name FROM sqlite_master WHERE type='table' AND name='persons'", + [], + |row| row.get(0), + ); + + assert!(matches!(err.unwrap_err(), Error::QueryReturnedNoRows)); + } + + #[test] + fn doesnt_run_migrations_if_fake() { + let mut conn = Connection::open_in_memory().unwrap(); + + let report = embedded::migrations::runner() + .set_target(Target::Fake) + .run(&mut conn) + .unwrap(); + + let applied_migrations = report.applied_migrations(); + + assert!(applied_migrations.is_empty()); + + let current = conn.get_last_applied_migration().unwrap().unwrap(); + let migrations = get_migrations(); + let mchecksum = migrations[3].checksum(); + + assert_eq!(4, current.version()); + assert_eq!(mchecksum, current.checksum()); + + let 
err: Result = conn.query_row( + "SELECT name FROM sqlite_master WHERE type='table' AND name='persons'", + [], + |row| row.get(0), + ); + + assert!(matches!(err.unwrap_err(), Error::QueryReturnedNoRows)); + } + #[test] fn migrates_from_cli() { run_test(|| { diff --git a/refinery/tests/tiberius.rs b/refinery/tests/tiberius.rs index 1bc3b6da..1a941c9f 100644 --- a/refinery/tests/tiberius.rs +++ b/refinery/tests/tiberius.rs @@ -623,7 +623,6 @@ mod tiberius { let current = client.get_last_applied_migration().await.unwrap(); - dbg!(¤t); assert!(current.is_none()); // matches!(current, None); @@ -889,6 +888,106 @@ mod tiberius { .await; } + #[tokio::test] + async fn doesnt_run_migrations_if_fake() { + run_test(async { + let config = generate_config("refinery_test"); + + let tcp = tokio::net::TcpStream::connect(format!( + "{}:{}", + config.db_host().unwrap(), + config.db_port().unwrap() + )) + .await + .unwrap(); + let mut tconfig: TConfig = (&config).try_into().unwrap(); + tconfig.trust_cert(); + let mut client = tiberius::Client::connect(tconfig, tcp.compat_write()) + .await + .unwrap(); + + let report = embedded::migrations::runner() + .set_target(Target::Fake) + .run_async(&mut client) + .await + .unwrap(); + + let applied_migrations = report.applied_migrations(); + + assert!(applied_migrations.is_empty()); + + let current = client.get_last_applied_migration().await.unwrap().unwrap(); + let migrations = get_migrations(); + let mchecksum = migrations[3].checksum(); + + assert_eq!(4, current.version()); + assert_eq!(mchecksum, current.checksum()); + + let row = client + .simple_query( + "SELECT table_name FROM information_schema.tables WHERE table_name='persons'", + ) + .await + .unwrap() + .into_row() + .await + .unwrap(); + + assert!(row.is_none()); + }) + .await; + } + + #[tokio::test] + async fn doesnt_run_migrations_if_fake_version() { + run_test(async { + let config = generate_config("refinery_test"); + + let tcp = tokio::net::TcpStream::connect(format!( + "{}:{}", + config.db_host().unwrap(), + config.db_port().unwrap() + )) + .await + .unwrap(); + let mut tconfig: TConfig = (&config).try_into().unwrap(); + tconfig.trust_cert(); + let mut client = tiberius::Client::connect(tconfig, tcp.compat_write()) + .await + .unwrap(); + + let report = embedded::migrations::runner() + .set_target(Target::FakeVersion(2)) + .run_async(&mut client) + .await + .unwrap(); + + let applied_migrations = report.applied_migrations(); + + assert!(applied_migrations.is_empty()); + + let current = client.get_last_applied_migration().await.unwrap().unwrap(); + let migrations = get_migrations(); + let mchecksum = migrations[1].checksum(); + + assert_eq!(2, current.version()); + assert_eq!(mchecksum, current.checksum()); + + let row = client + .simple_query( + "SELECT table_name FROM information_schema.tables WHERE table_name='persons'", + ) + .await + .unwrap() + .into_row() + .await + .unwrap(); + + assert!(row.is_none()); + }) + .await; + } + // this is a blocking test, but shouldn't do arm running it inside tokio's runtime #[tokio::test] async fn migrates_from_cli() { diff --git a/refinery/tests/tokio_postgres.rs b/refinery/tests/tokio_postgres.rs index 17692977..d0c2a9ec 100644 --- a/refinery/tests/tokio_postgres.rs +++ b/refinery/tests/tokio_postgres.rs @@ -788,4 +788,84 @@ mod tokio_postgres { }) .await; } + + #[tokio::test] + async fn doesnt_run_migrations_if_fake() { + run_test(async { + let (mut client, connection) = + tokio_postgres::connect("postgres://postgres@localhost:5432/postgres", NoTls) + .await + 
.unwrap(); + + tokio::spawn(async move { + connection.await.unwrap(); + }); + + let report = embedded::migrations::runner() + .set_target(Target::Fake) + .run_async(&mut client) + .await + .unwrap(); + + let applied_migrations = report.applied_migrations(); + assert!(applied_migrations.is_empty()); + + let current = client.get_last_applied_migration().await.unwrap().unwrap(); + let migrations = get_migrations(); + let mchecksum = migrations[3].checksum(); + + assert_eq!(4, current.version()); + assert_eq!(mchecksum, current.checksum()); + + let row = client + .query( + "SELECT table_name FROM information_schema.tables WHERE table_name='persons'", + &[], + ) + .await + .unwrap(); + assert!(row.is_empty()); + }) + .await; + } + + #[tokio::test] + async fn doesnt_run_migrations_if_fake_version() { + run_test(async { + let (mut client, connection) = + tokio_postgres::connect("postgres://postgres@localhost:5432/postgres", NoTls) + .await + .unwrap(); + + tokio::spawn(async move { + connection.await.unwrap(); + }); + + let report = embedded::migrations::runner() + .set_target(Target::FakeVersion(2)) + .run_async(&mut client) + .await + .unwrap(); + + let applied_migrations = report.applied_migrations(); + assert!(applied_migrations.is_empty()); + + let current = client.get_last_applied_migration().await.unwrap().unwrap(); + let migrations = get_migrations(); + let mchecksum = migrations[1].checksum(); + + assert_eq!(2, current.version()); + assert_eq!(mchecksum, current.checksum()); + + let row = client + .query( + "SELECT table_name FROM information_schema.tables WHERE table_name='persons'", + &[], + ) + .await + .unwrap(); + assert!(row.is_empty()); + }) + .await; + } } diff --git a/refinery_cli/src/cli.rs b/refinery_cli/src/cli.rs index ccc00869..c0ae93a4 100644 --- a/refinery_cli/src/cli.rs +++ b/refinery_cli/src/cli.rs @@ -26,9 +26,21 @@ pub fn create_cli() -> App<'static, 'static> { .arg( Arg::with_name("grouped") .short("g") - .help("run migrations grouped in a single transaction") + .help("if set runs migrations grouped in a single transaction") .takes_value(false), ) + .arg( + Arg::with_name("fake") + .short("f") + .help("if set do not actually runs migrations, just creates and updates refinery's schema migration table") + .takes_value(false), + ) + .arg( + Arg::with_name("target") + .short("t") + .help("if specified, migrates to the provided Target version") + .takes_value(true), + ) .arg( Arg::with_name("divergent") .short("d") diff --git a/refinery_cli/src/migrate.rs b/refinery_cli/src/migrate.rs index b685aee9..b2defcd5 100644 --- a/refinery_cli/src/migrate.rs +++ b/refinery_cli/src/migrate.rs @@ -2,7 +2,9 @@ use std::path::Path; use anyhow::Context; use clap::ArgMatches; -use refinery_core::{config::Config, find_migration_files, Migration, MigrationType, Runner}; +use refinery_core::{ + config::Config, find_migration_files, Migration, MigrationType, Runner, Target, +}; pub fn handle_migration_command(args: &ArgMatches) -> anyhow::Result<()> { //safe to call unwrap as we specified default values @@ -11,6 +13,8 @@ pub fn handle_migration_command(args: &ArgMatches) -> anyhow::Result<()> { let divergent = !args.is_present("divergent"); let missing = !args.is_present("missing"); let env_var_opt = args.value_of("env-var"); + let fake = args.is_present("fake"); + let target = args.value_of("target"); //safe to call unwrap as we specified default value let path = args.value_of("path").unwrap(); @@ -19,6 +23,8 @@ pub fn handle_migration_command(args: &ArgMatches) -> anyhow::Result<()> { 
grouped, divergent, missing, + fake, + target, env_var_opt, path, )?; @@ -30,6 +36,8 @@ fn run_migrations( grouped: bool, divergent: bool, missing: bool, + fake: bool, + target: Option<&str>, env_var_opt: Option<&str>, path: &str, ) -> anyhow::Result<()> { @@ -52,12 +60,24 @@ fn run_migrations( } let mut config = config(config_location, env_var_opt)?; + let target = match (fake, target) { + (true, None) => Target::Fake, + (false, None) => Target::Latest, + (true, Some(t)) => { + Target::FakeVersion(t.parse::().expect("could not parse target version")) + } + (false, Some(t)) => { + Target::Version(t.parse::().expect("could not parse target version")) + } + }; + cfg_if::cfg_if! { if #[cfg(any(feature = "mysql", feature = "postgresql", feature = "sqlite"))] { Runner::new(&migrations) .set_grouped(grouped) .set_abort_divergent(divergent) .set_abort_missing(missing) + .set_target(target) .run(&mut config)?; } } diff --git a/refinery_core/src/runner.rs b/refinery_core/src/runner.rs index e60a2d94..64078397 100644 --- a/refinery_core/src/runner.rs +++ b/refinery_core/src/runner.rs @@ -47,10 +47,12 @@ impl fmt::Debug for Type { } /// An enum set that represents the target version up to which refinery should migrate, it is used by [Runner] -#[derive(Clone, Copy)] +#[derive(Clone, Copy, Debug)] pub enum Target { Latest, Version(u32), + Fake, + FakeVersion(u32), } // an Enum set that represents the state of the migration: Applied on the database, @@ -262,7 +264,8 @@ impl Runner { } /// Set the target version up to which refinery should migrate, Latest migrates to the latest version available - /// Version migrates to a user provided version, a Version with a higher version than the latest will be ignored. + /// Version migrates to a user provided version, a Version with a higher version than the latest will be ignored, + /// and Fake doesn't actually run any migration, just creates and updates refinery's schema migration table /// by default this is set to Latest pub fn set_target(self, target: Target) -> Runner { Runner { target, ..self } diff --git a/refinery_core/src/traits/async.rs b/refinery_core/src/traits/async.rs index be69ec4f..c95d7ffa 100644 --- a/refinery_core/src/traits/async.rs +++ b/refinery_core/src/traits/async.rs @@ -66,8 +66,9 @@ async fn migrate_grouped( ) -> Result { let mut grouped_migrations = Vec::new(); let mut applied_migrations = Vec::new(); + for mut migration in migrations.into_iter() { - if let Target::Version(input_target) = target { + if let Target::Version(input_target) | Target::FakeVersion(input_target) = target { if input_target < migration.version() { break; } @@ -81,15 +82,26 @@ async fn migrate_grouped( ); let sql = migration.sql().expect("sql must be Some!").to_string(); - applied_migrations.push(migration); - grouped_migrations.push(sql); + + // If Target is Fake, we only update schema migrations table + if !matches!(target, Target::Fake | Target::FakeVersion(_)) { + applied_migrations.push(migration); + grouped_migrations.push(sql); + } grouped_migrations.push(query); } - log::info!( - "going to apply batch migrations in single transaction: {:#?}", - applied_migrations.iter().map(ToString::to_string) - ); + match target { + Target::Fake | Target::FakeVersion(_) => { + log::info!("not going to apply any migration as fake flag is enabled"); + } + Target::Latest | Target::Version(_) => { + log::info!( + "going to apply batch migrations in single transaction: {:#?}", + applied_migrations.iter().map(ToString::to_string) + ); + } + }; if let 
Target::Version(input_target) = target { log::info!( @@ -159,7 +171,7 @@ where log::info!("no migrations to apply"); } - if grouped { + if grouped || matches!(target, Target::Fake | Target::FakeVersion(_)) { migrate_grouped(self, migrations, target).await } else { migrate(self, migrations, target).await diff --git a/refinery_core/src/traits/sync.rs b/refinery_core/src/traits/sync.rs index 1b905643..053ec6b5 100644 --- a/refinery_core/src/traits/sync.rs +++ b/refinery_core/src/traits/sync.rs @@ -60,7 +60,7 @@ fn migrate_grouped( let mut applied_migrations = Vec::new(); for migration in migrations.into_iter() { - if let Target::Version(input_target) = target { + if let Target::Version(input_target) | Target::FakeVersion(input_target) = target { if input_target < migration.version() { break; } @@ -71,15 +71,26 @@ fn migrate_grouped( migration.version(), migration.name(), Local::now().to_rfc3339(), migration.checksum().to_string() ); let sql = migration.sql().expect("sql must be Some!").to_string(); - applied_migrations.push(migration); - grouped_migrations.push(sql); + + // If Target is Fake, we only update schema migrations table + if !matches!(target, Target::Fake | Target::FakeVersion(_)) { + applied_migrations.push(migration); + grouped_migrations.push(sql); + } grouped_migrations.push(query); } - log::info!( - "going to apply batch migrations in single transaction: {:#?}", - applied_migrations.iter().map(ToString::to_string) - ); + match target { + Target::Fake | Target::FakeVersion(_) => { + log::info!("not going to apply any migration as fake flag is enabled"); + } + Target::Latest | Target::Version(_) => { + log::info!( + "going to apply batch migrations in single transaction: {:#?}", + applied_migrations.iter().map(ToString::to_string) + ); + } + }; if let Target::Version(input_target) = target { log::info!( @@ -141,7 +152,7 @@ where log::info!("no migrations to apply"); } - if grouped { + if grouped || matches!(target, Target::Fake | Target::FakeVersion(_)) { migrate_grouped(self, migrations, target) } else { migrate(self, migrations, target)
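
Usage note (not part of the patch itself): a minimal sketch of how the new targets introduced above might be driven from library code. It mirrors the assertions in the tests in this patch; the migrations path, the `embedded` module name, and the `fake_apply` helper are illustrative only, not part of the change.

    use refinery::Target;
    use rusqlite::Connection;

    mod embedded {
        // Path is illustrative; the tests in this patch embed "./tests/migrations".
        refinery::embed_migrations!("./migrations");
    }

    fn fake_apply(conn: &mut Connection) -> Result<(), refinery::Error> {
        // Target::Fake only inserts rows into refinery's schema history table;
        // no migration SQL is executed against the database. Target::FakeVersion(n)
        // does the same, but only up to (and including) version n.
        let report = embedded::migrations::runner()
            .set_target(Target::Fake)
            .run(conn)?;

        // As the tests above assert: nothing was actually applied,
        // yet get_last_applied_migration() now reports the latest version.
        assert!(report.applied_migrations().is_empty());
        Ok(())
    }

On the CLI side, the same behaviour is exposed through the new flags added in refinery_cli above: `-f` selects the fake target, and combining `-f` with `-t <version>` selects the fake-version target.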