From 8c1df39640a0ac67b6ba0e00db95ebc272168e4f Mon Sep 17 00:00:00 2001
From: David Crespo
Date: Mon, 28 Aug 2023 15:01:24 -0500
Subject: [PATCH 01/17] add silo_id to views::IpPool
---
 nexus/db-model/src/ip_pool.rs             | 2 +-
 nexus/tests/integration_tests/ip_pools.rs | 1 +
 nexus/types/src/external_api/views.rs     | 1 +
 openapi/nexus.json                        | 5 +++++
 4 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs
index ca1d1d55fe..9ba9a810f7 100644
--- a/nexus/db-model/src/ip_pool.rs
+++ b/nexus/db-model/src/ip_pool.rs
@@ -73,7 +73,7 @@ impl IpPool {
 impl From<IpPool> for views::IpPool { fn from(pool: IpPool) -> Self { - Self { identity: pool.identity() } + Self { identity: pool.identity(), silo_id: pool.silo_id } } }

diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs
index c48abd1030..71d59eaad9 100644
--- a/nexus/tests/integration_tests/ip_pools.rs
+++ b/nexus/tests/integration_tests/ip_pools.rs
@@ -113,6 +113,7 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) {
 .unwrap(); assert_eq!(created_pool.identity.name, pool_name); assert_eq!(created_pool.identity.description, description); + assert_eq!(created_pool.silo_id, None); let list = NexusRequest::iter_collection_authn::<IpPool>( client,

diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs
index b1f047f78b..bf31dc98b2 100644
--- a/nexus/types/src/external_api/views.rs
+++ b/nexus/types/src/external_api/views.rs
@@ -244,6 +244,7 @@ pub struct VpcRouter {
 pub struct IpPool { #[serde(flatten)] pub identity: IdentityMetadata, + pub silo_id: Option<Uuid>, } #[derive(Clone, Copy, Debug, Deserialize, Serialize, JsonSchema)]

diff --git a/openapi/nexus.json b/openapi/nexus.json
index 127f1309d0..a5f9aadd53 100644
--- a/openapi/nexus.json
+++ b/openapi/nexus.json
@@ -10141,6 +10141,11 @@
 } ] }, + "silo_id": { + "nullable": true, + "type": "string", + "format": "uuid" + }, "time_created": { "description": "timestamp when this resource was created", "type": "string",

From 29cb45c02d73fb3bab2197467c29120d2ad71b06 Mon Sep 17 00:00:00 2001
From: David Crespo
Date: Mon, 28 Aug 2023 15:21:37 -0500
Subject: [PATCH 02/17] add silo param to IpPoolCreate
---
 nexus/db-queries/src/db/datastore/rack.rs  | 2 ++
 nexus/test-utils/src/resource_helpers.rs   | 1 +
 nexus/tests/integration_tests/endpoints.rs | 1 +
 nexus/tests/integration_tests/ip_pools.rs  | 4 ++++
 nexus/types/src/external_api/params.rs     | 4 ++++
 openapi/nexus.json                         | 9 +++++++++
 6 files changed, 21 insertions(+)

diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs
index 22599a7907..044c1fcfe6 100644
--- a/nexus/db-queries/src/db/datastore/rack.rs
+++ b/nexus/db-queries/src/db/datastore/rack.rs
@@ -561,6 +561,7 @@ impl DataStore {
 name: SERVICE_IP_POOL_NAME.parse::<Name>().unwrap(), description: String::from("IP Pool for Oxide Services"), }, + silo: None, }; self.ip_pool_create(opctx, &params, /*internal=*/ true) .await
@@ -575,6 +576,7 @@
 name: "default".parse::<Name>().unwrap(), description: String::from("default IP pool"), }, + silo: None, }; self.ip_pool_create(opctx, &params, /*internal=*/ false) .await

diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs
index e27e0f6bf5..8b66f5a3ef 100644
--- a/nexus/test-utils/src/resource_helpers.rs
+++ b/nexus/test-utils/src/resource_helpers.rs
@@ -140,6 +140,7 @@ pub async fn create_ip_pool(
 name: pool_name.parse().unwrap(),
String::from("an ip pool"), }, + silo: None, }, ) .await; diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index ff299d61e8..4df87d2851 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -454,6 +454,7 @@ lazy_static! { name: DEMO_IP_POOL_NAME.clone(), description: String::from("an IP pool"), }, + silo: None, }; pub static ref DEMO_IP_POOL_PROJ_URL: String = format!("/v1/ip-pools/{}?project={}", *DEMO_IP_POOL_NAME, *DEMO_PROJECT_NAME); diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index 71d59eaad9..d2b642590a 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -102,6 +102,7 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { name: String::from(pool_name).parse().unwrap(), description: String::from(description), }, + silo: None, }; let created_pool: IpPool = NexusRequest::objects_post(client, ip_pools_url, ¶ms) @@ -298,6 +299,7 @@ async fn test_ip_pool_range_overlapping_ranges_fails( name: String::from(pool_name).parse().unwrap(), description: String::from(description), }, + silo: None, }; let created_pool: IpPool = NexusRequest::objects_post(client, ip_pools_url, ¶ms) @@ -479,6 +481,7 @@ async fn test_ip_pool_range_pagination(cptestctx: &ControlPlaneTestContext) { name: String::from(pool_name).parse().unwrap(), description: String::from(description), }, + silo: None, }; let created_pool: IpPool = NexusRequest::objects_post(client, ip_pools_url, ¶ms) @@ -615,6 +618,7 @@ async fn test_ip_pool_list_usable_by_project( name: String::from(mypool_name).parse().unwrap(), description: String::from("right on cue"), }, + silo: None, }; NexusRequest::objects_post(client, ip_pools_url, ¶ms) .authn_as(AuthnMode::PrivilegedUser) diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 895746b785..4d8b40d8a9 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -731,6 +731,10 @@ impl std::fmt::Debug for CertificateCreate { pub struct IpPoolCreate { #[serde(flatten)] pub identity: IdentityMetadataCreateParams, + + /// If an IP pool is associated with a silo, instance IP allocations in that + /// silo can draw from that pool. 
+ pub silo: Option<NameOrId>, } /// Parameters for updating an IP Pool

diff --git a/openapi/nexus.json b/openapi/nexus.json
index a5f9aadd53..843c861967 100644
--- a/openapi/nexus.json
+++ b/openapi/nexus.json
@@ -10174,6 +10174,15 @@
 }, "name": { "$ref": "#/components/schemas/Name" + }, + "silo": { + "nullable": true, + "description": "If an IP pool is associated with a silo, instance IP allocations in that silo can draw from that pool.", + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" + } + ] } }, "required": [
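A note on the request shape this patch creates: because `identity` is `#[serde(flatten)]`ed, `name` and `description` sit at the top level of the JSON body next to the new optional `silo` field. A minimal sketch of the wire format, using a stand-in struct (not the actual Nexus types) and assuming the serde/serde_json crates:

// Hypothetical stand-in for IpPoolCreate; NameOrId is modeled as a plain
// string (a name or a UUID both serialize as strings on the wire).
use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct IpPoolCreateSketch {
    name: String,
    description: String,
    silo: Option<String>,
}

fn main() {
    let body = r#"{
        "name": "silo-pool",
        "description": "a pool tied to one silo",
        "silo": "my-silo"
    }"#;
    let req: IpPoolCreateSketch = serde_json::from_str(body).unwrap();
    assert_eq!(req.silo.as_deref(), Some("my-silo"));
    println!("{req:?}");
}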
From bd6107266e099481a0026520d0825a7bbd6bb851 Mon Sep 17 00:00:00 2001
From: David Crespo
Date: Tue, 29 Aug 2023 12:16:51 -0500
Subject: [PATCH 03/17] pass through silo name or ID to IP pool create, test with ID and name
---
 nexus/db-model/src/ip_pool.rs                 | 3 +-
 nexus/db-queries/src/db/datastore/ip_pool.rs  | 3 +-
 nexus/db-queries/src/db/datastore/rack.rs     | 32 +++++----
 .../db-queries/src/db/queries/external_ip.rs  | 1 +
 nexus/src/app/ip_pool.rs                      | 17 ++++-
 nexus/src/external_api/http_entrypoints.rs    | 12 +++-
 nexus/tests/integration_tests/ip_pools.rs     | 69 +++++++++++++++++++
 7 files changed, 118 insertions(+), 19 deletions(-)

diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs
index 9ba9a810f7..fc5f4de30d 100644
--- a/nexus/db-model/src/ip_pool.rs
+++ b/nexus/db-model/src/ip_pool.rs
@@ -57,6 +57,7 @@ impl IpPool {
 pub fn new( pool_identity: &external::IdentityMetadataCreateParams, internal: bool, + silo_id: Option<Uuid>, ) -> Self { Self { identity: IpPoolIdentity::new(
@@ -65,7 +66,7 @@ impl IpPool {
 ), internal, rcgen: 0, - silo_id: None, + silo_id, project_id: None, } }

diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs
index eb486d38f6..04193f47aa 100644
--- a/nexus/db-queries/src/db/datastore/ip_pool.rs
+++ b/nexus/db-queries/src/db/datastore/ip_pool.rs
@@ -149,12 +149,13 @@ impl DataStore {
 opctx: &OpContext, new_pool: &params::IpPoolCreate, internal: bool, + silo_id: Option<Uuid>, ) -> CreateResult<IpPool> { use db::schema::ip_pool::dsl; opctx .authorize(authz::Action::CreateChild, &authz::IP_POOL_LIST) .await?; - let pool = IpPool::new(&new_pool.identity, internal); + let pool = IpPool::new(&new_pool.identity, internal, silo_id); let pool_name = pool.name().as_str().to_string(); diesel::insert_into(dsl::ip_pool)

diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs
index 044c1fcfe6..20278c5044 100644
--- a/nexus/db-queries/src/db/datastore/rack.rs
+++ b/nexus/db-queries/src/db/datastore/rack.rs
@@ -563,13 +563,15 @@ impl DataStore {
 }, silo: None, }; - self.ip_pool_create(opctx, &params, /*internal=*/ true) - .await - .map(|_| ()) - .or_else(|e| match e { - Error::ObjectAlreadyExists { .. } => Ok(()), - _ => Err(e), - })?; + self.ip_pool_create( + opctx, &params, /*internal=*/ true, /*silo_id*/ None, + ) + .await + .map(|_| ()) + .or_else(|e| match e { + Error::ObjectAlreadyExists { .. } => Ok(()), + _ => Err(e), + })?; let params = external_params::IpPoolCreate { identity: IdentityMetadataCreateParams { name: "default".parse::<Name>().unwrap(), description: String::from("default IP pool"), }, silo: None, }; - self.ip_pool_create(opctx, &params, /*internal=*/ false) - .await - .map(|_| ()) - .or_else(|e| match e { - Error::ObjectAlreadyExists { .. } => Ok(()), - _ => Err(e), - })?; + self.ip_pool_create( + opctx, &params, /*internal=*/ false, /*silo_id*/ None, + ) + .await + .map(|_| ()) + .or_else(|e| match e { + Error::ObjectAlreadyExists { .. } => Ok(()), + _ => Err(e), + })?; Ok(()) }

diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs
index 9233c01764..e932269508 100644
--- a/nexus/db-queries/src/db/queries/external_ip.rs
+++ b/nexus/db-queries/src/db/queries/external_ip.rs
@@ -869,6 +869,7 @@ mod tests {
 description: format!("ip pool {}", name), }, internal, + None, // silo id ); use crate::db::schema::ip_pool::dsl as ip_pool_dsl;

diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs
index 55ef9f3959..b98b4e8885 100644
--- a/nexus/src/app/ip_pool.rs
+++ b/nexus/src/app/ip_pool.rs
@@ -24,6 +24,7 @@
 use omicron_common::api::external::NameOrId; use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; use ref_cast::RefCast; +use uuid::Uuid; impl super::Nexus { pub fn ip_pool_lookup<'a>(
@@ -49,9 +50,18 @@
 &self, opctx: &OpContext, new_pool: &params::IpPoolCreate, + // TODO: should this be passed here as a lookup::Silo? adding this flag + // all the way down the chain is clearly a smell. The `internal` arg + // is too, really, though it is kind of nice to see at each call what's + // internal. But I'm thinking maybe there should be an IpPoolCreate' + // that is produced from IpPoolCreate and includes internal and the silo + // id (or lookup) resolved from the NameOrId. + silo_id: Option<Uuid>, ) -> CreateResult<db::model::IpPool> { self.db_datastore - .ip_pool_create(opctx, new_pool, /* internal= */ false) + .ip_pool_create( + opctx, new_pool, /* internal= */ false, silo_id, + ) .await }
@@ -61,7 +71,10 @@
 new_pool: &params::IpPoolCreate, ) -> CreateResult<db::model::IpPool> { self.db_datastore - .ip_pool_create(opctx, new_pool, /* internal= */ true) + .ip_pool_create( + opctx, new_pool, /* internal= */ true, + /* silo_id= */ None, + ) .await }

diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs
index b5c37b4234..517216f110 100644
--- a/nexus/src/external_api/http_entrypoints.rs
+++ b/nexus/src/external_api/http_entrypoints.rs
@@ -1210,7 +1210,17 @@ async fn ip_pool_create(
 let pool_params = pool_params.into_inner(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let pool = nexus.ip_pool_create(&opctx, &pool_params).await?; + let silo_id = match pool_params.clone().silo { + Some(silo) => { + let (.., authz_silo) = nexus + .silo_lookup(&opctx, silo)?
+ .lookup_for(authz::Action::Read) + .await?; + Some(authz_silo.id()) + } + _ => None, + }; + let pool = nexus.ip_pool_create(&opctx, &pool_params, silo_id).await?; Ok(HttpResponseCreated(IpPool::from(pool))) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await

diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs
index d2b642590a..21f775105b 100644
--- a/nexus/tests/integration_tests/ip_pools.rs
+++ b/nexus/tests/integration_tests/ip_pools.rs
@@ -18,6 +18,7 @@
 use nexus_test_utils::resource_helpers::{ }; use nexus_test_utils_macros::nexus_test; use omicron_common::api::external::IdentityMetadataUpdateParams; +use omicron_common::api::external::NameOrId; use omicron_common::api::external::{IdentityMetadataCreateParams, Name}; use omicron_nexus::external_api::params::ExternalIpCreate; use omicron_nexus::external_api::params::InstanceDiskAttachment;
@@ -271,6 +272,74 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) {
 .expect("Expected to be able to delete an empty IP Pool"); } +#[nexus_test] +async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + + // can create a pool with an existing silo by name + let params = IpPoolCreate { + identity: IdentityMetadataCreateParams { + name: String::from("p0").parse().unwrap(), + description: String::from(""), + }, + silo: Some(NameOrId::Name(cptestctx.silo_name.clone())), + }; + let created_pool = create_pool(client, &params).await; + assert_eq!(created_pool.identity.name, "p0"); + assert!(created_pool.silo_id.is_some()); + + let silo_id = created_pool.silo_id.unwrap(); + + // now we'll create another IP pool using that silo ID + let params = IpPoolCreate { + identity: IdentityMetadataCreateParams { + name: String::from("p1").parse().unwrap(), + description: String::from(""), + }, + silo: Some(NameOrId::Id(silo_id)), + }; + let created_pool = create_pool(client, &params).await; + assert_eq!(created_pool.identity.name, "p1"); + assert_eq!(created_pool.silo_id.unwrap(), silo_id); + + // expect 404 if the specified silo doesn't exist + let bad_silo_params = IpPoolCreate { + identity: IdentityMetadataCreateParams { + name: String::from("p2").parse().unwrap(), + description: String::from(""), + }, + silo: Some(NameOrId::Name( + String::from("not-a-thing").parse().unwrap(), + )), + }; + let error: HttpErrorResponseBody = NexusRequest::new( + RequestBuilder::new(client, Method::POST, "/v1/system/ip-pools") + .body(Some(&bad_silo_params)) + .expect_status(Some(StatusCode::NOT_FOUND)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + + assert_eq!(error.message, "not found: silo with name \"not-a-thing\""); } + +async fn create_pool( + client: &ClientTestContext, + params: &IpPoolCreate, +) -> IpPool { + NexusRequest::objects_post(client, "/v1/system/ip-pools", params) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap() } + // Data for testing overlapping IP ranges struct TestRange { // A starting IP range that should be inserted correctly

From 049fc0f44a7cb05744beb2d898307146d4ddb6b7 Mon Sep 17 00:00:00 2001
From: David Crespo
Date: Tue, 29 Aug 2023 14:01:48 -0500
Subject: [PATCH 04/17] change internal column to use silo id, add and populate default column
---
 Cargo.lock                    | 17 +++++-
 Cargo.toml                    | 1 +
 nexus/Cargo.toml              | 1 +
 nexus/db-model/src/ip_pool.rs | 21 +++----
 nexus/db-model/src/schema.rs  | 3 +-
 nexus/db-queries/src/db/datastore/ip_pool.rs | 27 +++++-----
 nexus/db-queries/src/db/datastore/project.rs | 3 +-
 nexus/db-queries/src/db/datastore/rack.rs    | 52 +++++++++----------
 .../db-queries/src/db/queries/external_ip.rs | 5 +-
 nexus/src/app/ip_pool.rs                     | 52 ++++++++-----------
 nexus/src/external_api/http_entrypoints.rs   | 18 ++-----
 nexus/tests/integration_tests/schema.rs      | 29 +++++++----
 schema/crdb/3.0.0/up.sql                     | 38 +++++++++-----
 schema/crdb/3.0.1/up.sql                     | 25 +++++++++
 schema/crdb/3.0.2/up.sql                     | 17 ++++++
 schema/crdb/dbinit.sql                       | 27 +++++-----
 16 files changed, 196 insertions(+), 140 deletions(-)
 create mode 100644 schema/crdb/3.0.1/up.sql
 create mode 100644 schema/crdb/3.0.2/up.sql

diff --git a/Cargo.lock b/Cargo.lock
index e12474f18c..469297fcb8 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5093,6 +5093,7 @@
 dependencies = [ "serde_json", "serde_urlencoded", "serde_with", + "similar-asserts", "sled-agent-client", "slog", "slog-async",
@@ -7830,6 +7831,20 @@
 name = "similar" version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "420acb44afdae038210c99e69aae24109f32f15500aa708e81d46c9f29d55fcf" +dependencies = [ + "bstr 0.2.17", + "unicode-segmentation", +] + +[[package]] +name = "similar-asserts" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e041bb827d1bfca18f213411d51b665309f1afb37a04a5d1464530e13779fc0f" +dependencies = [ + "console", + "similar", +] [[package]] name = "siphasher"
@@ -9208,7 +9223,7 @@
 version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "rand 0.8.5", "static_assertions", ]

diff --git a/Cargo.toml b/Cargo.toml
index 88411380c3..18744a0db1 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -305,6 +305,7 @@
 sha3 = "0.10.8" shell-words = "1.1.0" signal-hook = "0.3" signal-hook-tokio = { version = "0.3", features = [ "futures-v0_3" ] } +similar-asserts = "1.5.0" sled = "0.34" sled-agent-client = { path = "sled-agent-client" } sled-hardware = { path = "sled-hardware" }

diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml
index c0ae92a054..33abdab366 100644
--- a/nexus/Cargo.toml
+++ b/nexus/Cargo.toml
@@ -68,6 +68,7 @@
 serde.workspace = true serde_json.workspace = true serde_urlencoded.workspace = true serde_with.workspace = true +similar-asserts.workspace = true sled-agent-client.workspace = true slog.workspace = true slog-async.workspace = true

diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs
index fc5f4de30d..7fe62045be 100644
--- a/nexus/db-model/src/ip_pool.rs
+++ b/nexus/db-model/src/ip_pool.rs
@@ -32,31 +32,27 @@ pub struct IpPool {
 #[diesel(embed)] pub identity: IpPoolIdentity, - /// If true, identifies that this IP pool is dedicated to "Control-Plane - /// Services", such as Nexus. - /// - /// Otherwise, this IP pool is intended for usage by customer VMs. - pub internal: bool, - /// Child resource generation number, for optimistic concurrency control of /// the contained ranges. pub rcgen: i64, - /// Silo, if IP pool is associated with a particular silo. Must be null - /// if internal is true. Must be non-null if project_id is non-null. When - /// project_id is non-null, silo_id will (naturally) be the ID of the + /// Silo, if IP pool is associated with a particular silo.
 One special use + /// for this is associating a pool with the internal silo oxide-internal, + /// which is used for internal services. If there is no silo ID, the + /// pool is considered a fleet-wide silo and will be used for allocating + /// instance IPs in silos that don't have their own pool. Must be non- + /// null if project_id is non-null (this is enforced as a DB constraint). + /// When project_id is non-null, silo_id will (naturally) be the ID of the /// project's silo. pub silo_id: Option<Uuid>, - /// Project, if IP pool is associated with a particular project. Must be - /// null if internal is true. + /// Project, if IP pool is associated with a particular project pub project_id: Option<Uuid>, } impl IpPool { pub fn new( pool_identity: &external::IdentityMetadataCreateParams, - internal: bool, silo_id: Option<Uuid>, ) -> Self { Self { identity: IpPoolIdentity::new(
@@ -64,7 +60,6 @@ impl IpPool {
 Uuid::new_v4(), pool_identity.clone(), ), - internal, rcgen: 0, silo_id, project_id: None, } }

diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs
index e81722d3be..9cc4151147 100644
--- a/nexus/db-model/src/schema.rs
+++ b/nexus/db-model/src/schema.rs
@@ -452,7 +452,6 @@
 table! { time_created -> Timestamptz, time_modified -> Timestamptz, time_deleted -> Nullable<Timestamptz>, - internal -> Bool, rcgen -> Int8, silo_id -> Nullable<Uuid>, project_id -> Nullable<Uuid>, } }
@@ -1131,7 +1130,7 @@
 /// /// This should be updated whenever the schema is changed. For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(3, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(3, 0, 2); allow_tables_to_appear_in_same_query!( system_update,

diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs
index 04193f47aa..56300930a8 100644
--- a/nexus/db-queries/src/db/datastore/ip_pool.rs
+++ b/nexus/db-queries/src/db/datastore/ip_pool.rs
@@ -14,6 +14,7 @@
 use crate::db::collection_insert::DatastoreCollection; use crate::db::error::diesel_pool_result_optional; use crate::db::error::public_error_from_diesel_pool; use crate::db::error::ErrorHandler; +use crate::db::fixed_data::silo::INTERNAL_SILO_ID; use crate::db::identity::Resource; use crate::db::lookup::LookupPath; use crate::db::model::IpPool;
@@ -27,7 +28,6 @@
 use async_bb8_diesel::{AsyncRunQueryDsl, PoolError}; use chrono::Utc; use diesel::prelude::*; use ipnetwork::IpNetwork; -use nexus_types::external_api::params; use nexus_types::external_api::shared::IpRange; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::CreateResult;
@@ -65,7 +65,8 @@ impl DataStore {
 &pagparams.map_name(|n| Name::ref_cast(n)), ), } - .filter(dsl::internal.eq(false)) + // != excludes nulls so we explicitly include them + .filter(dsl::silo_id.ne(*INTERNAL_SILO_ID).or(dsl::silo_id.is_null())) .filter(dsl::time_deleted.is_null()) .select(db::model::IpPool::as_select()) .get_results_async(self.pool_authorized(opctx).await?)
@@ -98,7 +99,8 @@
 .ip_pool_name(&name) .fetch_for(action) .await?; - if pool.internal { + // Can't look up the internal pool + if pool.silo_id == Some(*INTERNAL_SILO_ID) { return Err(authz_pool.not_found()); }
@@ -120,7 +122,7 @@
 // Look up this IP pool by rack ID. let (authz_pool, pool) = dsl::ip_pool - .filter(dsl::internal.eq(true)) + .filter(dsl::silo_id.eq(*INTERNAL_SILO_ID)) .filter(dsl::time_deleted.is_null()) .select(IpPool::as_select()) .get_result_async(self.pool_authorized(opctx).await?)
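The "!= excludes nulls" comments in the hunks above are worth pausing on: in SQL, comparing a NULL column against a value yields NULL rather than true, so fleet-scoped pools (silo_id IS NULL) would silently drop out of the filter without the explicit OR clause. A self-contained sketch of that three-valued behavior, modeled with Option rather than real SQL (the value 7 is an arbitrary stand-in for INTERNAL_SILO_ID):

// SQL's `silo_id != $internal` evaluates to NULL for rows where silo_id is
// NULL, and NULL is not true, so those rows are filtered out unless the
// query also adds `OR silo_id IS NULL` -- which is what the Diesel filters do.
fn sql_ne(column: Option<u32>, value: u32) -> Option<bool> {
    column.map(|c| c != value) // NULL in, NULL out
}

fn main() {
    let internal_silo = 7; // stand-in for INTERNAL_SILO_ID
    assert_eq!(sql_ne(Some(3), internal_silo), Some(true)); // external pool: kept
    assert_eq!(sql_ne(Some(7), internal_silo), Some(false)); // internal pool: dropped
    assert_eq!(sql_ne(None, internal_silo), None); // fleet pool: dropped too!
}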
@@ -142,20 +144,15 @@
 } /// Creates a new IP pool. - /// - /// - If `internal` is set, this IP pool is used for Oxide services. pub async fn ip_pool_create( &self, opctx: &OpContext, - new_pool: &params::IpPoolCreate, - internal: bool, - silo_id: Option<Uuid>, + pool: IpPool, ) -> CreateResult<IpPool> { use db::schema::ip_pool::dsl; opctx .authorize(authz::Action::CreateChild, &authz::IP_POOL_LIST) .await?; - let pool = IpPool::new(&new_pool.identity, internal, silo_id); let pool_name = pool.name().as_str().to_string(); diesel::insert_into(dsl::ip_pool)
@@ -205,7 +202,10 @@
 // in between the above check for children and this query. let now = Utc::now(); let updated_rows = diesel::update(dsl::ip_pool) - .filter(dsl::internal.eq(false)) + // != excludes nulls so we explicitly include them + .filter( + dsl::silo_id.ne(*INTERNAL_SILO_ID).or(dsl::silo_id.is_null()), + ) .filter(dsl::time_deleted.is_null()) .filter(dsl::id.eq(authz_pool.id())) .filter(dsl::rcgen.eq(db_pool.rcgen))
@@ -237,7 +237,10 @@
 use db::schema::ip_pool::dsl; opctx.authorize(authz::Action::Modify, authz_pool).await?; diesel::update(dsl::ip_pool) - .filter(dsl::internal.eq(false)) + // != excludes nulls so we explicitly include them + .filter( + dsl::silo_id.ne(*INTERNAL_SILO_ID).or(dsl::silo_id.is_null()), + ) .filter(dsl::id.eq(authz_pool.id())) .filter(dsl::time_deleted.is_null()) .set(updates)

diff --git a/nexus/db-queries/src/db/datastore/project.rs b/nexus/db-queries/src/db/datastore/project.rs
index 9ae520a97a..b3759f9cce 100644
--- a/nexus/db-queries/src/db/datastore/project.rs
+++ b/nexus/db-queries/src/db/datastore/project.rs
@@ -351,7 +351,8 @@ impl DataStore {
 } // TODO(2148, 2056): filter only pools accessible by the given // project, once specific projects for pools are implemented - .filter(dsl::internal.eq(false)) + // != excludes nulls so we explicitly include them + .filter(dsl::silo_id.ne(*INTERNAL_SILO_ID).or(dsl::silo_id.is_null())) .filter(dsl::time_deleted.is_null()) .select(db::model::IpPool::as_select()) .get_results_async(self.pool_authorized(opctx).await?)

diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs
index 20278c5044..4d451bf1c6 100644
--- a/nexus/db-queries/src/db/datastore/rack.rs
+++ b/nexus/db-queries/src/db/datastore/rack.rs
@@ -15,6 +15,7 @@
 use crate::db::collection_insert::DatastoreCollection; use crate::db::error::public_error_from_diesel_pool; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; +use crate::db::fixed_data::silo::INTERNAL_SILO_ID; use crate::db::fixed_data::vpc_subnet::DNS_VPC_SUBNET; use crate::db::fixed_data::vpc_subnet::NEXUS_VPC_SUBNET; use crate::db::fixed_data::vpc_subnet::NTP_VPC_SUBNET;
@@ -556,39 +557,34 @@ impl DataStore {
 self.rack_insert(opctx, &db::model::Rack::new(rack_id)).await?; - let params = external_params::IpPoolCreate { - identity: IdentityMetadataCreateParams { + let internal_pool = db::model::IpPool::new( + &IdentityMetadataCreateParams { name: SERVICE_IP_POOL_NAME.parse::<Name>().unwrap(), description: String::from("IP Pool for Oxide Services"), }, - silo: None, - }; + Some(*INTERNAL_SILO_ID), + ); + + self.ip_pool_create(opctx, internal_pool).await.map(|_| ()).or_else( + |e| match e { + Error::ObjectAlreadyExists { ..
 } => Ok(()), + _ => Err(e), + }, + )?; - let params = external_params::IpPoolCreate { - identity: IdentityMetadataCreateParams { + let default_pool = db::model::IpPool::new( + &IdentityMetadataCreateParams { name: "default".parse::<Name>().unwrap(), description: String::from("default IP pool"), }, - silo: None, - }; + None, // no silo ID, fleet scoped + ); + self.ip_pool_create(opctx, default_pool).await.map(|_| ()).or_else( + |e| match e { + Error::ObjectAlreadyExists { .. } => Ok(()), + _ => Err(e), + }, + )?; Ok(()) }
@@ -1104,7 +1100,7 @@ mod test {
 // been allocated as a part of the service IP pool. let (.., svc_pool) = datastore.ip_pools_service_lookup(&opctx).await.unwrap(); - assert!(svc_pool.internal); + assert_eq!(svc_pool.silo_id, Some(*INTERNAL_SILO_ID)); let observed_ip_pool_ranges = get_all_ip_pool_ranges(&datastore).await; assert_eq!(observed_ip_pool_ranges.len(), 1);
@@ -1306,7 +1302,7 @@
 // allocated as a part of the service IP pool. let (.., svc_pool) = datastore.ip_pools_service_lookup(&opctx).await.unwrap(); - assert!(svc_pool.internal); + assert_eq!(svc_pool.silo_id, Some(*INTERNAL_SILO_ID)); let observed_ip_pool_ranges = get_all_ip_pool_ranges(&datastore).await; assert_eq!(observed_ip_pool_ranges.len(), 1);

diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs
index e932269508..1ad18651c7 100644
--- a/nexus/db-queries/src/db/queries/external_ip.rs
+++ b/nexus/db-queries/src/db/queries/external_ip.rs
@@ -816,6 +816,7 @@ mod tests {
 use crate::context::OpContext; use crate::db::datastore::DataStore; use crate::db::datastore::SERVICE_IP_POOL_NAME; + use crate::db::fixed_data::silo::INTERNAL_SILO_ID; use crate::db::identity::Resource; use crate::db::model::IpKind; use crate::db::model::IpPool;
@@ -862,14 +863,12 @@
 } async fn create_ip_pool(&self, name: &str, range: IpRange) { - let internal = false; let pool = IpPool::new( &IdentityMetadataCreateParams { name: String::from(name).parse().unwrap(), description: format!("ip pool {}", name), }, - internal, - None, // silo id + Some(*INTERNAL_SILO_ID), ); use crate::db::schema::ip_pool::dsl as ip_pool_dsl;

diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs
index b98b4e8885..b6d5040a30 100644
--- a/nexus/src/app/ip_pool.rs
+++ b/nexus/src/app/ip_pool.rs
@@ -12,7 +12,9 @@
 use crate::db::model::Name; use crate::external_api::params; use crate::external_api::shared::IpRange; use ipnetwork::IpNetwork; +use nexus_db_model::IpPool; use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::fixed_data::silo::INTERNAL_SILO_ID; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DataPageParams;
@@ -24,7 +26,10 @@
 use omicron_common::api::external::NameOrId; use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; use ref_cast::RefCast; -use uuid::Uuid; + +fn is_internal(pool: &IpPool) -> bool { + pool.silo_id == Some(*INTERNAL_SILO_ID) +} impl super::Nexus { pub fn ip_pool_lookup<'a>(
@@ -49,33 +54,20 @@ impl super::Nexus {
 pub async fn ip_pool_create( &self, opctx: &OpContext, - new_pool: &params::IpPoolCreate,
 - // TODO: should this be passed here as a lookup::Silo? adding this flag - // all the way down the chain is clearly a smell. The `internal` arg - // is too, really, though it is kind of nice to see at each call what's - // internal. But I'm thinking maybe there should be an IpPoolCreate' - // that is produced from IpPoolCreate and includes internal and the silo - // id (or lookup) resolved from the NameOrId. - silo_id: Option<Uuid>, - ) -> CreateResult<db::model::IpPool> { - self.db_datastore - .ip_pool_create( - opctx, new_pool, /* internal= */ false, silo_id, - ) - .await - } - - pub async fn ip_pool_services_create( - &self, - opctx: &OpContext, - new_pool: &params::IpPoolCreate, + pool_params: &params::IpPoolCreate, ) -> CreateResult<db::model::IpPool> { - self.db_datastore - .ip_pool_create( - opctx, new_pool, /* internal= */ true, - /* silo_id= */ None, - ) - .await + let silo_id = match pool_params.clone().silo { + Some(silo) => { + let (.., authz_silo) = self + .silo_lookup(&opctx, silo)? + .lookup_for(authz::Action::Read) + .await?; + Some(authz_silo.id()) + } + _ => None, + }; + let pool = db::model::IpPool::new(&pool_params.identity, silo_id); + self.db_datastore.ip_pool_create(opctx, pool).await } pub async fn ip_pools_list(
@@ -117,7 +109,7 @@
 ) -> ListResultVec { let (.., authz_pool, db_pool) = pool_lookup.fetch_for(authz::Action::ListChildren).await?; - if db_pool.internal { + if is_internal(&db_pool) { return Err(Error::not_found_by_name( ResourceType::IpPool, &db_pool.identity.name,
@@ -137,7 +129,7 @@
 ) -> UpdateResult { let (.., authz_pool, db_pool) = pool_lookup.fetch_for(authz::Action::Modify).await?; - if db_pool.internal { + if is_internal(&db_pool) { return Err(Error::not_found_by_name( ResourceType::IpPool, &db_pool.identity.name,
@@ -154,7 +146,7 @@
 ) -> DeleteResult { let (.., authz_pool, db_pool) = pool_lookup.fetch_for(authz::Action::Modify).await?; - if db_pool.internal { + if is_internal(&db_pool) { return Err(Error::not_found_by_name( ResourceType::IpPool, &db_pool.identity.name,

diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs
index 517216f110..bef578649f 100644
--- a/nexus/src/external_api/http_entrypoints.rs
+++ b/nexus/src/external_api/http_entrypoints.rs
@@ -39,9 +39,11 @@
 use dropshot::{ channel, endpoint, WebsocketChannelResult, WebsocketConnection, }; use ipnetwork::IpNetwork; -use nexus_db_queries::authz::ApiResource; use nexus_db_queries::db::lookup::ImageLookup; use nexus_db_queries::db::lookup::ImageParentLookup; +use nexus_db_queries::{ + authz::ApiResource, db::fixed_data::silo::INTERNAL_SILO_ID, +}; use nexus_types::external_api::params::ProjectSelector; use nexus_types::{ external_api::views::{SledInstance, Switch},
@@ -1149,7 +1151,7 @@ async fn project_ip_pool_view(
 .await?; // TODO(2148): once we've actualy implemented filtering to pools belonging to // the specified project, we can remove this internal check. - if pool.internal { + if pool.silo_id == Some(*INTERNAL_SILO_ID) { return Err(authz_pool.not_found().into()); } Ok(HttpResponseOk(IpPool::from(pool)))
@@ -1210,17 +1212,7 @@ async fn ip_pool_create(
 let pool_params = pool_params.into_inner(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let silo_id = match pool_params.clone().silo { - Some(silo) => { - let (.., authz_silo) = nexus - .silo_lookup(&opctx, silo)?
 - .lookup_for(authz::Action::Read) - .await?; - Some(authz_silo.id()) - } - _ => None, - }; - let pool = nexus.ip_pool_create(&opctx, &pool_params, silo_id).await?; + let pool = nexus.ip_pool_create(&opctx, &pool_params).await?; Ok(HttpResponseCreated(IpPool::from(pool))) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await

diff --git a/nexus/tests/integration_tests/schema.rs b/nexus/tests/integration_tests/schema.rs
index 88177c8e98..9bbfa93f40 100644
--- a/nexus/tests/integration_tests/schema.rs
+++ b/nexus/tests/integration_tests/schema.rs
@@ -12,6 +12,7 @@
 use omicron_common::nexus_config::Config; use omicron_common::nexus_config::SchemaConfig; use omicron_test_utils::dev::db::CockroachInstance; use pretty_assertions::assert_eq; +use similar_asserts; use slog::Logger; use std::collections::{BTreeMap, BTreeSet}; use std::path::PathBuf;
@@ -470,16 +471,24 @@
 struct InformationSchema { impl InformationSchema { fn pretty_assert_eq(&self, other: &Self) { - // TODO: We could manually iterate here too - the Debug outputs for - // each of these is pretty large, and can be kinda painful to read - // when comparing e.g. "All columns that exist in the database". - assert_eq!(self.columns, other.columns); - assert_eq!(self.check_constraints, other.check_constraints); - assert_eq!(self.key_column_usage, other.key_column_usage); - assert_eq!(self.referential_constraints, other.referential_constraints); - assert_eq!(self.views, other.views); - assert_eq!(self.statistics, other.statistics); - assert_eq!(self.tables, other.tables); + // similar_asserts gets us nice diff that only includes the relevant context. + // the columns diff especially needs this: it can be 20k lines otherwise + similar_asserts::assert_eq!(self.columns, other.columns); + similar_asserts::assert_eq!( + self.check_constraints, + other.check_constraints + ); + similar_asserts::assert_eq!( + self.key_column_usage, + other.key_column_usage + ); + similar_asserts::assert_eq!( + self.referential_constraints, + other.referential_constraints + ); + similar_asserts::assert_eq!(self.views, other.views); + similar_asserts::assert_eq!(self.statistics, other.statistics); + similar_asserts::assert_eq!(self.tables, other.tables); } async fn new(crdb: &CockroachInstance) -> Self {

diff --git a/schema/crdb/3.0.0/up.sql b/schema/crdb/3.0.0/up.sql
index b2883a933a..7529c5d180 100644
--- a/schema/crdb/3.0.0/up.sql
+++ b/schema/crdb/3.0.0/up.sql
@@ -1,11 +1,3 @@
 --- CRDB documentation recommends the following: --- "Execute schema changes either as single statements (as an implicit transaction), --- or in an explicit transaction consisting of the single schema change statement." --- --- For each schema change, we transactionally: --- 1. Check the current version
 --- 2. Apply the idempotent update - BEGIN; SELECT CAST(
@@ -20,17 +12,35 @@
 SELECT CAST( ); ALTER TABLE omicron.public.ip_pool + ADD COLUMN IF NOT EXISTS "default" BOOLEAN NOT NULL DEFAULT FALSE, + ADD COLUMN IF NOT EXISTS silo_ID UUID, ADD COLUMN IF NOT EXISTS project_id UUID, - + -- if silo_id is null, then project_id must be null ADD CONSTRAINT IF NOT EXISTS project_implies_silo CHECK ( NOT ((silo_id IS NULL) AND (project_id IS NOT NULL)) - ), - - -- if internal = true, non-null silo_id and project_id are not allowed - ADD CONSTRAINT IF NOT EXISTS internal_pools_have_null_silo_and_project CHECK ( - NOT (INTERNAL AND ((silo_id IS NOT NULL) OR (project_id IS NOT NULL))) ); +COMMIT; + +-- needs to be in its own transaction because of this thrilling bug +-- https://github.com/cockroachdb/cockroach/issues/83593 +BEGIN; + +SELECT CAST( + IF( + ( + SELECT version = '2.0.0' and target_version = '3.0.0' + FROM omicron.public.db_metadata WHERE singleton = true + ), + 'true', + 'Invalid starting version for schema change' + ) AS BOOL +); + +CREATE UNIQUE INDEX IF NOT EXISTS one_default_pool_per_scope ON omicron.public.ip_pool ( + silo_id, project_id +) WHERE + "default" AND time_deleted IS NULL; COMMIT;

diff --git a/schema/crdb/3.0.1/up.sql b/schema/crdb/3.0.1/up.sql
new file mode 100644
index 0000000000..def0192837
--- /dev/null
+++ b/schema/crdb/3.0.1/up.sql
@@ -0,0 +1,25 @@
+BEGIN; + +SELECT CAST( + IF( + ( + SELECT version = '3.0.0' and target_version = '3.0.1' + FROM omicron.public.db_metadata WHERE singleton = true + ), + 'true', + 'Invalid starting version for schema change' + ) AS BOOL +); + +-- to get ready to drop the internal column, take any IP pools with internal = +-- true and set silo_id = INTERNAL_SILO_ID + +UPDATE omicron.public.ip_pool + SET silo_id = '001de000-5110-4000-8000-000000000001' + WHERE internal = true and time_deleted is null; + +UPDATE omicron.public.ip_pool + SET "default" = true + WHERE name = 'default' and time_deleted is null; + +COMMIT;

diff --git a/schema/crdb/3.0.2/up.sql b/schema/crdb/3.0.2/up.sql
new file mode 100644
index 0000000000..70314a00b9
--- /dev/null
+++ b/schema/crdb/3.0.2/up.sql
@@ -0,0 +1,17 @@
+BEGIN; + +SELECT CAST( + IF( + ( + SELECT version = '3.0.1' and target_version = '3.0.2' + FROM omicron.public.db_metadata WHERE singleton = true + ), + 'true', + 'Invalid starting version for schema change' + ) AS BOOL +); + +ALTER TABLE omicron.public.ip_pool + DROP COLUMN IF EXISTS internal; + +COMMIT;

diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql
index 91710a5c00..d25f922cb0 100644
--- a/schema/crdb/dbinit.sql
+++ b/schema/crdb/dbinit.sql
@@ -1480,17 +1480,15 @@ CREATE TABLE IF NOT EXISTS omicron.public.ip_pool (
 time_modified TIMESTAMPTZ NOT NULL, time_deleted TIMESTAMPTZ, - /* Identifies if the IP Pool is dedicated to Control Plane services */ - internal BOOL NOT NULL, - /* The collection's child-resource generation number */ rcgen INT8 NOT NULL, + "default" BOOLEAN NOT NULL DEFAULT FALSE, + /* - * Fields representating association with a silo or project. silo_id must be - * non-null if project_id is non-null. When project_id is non-null, silo_id - * will (naturally) be the ID of the project's silo. Both must be null if - * internal is true, i.e., internal IP pools must be fleet-level pools. + * Fields representating association with a silo or project. silo_id must + * be non-null if project_id is non-null. silo_id is also used to mark an IP + * pool as "internal" by associating it with the oxide-internal silo.
 */ silo_id UUID, project_id UUID, -- if silo_id is null, then project_id must be null CONSTRAINT project_implies_silo CHECK ( NOT ((silo_id IS NULL) AND (project_id IS NOT NULL)) - ), - - -- if internal = true, non-null silo_id and project_id are not allowed - ADD CONSTRAINT internal_pools_have_null_silo_and_project CHECK ( - NOT (INTERNAL AND ((silo_id IS NOT NULL) OR (project_id IS NOT NULL))) ) ); +/* + * Ensure there can only be one default pool for the fleet or a given silo or project + */ +CREATE UNIQUE INDEX IF NOT EXISTS one_default_pool_per_scope ON omicron.public.ip_pool ( + silo_id, project_id +) WHERE + "default" = true AND time_deleted IS NULL; + /* * Index ensuring uniqueness of IP Pool names, globally. */
@@ -2562,7 +2563,7 @@
 INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '3.0.0', NULL) + ( TRUE, NOW(), NOW(), '3.0.2', NULL) ON CONFLICT DO NOTHING; COMMIT;

From b151e0815e90af752fa649ec0d3235a9face3613 Mon Sep 17 00:00:00 2001
From: David Crespo
Date: Tue, 29 Aug 2023 21:40:31 -0500
Subject: [PATCH 05/17] new default IP pool logic with fallback to higher levels
---
 nexus/db-model/src/ip_pool.rs                | 4 +
 nexus/db-model/src/schema.rs                 | 1 +
 .../src/db/datastore/external_ip.rs          | 7 +
 nexus/db-queries/src/db/datastore/ip_pool.rs | 133 ++++++++++++++++--
 nexus/db-queries/src/db/datastore/rack.rs    | 2 +
 .../db-queries/src/db/queries/external_ip.rs | 5 +-
 nexus/src/app/ip_pool.rs                     | 6 +-
 nexus/src/app/sagas/instance_create.rs       | 10 +-
 nexus/test-utils/src/resource_helpers.rs     | 1 +
 nexus/tests/integration_tests/endpoints.rs   | 2 +
 nexus/tests/integration_tests/instances.rs   | 3 +
 nexus/tests/integration_tests/ip_pools.rs    | 7 +
 nexus/types/src/external_api/params.rs       | 2 +
 13 files changed, 169 insertions(+), 14 deletions(-)

diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs
index 7fe62045be..3040caf278 100644
--- a/nexus/db-model/src/ip_pool.rs
+++ b/nexus/db-model/src/ip_pool.rs
@@ -48,12 +48,15 @@ pub struct IpPool {
 /// Project, if IP pool is associated with a particular project pub project_id: Option<Uuid>, + + pub default: bool, } impl IpPool { pub fn new( pool_identity: &external::IdentityMetadataCreateParams, silo_id: Option<Uuid>, + default: bool, ) -> Self { Self { identity: IpPoolIdentity::new(
@@ -63,6 +66,7 @@ impl IpPool {
 rcgen: 0, silo_id, project_id: None, + default, } } }

diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs
index 9cc4151147..7736cbdb8a 100644
--- a/nexus/db-model/src/schema.rs
+++ b/nexus/db-model/src/schema.rs
@@ -455,6 +455,7 @@ table!
 { rcgen -> Int8, silo_id -> Nullable<Uuid>, project_id -> Nullable<Uuid>, + default -> Bool, } }

diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs
index 99977cc744..be7522456c 100644
--- a/nexus/db-queries/src/db/datastore/external_ip.rs
+++ b/nexus/db-queries/src/db/datastore/external_ip.rs
@@ -58,6 +58,13 @@ impl DataStore {
 let name = pool_name.unwrap_or_else(|| { Name(ExternalName::from_str("default").unwrap()) }); + + // TODO: this is the whole thing -- update this call to EITHER look up + // the pool by name (only allowing access to pools matching the scope of + // the current project, i.e., you can pick a pool by name only if it's + // scoped to your project, silo, or the whole fleet) or use the default + // logic in ip_pool_fetch_default_for + let (.., pool) = self .ip_pools_fetch_for(opctx, authz::Action::CreateChild, &name) .await?;

diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs
index 56300930a8..05b1ff0180 100644
--- a/nexus/db-queries/src/db/datastore/ip_pool.rs
+++ b/nexus/db-queries/src/db/datastore/ip_pool.rs
@@ -37,11 +37,9 @@
 use omicron_common::api::external::Error; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::LookupType; -use omicron_common::api::external::Name as ExternalName; use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; use ref_cast::RefCast; -use std::str::FromStr; use uuid::Uuid; impl DataStore {
@@ -74,18 +72,42 @@
 .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) } - /// Looks up the default IP pool by name. + /// Looks up the default IP pool for a given scope, i.e., a given + /// combination of silo and project ID (or none). If there is no default at + /// a given scope, fall back up a level. There should always be a default at + /// fleet level, though this query can theoretically fail. pub async fn ip_pools_fetch_default_for( &self, opctx: &OpContext, action: authz::Action, - ) -> LookupResult<(authz::IpPool, IpPool)> { - self.ip_pools_fetch_for( - opctx, - action, - &Name(ExternalName::from_str("default").unwrap()), - ) - .await + silo_id: Option<Uuid>, + project_id: Option<Uuid>, + ) -> LookupResult<IpPool> { + use db::schema::ip_pool::dsl; + opctx.authorize(action, &authz::IP_POOL_LIST).await?; + + dsl::ip_pool + .filter(dsl::silo_id.eq(silo_id).or(dsl::silo_id.is_null())) + .filter( + dsl::project_id.eq(project_id).or(dsl::project_id.is_null()), + ) + .filter(dsl::default.eq(true)) + .filter(dsl::time_deleted.is_null()) + // this will sort by most specific first, i.e., + // + // (silo, project) + // (silo, null) + // (null, null) + // + // then by only taking the first result, we get the most specific one + .order(( + dsl::project_id.asc().nulls_last(), + dsl::silo_id.asc().nulls_last(), + )) + .select(IpPool::as_select()) + .first_async::<IpPool>(self.pool_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) } /// Looks up an IP pool by name.
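The ordering in this query is the crux of the fallback: sorting the scope columns ascending with NULLS LAST and taking the first row means the most specific matching default wins, and a fleet-wide default (both columns NULL) is only chosen when nothing narrower exists. A runnable sketch of the same selection rule over in-memory rows, under the assumption that it mirrors the Diesel query's semantics (names and types here are invented for illustration):

// In-memory model of the fallback: among candidate defaults whose scope is
// either unset (NULL) or equal to the requested scope, prefer project over
// silo over fleet, mirroring ORDER BY ... ASC NULLS LAST plus first().
struct PoolRow {
    name: &'static str,
    silo_id: Option<u32>,
    project_id: Option<u32>,
    is_default: bool, // models the "default" column at this point in the series
}

fn pick_default<'a>(
    pools: &'a [PoolRow],
    silo: Option<u32>,
    project: Option<u32>,
) -> Option<&'a PoolRow> {
    pools
        .iter()
        .filter(|p| p.is_default)
        .filter(|p| p.silo_id.is_none() || p.silo_id == silo)
        .filter(|p| p.project_id.is_none() || p.project_id == project)
        // false sorts before true, so a set scope beats an unset one
        .min_by_key(|p| (p.project_id.is_none(), p.silo_id.is_none()))
}

fn main() {
    let pools = [
        PoolRow { name: "fleet-default", silo_id: None, project_id: None, is_default: true },
        PoolRow { name: "silo-default", silo_id: Some(1), project_id: None, is_default: true },
    ];
    // silo 1 has its own default; silo 2 falls back to the fleet default
    assert_eq!(pick_default(&pools, Some(1), None).unwrap().name, "silo-default");
    assert_eq!(pick_default(&pools, Some(2), None).unwrap().name, "fleet-default");
}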
@@ -429,3 +451,94 @@
 } } } + +#[cfg(test)] +mod test { + use crate::authz; + use crate::db::datastore::datastore_test; + use crate::db::model::IpPool; + use nexus_test_utils::db::test_setup_database; + use nexus_types::identity::Resource; + use omicron_common::api::external::IdentityMetadataCreateParams; + use omicron_test_utils::dev; + + #[tokio::test] + async fn test_default_ip_pools() { + let logctx = dev::test_setup_log("test_default_ip_pools"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + let action = authz::Action::ListChildren; + + // we start out with the default fleet-level pool already created, + // so when we ask for the fleet default (no silo or project) we get it back + let fleet_default_pool = datastore + .ip_pools_fetch_default_for(&opctx, action, None, None) + .await + .unwrap(); + + assert_eq!(fleet_default_pool.identity.name.as_str(), "default"); + assert!(fleet_default_pool.default); + assert_eq!(fleet_default_pool.silo_id, None); + assert_eq!(fleet_default_pool.project_id, None); + + // now the interesting thing is that when we fetch the default pool for + // a particular silo or a particular project, if those scopes do not + // have a default IP pool, we will still get back the fleet default + + // default for "current" silo is still the fleet default one because it + // has no default of its own + let silo_id = opctx.authn.silo_required().unwrap().id(); + let ip_pool = datastore + .ip_pools_fetch_default_for(&opctx, action, Some(silo_id), None) + .await + .expect("Failed to get silo's default IP pool"); + assert_eq!(ip_pool.id(), fleet_default_pool.id()); + + // create a non-default pool for the silo + let identity = IdentityMetadataCreateParams { + name: "non-default-for-silo".parse().unwrap(), + description: "".to_string(), + }; + let _ = datastore + .ip_pool_create( + &opctx, + IpPool::new(&identity, Some(silo_id), /*default= */ false), + ) + .await; + + // because that one was not a default, when we ask for silo default + // pool, we still get the fleet default + let ip_pool = datastore + .ip_pools_fetch_default_for(&opctx, action, Some(silo_id), None) + .await + .expect("Failed to get fleet default IP pool"); + assert_eq!(ip_pool.id(), fleet_default_pool.id()); + + // now create a default pool for the silo + let identity = IdentityMetadataCreateParams { + name: "default-for-silo".parse().unwrap(), + description: "".to_string(), + }; + let _ = datastore + .ip_pool_create(&opctx, IpPool::new(&identity, Some(silo_id), true)) + .await; + + // now when we ask for the silo default pool, we get the one we just made + let ip_pool = datastore + .ip_pools_fetch_default_for(&opctx, action, Some(silo_id), None) + .await + .expect("Failed to get silo's default IP pool"); + assert_eq!(ip_pool.name().as_str(), "default-for-silo"); + + // and of course, if we ask for the fleet default again we still get that one + let ip_pool = datastore + .ip_pools_fetch_default_for(&opctx, action, None, None) + .await + .expect("Failed to get fleet default IP pool"); + assert_eq!(ip_pool.id(), fleet_default_pool.id()); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } +}

diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs
index 4d451bf1c6..e507550fa9 100644
--- a/nexus/db-queries/src/db/datastore/rack.rs
+++ b/nexus/db-queries/src/db/datastore/rack.rs
@@ -563,6 +563,7 @@ impl DataStore {
Services"), }, Some(*INTERNAL_SILO_ID), + true, // default for internal silo ); self.ip_pool_create(opctx, internal_pool).await.map(|_| ()).or_else( @@ -578,6 +579,7 @@ impl DataStore { description: String::from("default IP pool"), }, None, // no silo ID, fleet scoped + true, // default for fleet ); self.ip_pool_create(opctx, default_pool).await.map(|_| ()).or_else( |e| match e { diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index 1ad18651c7..1849467ccc 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -869,6 +869,7 @@ mod tests { description: format!("ip pool {}", name), }, Some(*INTERNAL_SILO_ID), + true, ); use crate::db::schema::ip_pool::dsl as ip_pool_dsl; @@ -916,11 +917,13 @@ mod tests { } async fn default_pool_id(&self) -> Uuid { - let (.., pool) = self + let pool = self .db_datastore .ip_pools_fetch_default_for( &self.opctx, crate::authz::Action::ListChildren, + None, + None, ) .await .expect("Failed to lookup default ip pool"); diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index b6d5040a30..6709754049 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -66,7 +66,11 @@ impl super::Nexus { } _ => None, }; - let pool = db::model::IpPool::new(&pool_params.identity, silo_id); + let pool = db::model::IpPool::new( + &pool_params.identity, + silo_id, + pool_params.default, + ); self.db_datastore.ip_pool_create(opctx, pool).await } diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index 9e7693b140..92c5cd67af 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -826,8 +826,14 @@ async fn sic_allocate_instance_snat_ip( let instance_id = sagactx.lookup::("instance_id")?; let ip_id = sagactx.lookup::("snat_ip_id")?; - let (.., pool) = datastore - .ip_pools_fetch_default_for(&opctx, authz::Action::CreateChild) + let pool = datastore + .ip_pools_fetch_default_for( + &opctx, + authz::Action::CreateChild, + // TODO: get these values right + None, + None, + ) .await .map_err(ActionError::action_failed)?; let pool_id = pool.identity.id; diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index 8b66f5a3ef..77a2ac06cc 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -141,6 +141,7 @@ pub async fn create_ip_pool( description: String::from("an ip pool"), }, silo: None, + default: false, }, ) .await; diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index 4df87d2851..3abf5a2e7f 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -455,6 +455,8 @@ lazy_static! 
{ description: String::from("an IP pool"), }, silo: None, + // TODO: this might error due to the unique index + default: true, }; pub static ref DEMO_IP_POOL_PROJ_URL: String = format!("/v1/ip-pools/{}?project={}", *DEMO_IP_POOL_NAME, *DEMO_PROJECT_NAME); diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index d9fb161def..a3f4fe0e9a 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -3434,6 +3434,7 @@ async fn test_instance_ephemeral_ip_from_correct_pool( ); } +// TODO: this test fails #[nexus_test] async fn test_instance_create_in_silo(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; @@ -3502,6 +3503,8 @@ async fn test_instance_create_in_silo(cptestctx: &ControlPlaneTestContext) { .authn_as(AuthnMode::SiloUser(user_id)) .execute() .await + // fails here with 403 that comes from the default IP pool query, + // which checks ListChildren on IP_POOL_LIST .expect("Failed to create instance") .parsed_body::() .expect("Failed to parse instance"); diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index 21f775105b..e67b4f187d 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -104,6 +104,7 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { description: String::from(description), }, silo: None, + default: false, }; let created_pool: IpPool = NexusRequest::objects_post(client, ip_pools_url, ¶ms) @@ -283,6 +284,7 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { description: String::from(""), }, silo: Some(NameOrId::Name(cptestctx.silo_name.clone())), + default: false, }; let created_pool = create_pool(client, ¶ms).await; assert_eq!(created_pool.identity.name, "p0"); @@ -297,6 +299,7 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { description: String::from(""), }, silo: Some(NameOrId::Id(silo_id)), + default: false, }; let created_pool = create_pool(client, ¶ms).await; assert_eq!(created_pool.identity.name, "p1"); @@ -311,6 +314,7 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { silo: Some(NameOrId::Name( String::from("not-a-thing").parse().unwrap(), )), + default: false, }; let error: HttpErrorResponseBody = NexusRequest::new( RequestBuilder::new(client, Method::POST, "/v1/system/ip-pools") @@ -369,6 +373,7 @@ async fn test_ip_pool_range_overlapping_ranges_fails( description: String::from(description), }, silo: None, + default: false, }; let created_pool: IpPool = NexusRequest::objects_post(client, ip_pools_url, ¶ms) @@ -551,6 +556,7 @@ async fn test_ip_pool_range_pagination(cptestctx: &ControlPlaneTestContext) { description: String::from(description), }, silo: None, + default: false, }; let created_pool: IpPool = NexusRequest::objects_post(client, ip_pools_url, ¶ms) @@ -688,6 +694,7 @@ async fn test_ip_pool_list_usable_by_project( description: String::from("right on cue"), }, silo: None, + default: false, }; NexusRequest::objects_post(client, ip_pools_url, ¶ms) .authn_as(AuthnMode::PrivilegedUser) diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 4d8b40d8a9..c16b1175a0 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -735,6 +735,8 @@ pub struct IpPoolCreate { /// If an IP pool is associated with a silo, instance IP allocations in that /// silo can draw from that 
 /// silo can draw from that pool. pub silo: Option<NameOrId>, + + pub default: bool, } /// Parameters for updating an IP Pool

From 6455ec3c6d6cfc5881e191e9921c931b6f795389 Mon Sep 17 00:00:00 2001
From: David Crespo
Date: Tue, 29 Aug 2023 23:31:38 -0500
Subject: [PATCH 06/17] fix outdated comment on external IP queries, bump openapi spec
---
 nexus/db-queries/src/db/queries/external_ip.rs | 2 +-
 openapi/nexus.json                             | 4 ++++
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs
index 1849467ccc..e42a9c39a0 100644
--- a/nexus/db-queries/src/db/queries/external_ip.rs
+++ b/nexus/db-queries/src/db/queries/external_ip.rs
@@ -74,7 +74,7 @@
 const MAX_PORT: u16 = u16::MAX; /// /// In general, the query: /// -/// - Selects the next available IP address and port range from _any_ IP Pool +/// - Selects the next available IP address and port range from the specified IP pool /// - Inserts that record into the `external_ip` table /// - Updates the rcgen and time modified of the parent `ip_pool_range` table ///

diff --git a/openapi/nexus.json b/openapi/nexus.json
index 843c861967..fc97417330 100644
--- a/openapi/nexus.json
+++ b/openapi/nexus.json
@@ -10169,6 +10169,9 @@
 "description": "Create-time parameters for an `IpPool`", "type": "object", "properties": { + "default": { + "type": "boolean" + }, "description": { "type": "string" },
@@ -10186,6 +10189,7 @@
 } }, "required": [ + "default", "description", "name" ]

From 1c047dc3a4d5d921c783e9f21c05fe8e4ae313f7 Mon Sep 17 00:00:00 2001
From: David Crespo
Date: Tue, 29 Aug 2023 23:49:40 -0500
Subject: [PATCH 07/17] fix unique index preventing multiple defaults per scope
---
 nexus/db-queries/src/db/datastore/ip_pool.rs | 28 +++++++++++++++++++-
 schema/crdb/3.0.0/up.sql                     | 5 ++--
 schema/crdb/dbinit.sql                       | 7 +++--
 3 files changed, 35 insertions(+), 5 deletions(-)

diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs
index 05b1ff0180..50665e91ab 100644
--- a/nexus/db-queries/src/db/datastore/ip_pool.rs
+++ b/nexus/db-queries/src/db/datastore/ip_pool.rs
@@ -457,9 +457,10 @@ mod test {
 use crate::authz; use crate::db::datastore::datastore_test; use crate::db::model::IpPool; + use assert_matches::assert_matches; use nexus_test_utils::db::test_setup_database; use nexus_types::identity::Resource; - use omicron_common::api::external::IdentityMetadataCreateParams; + use omicron_common::api::external::{Error, IdentityMetadataCreateParams}; use omicron_test_utils::dev; #[tokio::test] async fn test_default_ip_pools() {
@@ -482,6 +483,20 @@
 assert_eq!(fleet_default_pool.silo_id, None); assert_eq!(fleet_default_pool.project_id, None); + // unique index prevents second fleet-level default + let identity = IdentityMetadataCreateParams { + name: "another-fleet-default".parse().unwrap(), + description: "".to_string(), + }; + let err = datastore + .ip_pool_create( + &opctx, + IpPool::new(&identity, None, /*default= */ true), + ) + .await + .expect_err("Failed to fail to create a second default fleet pool"); + assert_matches!(err, Error::ObjectAlreadyExists { ..
}); + // now the interesting thing is that when we fetch the default pool for // a particular silo or a particular project, if those scopes do not // have a default IP pool, we will still get back the fleet default @@ -538,6 +553,17 @@ mod test { .expect("Failed to get fleet default IP pool"); assert_eq!(ip_pool.id(), fleet_default_pool.id()); + // and we can't create a second default pool for the silo + let identity = IdentityMetadataCreateParams { + name: "second-default-for-silo".parse().unwrap(), + description: "".to_string(), + }; + let err = datastore + .ip_pool_create(&opctx, IpPool::new(&identity, Some(silo_id), true)) + .await + .expect_err("Failed to fail to create second default pool"); + assert_matches!(err, Error::ObjectAlreadyExists { .. }); + db.cleanup().await.unwrap(); logctx.cleanup_successful(); } diff --git a/schema/crdb/3.0.0/up.sql b/schema/crdb/3.0.0/up.sql index 7529c5d180..137c594e48 100644 --- a/schema/crdb/3.0.0/up.sql +++ b/schema/crdb/3.0.0/up.sql @@ -39,8 +39,9 @@ SELECT CAST( ); CREATE UNIQUE INDEX IF NOT EXISTS one_default_pool_per_scope ON omicron.public.ip_pool ( - silo_id, project_id + COALESCE(silo_id, '00000000-0000-0000-0000-000000000000'::uuid), + COALESCE(project_id, '00000000-0000-0000-0000-000000000000'::uuid) ) WHERE - "default" AND time_deleted IS NULL; + "default" = true AND time_deleted IS NULL; COMMIT; diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index d25f922cb0..83d4c0b5cb 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -1500,10 +1500,13 @@ CREATE TABLE IF NOT EXISTS omicron.public.ip_pool ( ); /* - * Ensure there can only be one default pool for the fleet or a given silo or project + * Ensure there can only be one default pool for the fleet or a given silo or + * project. Coalesce is needed because otherwise different nulls are considered + * to be distinct from each other. 
*/ CREATE UNIQUE INDEX IF NOT EXISTS one_default_pool_per_scope ON omicron.public.ip_pool ( - silo_id, project_id + COALESCE(silo_id, '00000000-0000-0000-0000-000000000000'::uuid), + COALESCE(project_id, '00000000-0000-0000-0000-000000000000'::uuid) ) WHERE "default" = true AND time_deleted IS NULL; From 6e7d60feef5b741fd953f39a77c7f1a11851b476 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 30 Aug 2023 09:08:44 -0500 Subject: [PATCH 08/17] rename default to is_default to work around possible progenitor bug --- nexus/db-model/src/ip_pool.rs | 6 +++--- nexus/db-model/src/schema.rs | 2 +- nexus/db-queries/src/db/datastore/ip_pool.rs | 4 ++-- nexus/src/app/ip_pool.rs | 2 +- nexus/test-utils/src/resource_helpers.rs | 2 +- nexus/tests/integration_tests/endpoints.rs | 2 +- nexus/tests/integration_tests/ip_pools.rs | 14 +++++++------- nexus/types/src/external_api/params.rs | 3 ++- openapi/nexus.json | 8 ++++---- schema/crdb/3.0.0/up.sql | 4 ++-- schema/crdb/3.0.1/up.sql | 2 +- schema/crdb/dbinit.sql | 4 ++-- 12 files changed, 27 insertions(+), 26 deletions(-) diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs index 3040caf278..837f9b87e0 100644 --- a/nexus/db-model/src/ip_pool.rs +++ b/nexus/db-model/src/ip_pool.rs @@ -49,14 +49,14 @@ pub struct IpPool { /// Project, if IP pool is associated with a particular project pub project_id: Option, - pub default: bool, + pub is_default: bool, } impl IpPool { pub fn new( pool_identity: &external::IdentityMetadataCreateParams, silo_id: Option, - default: bool, + is_default: bool, ) -> Self { Self { identity: IpPoolIdentity::new( @@ -66,7 +66,7 @@ impl IpPool { rcgen: 0, silo_id, project_id: None, - default, + is_default, } } } diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 7736cbdb8a..b18c7dfddb 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -455,7 +455,7 @@ table! 
{ rcgen -> Int8, silo_id -> Nullable, project_id -> Nullable, - default -> Bool, + is_default -> Bool, } } diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 50665e91ab..b3b7d75298 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -91,7 +91,7 @@ impl DataStore { .filter( dsl::project_id.eq(project_id).or(dsl::project_id.is_null()), ) - .filter(dsl::default.eq(true)) + .filter(dsl::is_default.eq(true)) .filter(dsl::time_deleted.is_null()) // this will sort by most specific first, i.e., // @@ -479,7 +479,7 @@ mod test { .unwrap(); assert_eq!(fleet_default_pool.identity.name.as_str(), "default"); - assert!(fleet_default_pool.default); + assert!(fleet_default_pool.is_default); assert_eq!(fleet_default_pool.silo_id, None); assert_eq!(fleet_default_pool.project_id, None); diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index 6709754049..73379117fb 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -69,7 +69,7 @@ impl super::Nexus { let pool = db::model::IpPool::new( &pool_params.identity, silo_id, - pool_params.default, + pool_params.is_default, ); self.db_datastore.ip_pool_create(opctx, pool).await } diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index 77a2ac06cc..2368c3f568 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -141,7 +141,7 @@ pub async fn create_ip_pool( description: String::from("an ip pool"), }, silo: None, - default: false, + is_default: false, }, ) .await; diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index 3abf5a2e7f..52d57cc844 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -456,7 +456,7 @@ lazy_static! 
{ }, silo: None, // TODO: this might error due to the unique index - default: true, + is_default: true, }; pub static ref DEMO_IP_POOL_PROJ_URL: String = format!("/v1/ip-pools/{}?project={}", *DEMO_IP_POOL_NAME, *DEMO_PROJECT_NAME); diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index e67b4f187d..88bd0546ad 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -104,7 +104,7 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { description: String::from(description), }, silo: None, - default: false, + is_default: false, }; let created_pool: IpPool = NexusRequest::objects_post(client, ip_pools_url, ¶ms) @@ -284,7 +284,7 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { description: String::from(""), }, silo: Some(NameOrId::Name(cptestctx.silo_name.clone())), - default: false, + is_default: false, }; let created_pool = create_pool(client, ¶ms).await; assert_eq!(created_pool.identity.name, "p0"); @@ -299,7 +299,7 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { description: String::from(""), }, silo: Some(NameOrId::Id(silo_id)), - default: false, + is_default: false, }; let created_pool = create_pool(client, ¶ms).await; assert_eq!(created_pool.identity.name, "p1"); @@ -314,7 +314,7 @@ async fn test_ip_pool_with_silo(cptestctx: &ControlPlaneTestContext) { silo: Some(NameOrId::Name( String::from("not-a-thing").parse().unwrap(), )), - default: false, + is_default: false, }; let error: HttpErrorResponseBody = NexusRequest::new( RequestBuilder::new(client, Method::POST, "/v1/system/ip-pools") @@ -373,7 +373,7 @@ async fn test_ip_pool_range_overlapping_ranges_fails( description: String::from(description), }, silo: None, - default: false, + is_default: false, }; let created_pool: IpPool = NexusRequest::objects_post(client, ip_pools_url, ¶ms) @@ -556,7 +556,7 @@ async fn test_ip_pool_range_pagination(cptestctx: &ControlPlaneTestContext) { description: String::from(description), }, silo: None, - default: false, + is_default: false, }; let created_pool: IpPool = NexusRequest::objects_post(client, ip_pools_url, ¶ms) @@ -694,7 +694,7 @@ async fn test_ip_pool_list_usable_by_project( description: String::from("right on cue"), }, silo: None, - default: false, + is_default: false, }; NexusRequest::objects_post(client, ip_pools_url, ¶ms) .authn_as(AuthnMode::PrivilegedUser) diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index c16b1175a0..1f65431932 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -736,7 +736,8 @@ pub struct IpPoolCreate { /// silo can draw from that pool. 
pub silo: Option, - pub default: bool, + #[serde(default)] + pub is_default: bool, } /// Parameters for updating an IP Pool diff --git a/openapi/nexus.json b/openapi/nexus.json index fc97417330..e1ab32406c 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -10169,12 +10169,13 @@ "description": "Create-time parameters for an `IpPool`", "type": "object", "properties": { - "default": { - "type": "boolean" - }, "description": { "type": "string" }, + "is_default": { + "default": false, + "type": "boolean" + }, "name": { "$ref": "#/components/schemas/Name" }, @@ -10189,7 +10190,6 @@ } }, "required": [ - "default", "description", "name" ] diff --git a/schema/crdb/3.0.0/up.sql b/schema/crdb/3.0.0/up.sql index 137c594e48..8517d7aaed 100644 --- a/schema/crdb/3.0.0/up.sql +++ b/schema/crdb/3.0.0/up.sql @@ -12,7 +12,7 @@ SELECT CAST( ); ALTER TABLE omicron.public.ip_pool - ADD COLUMN IF NOT EXISTS "default" BOOLEAN NOT NULL DEFAULT FALSE, + ADD COLUMN IF NOT EXISTS is_default BOOLEAN NOT NULL DEFAULT FALSE, ADD COLUMN IF NOT EXISTS silo_ID UUID, ADD COLUMN IF NOT EXISTS project_id UUID, @@ -42,6 +42,6 @@ CREATE UNIQUE INDEX IF NOT EXISTS one_default_pool_per_scope ON omicron.public.i COALESCE(silo_id, '00000000-0000-0000-0000-000000000000'::uuid), COALESCE(project_id, '00000000-0000-0000-0000-000000000000'::uuid) ) WHERE - "default" = true AND time_deleted IS NULL; + is_default = true AND time_deleted IS NULL; COMMIT; diff --git a/schema/crdb/3.0.1/up.sql b/schema/crdb/3.0.1/up.sql index def0192837..afcc8a4f2f 100644 --- a/schema/crdb/3.0.1/up.sql +++ b/schema/crdb/3.0.1/up.sql @@ -19,7 +19,7 @@ UPDATE omicron.public.ip_pool WHERE internal = true and time_deleted is null; UPDATE omicron.public.ip_pool - SET "default" = true + SET is_default = true WHERE name = 'default' and time_deleted is null; COMMIT; diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 83d4c0b5cb..9d916c6f1f 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -1483,7 +1483,7 @@ CREATE TABLE IF NOT EXISTS omicron.public.ip_pool ( /* The collection's child-resource generation number */ rcgen INT8 NOT NULL, - "default" BOOLEAN NOT NULL DEFAULT FALSE, + is_default BOOLEAN NOT NULL DEFAULT FALSE, /* * Fields representating association with a silo or project. silo_id must @@ -1508,7 +1508,7 @@ CREATE UNIQUE INDEX IF NOT EXISTS one_default_pool_per_scope ON omicron.public.i COALESCE(silo_id, '00000000-0000-0000-0000-000000000000'::uuid), COALESCE(project_id, '00000000-0000-0000-0000-000000000000'::uuid) ) WHERE - "default" = true AND time_deleted IS NULL; + is_default = true AND time_deleted IS NULL; /* * Index ensuring uniqueness of IP Pool names, globally. 
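Worth noting about the `#[serde(default)]` attribute added above: it is what lets `is_default` drop out of the `required` array in the regenerated OpenAPI document, because a request body that omits the field now deserializes via `bool::default()` (false). A minimal sketch of that behavior, using a hypothetical stand-in struct rather than the real `IpPoolCreate` (assumes serde and serde_json as dependencies):

    use serde::Deserialize;

    // Hypothetical stand-in for the `is_default` handling in `IpPoolCreate`.
    #[derive(Debug, Deserialize)]
    struct PoolCreateSketch {
        name: String,
        #[serde(default)] // omitted in the JSON body => bool::default() == false
        is_default: bool,
    }

    fn main() {
        // Field present: parsed as given.
        let p: PoolCreateSketch =
            serde_json::from_str(r#"{"name": "p0", "is_default": true}"#).unwrap();
        assert!(p.is_default);

        // Field omitted: serde falls back to false, which is why the OpenAPI
        // schema shows "default": false and the field is no longer required.
        let p: PoolCreateSketch = serde_json::from_str(r#"{"name": "p1"}"#).unwrap();
        assert!(!p.is_default);
    }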
From 2ba2ffe3269f18be2346904b42e128b5e1adf3c9 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 30 Aug 2023 10:00:17 -0500 Subject: [PATCH 09/17] first pass at IP pool lookup by name logic --- .../src/db/datastore/external_ip.rs | 40 ++++++++++++------- nexus/db-queries/src/db/datastore/ip_pool.rs | 34 ++++++++++++++-- 2 files changed, 56 insertions(+), 18 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index be7522456c..841c4b70b0 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -25,9 +25,7 @@ use nexus_types::identity::Resource; use omicron_common::api::external::CreateResult; use omicron_common::api::external::Error; use omicron_common::api::external::LookupResult; -use omicron_common::api::external::Name as ExternalName; use std::net::IpAddr; -use std::str::FromStr; use uuid::Uuid; impl DataStore { @@ -55,21 +53,35 @@ impl DataStore { instance_id: Uuid, pool_name: Option<Name>, ) -> CreateResult<ExternalIp> { - let name = pool_name.unwrap_or_else(|| { - Name(ExternalName::from_str("default").unwrap()) - }); + let current_silo = opctx.authn.silo_required()?; - // TODO: this is the whole thing -- update this call to EITHER look up - // the pool by name (only allowing access to pools matching the scope of - // the current project, i.e., you can pick a pool by name only if it's - // scoped to your project, silo, or the whole fleet) or use the default - // logic in ip_pool_fetch_default_for + // If we have a pool name, look up the pool by name and return it + // as long as its scopes don't conflict with the current scope. + // Otherwise, not found. + let pool = match pool_name { + Some(name) => { + self.ip_pools_fetch_for( + &opctx, + authz::Action::CreateChild, // TODO: wtf + &name, + Some(current_silo.id()), + None, // TODO: include current project ID + ) + .await? + } + // If no name given, use the default logic + None => { + self.ip_pools_fetch_default_for( + &opctx, + authz::Action::CreateChild, // TODO: wtf + Some(current_silo.id()), + None, // TODO: include current project ID + ) + .await? + } + }; - let (.., pool) = self - .ip_pools_fetch_for(opctx, authz::Action::CreateChild, &name) - .await?; let pool_id = pool.identity.id; - let data = IncompleteExternalIp::for_ephemeral(ip_id, instance_id, pool_id); self.allocate_external_ip(opctx, data).await diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index b3b7d75298..c94f0bf939 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -110,23 +110,49 @@ impl DataStore { .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) } - /// Looks up an IP pool by name. + /// Looks up an IP pool by name if it does not conflict with your current scope.
pub(crate) async fn ip_pools_fetch_for( &self, opctx: &OpContext, action: authz::Action, name: &Name, - ) -> LookupResult<(authz::IpPool, IpPool)> { + silo_id: Option<Uuid>, + project_id: Option<Uuid>, + ) -> LookupResult<IpPool> { let (.., authz_pool, pool) = LookupPath::new(opctx, &self) .ip_pool_name(&name) .fetch_for(action) .await?; - // Can't look up the internal pool + if pool.silo_id == Some(*INTERNAL_SILO_ID) { return Err(authz_pool.not_found()); } - Ok((authz_pool, pool)) + // You can't look up the internal pool by name, and you can't look up + // a pool by name if it conflicts with your current scope, i.e., if it + // has a silo or project and those are different from your current silo + // or project + if let Some(pool_silo_id) = pool.silo_id { + if pool_silo_id == *INTERNAL_SILO_ID { + return Err(authz_pool.not_found()); + } + + if let Some(current_silo_id) = silo_id { + if current_silo_id != pool_silo_id { + return Err(authz_pool.not_found()); + } + } + } + + if let Some(pool_project_id) = pool.project_id { + if let Some(current_project_id) = project_id { + if current_project_id != pool_project_id { + return Err(authz_pool.not_found()); + } + } + } + + Ok(pool) } /// Looks up an IP pool intended for internal services. From 0798ab2ad68dd1faccdd630d2c8c93ff472a1699 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 30 Aug 2023 10:41:15 -0500 Subject: [PATCH 10/17] fix external IPs tests, simplify pool lookup by name logic --- .../src/db/datastore/external_ip.rs | 2 +- nexus/db-queries/src/db/datastore/ip_pool.rs | 28 +++++++------------ .../db-queries/src/db/queries/external_ip.rs | 17 +++++++---- 3 files changed, 22 insertions(+), 25 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index 841c4b70b0..45eb3b9af2 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -64,7 +64,7 @@ impl DataStore { &opctx, authz::Action::CreateChild, // TODO: wtf &name, - Some(current_silo.id()), + current_silo.id(), None, // TODO: include current project ID ) .await?
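The ip_pool.rs hunk that follows (same commit) reduces the lookup-by-name check to: a pool with a silo must match the caller's silo, and a pool with a project must match the caller's project; the separate `INTERNAL_SILO_ID` case disappears because the internal silo can never be a caller's silo. A rough sketch of just the silo half of that rule, with a hypothetical free function standing in for the checks inside `ip_pools_fetch_for`:

    use uuid::Uuid;

    // Hypothetical distillation of the scope check: a pool found by name is
    // usable if it is fleet-scoped (no silo) or scoped to the caller's silo;
    // anything else is reported as not found rather than forbidden.
    fn pool_visible(pool_silo_id: Option<Uuid>, current_silo_id: Uuid) -> bool {
        match pool_silo_id {
            None => true,                      // fleet-scoped: usable anywhere
            Some(id) => id == current_silo_id, // silo-scoped: silos must match
        }
    }

    fn main() {
        let my_silo = Uuid::new_v4();
        let other_silo = Uuid::new_v4();
        assert!(pool_visible(None, my_silo));
        assert!(pool_visible(Some(my_silo), my_silo));
        // A pool owned by another silo behaves as if it does not exist.
        assert!(!pool_visible(Some(other_silo), my_silo));
    }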
diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index c94f0bf939..122aca0e90 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -116,7 +116,8 @@ impl DataStore { opctx: &OpContext, action: authz::Action, name: &Name, - silo_id: Option, + silo_id: Uuid, + // TODO: project_id should always be defined, this is temporary project_id: Option, ) -> LookupResult { let (.., authz_pool, pool) = LookupPath::new(opctx, &self) @@ -124,32 +125,23 @@ impl DataStore { .fetch_for(action) .await?; - if pool.silo_id == Some(*INTERNAL_SILO_ID) { - return Err(authz_pool.not_found()); - } + // You can't look up a pool by name if it conflicts with your current + // scope, i.e., if it has a silo or project and those are different from + // your current silo or project - // You can't look up the internal pool by name, and you can't look up - // a pool by name if it conflicts with your current scope, i.e., if it - // has a silo or project and those are different from your current silo - // or project if let Some(pool_silo_id) = pool.silo_id { - if pool_silo_id == *INTERNAL_SILO_ID { + if pool_silo_id != silo_id { return Err(authz_pool.not_found()); } - - if let Some(current_silo_id) = silo_id { - if current_silo_id != pool_silo_id { - return Err(authz_pool.not_found()); - } - } } - if let Some(pool_project_id) = pool.project_id { - if let Some(current_project_id) = project_id { - if current_project_id != pool_project_id { + match (pool.project_id, project_id) { + (Some(pool_project_id), Some(current_project_id)) => { + if pool_project_id != current_project_id { return Err(authz_pool.not_found()); } } + _ => {} } Ok(pool) diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index e42a9c39a0..a66f74e8c1 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -816,7 +816,6 @@ mod tests { use crate::context::OpContext; use crate::db::datastore::DataStore; use crate::db::datastore::SERVICE_IP_POOL_NAME; - use crate::db::fixed_data::silo::INTERNAL_SILO_ID; use crate::db::identity::Resource; use crate::db::model::IpKind; use crate::db::model::IpPool; @@ -862,14 +861,20 @@ mod tests { Self { logctx, opctx, db, db_datastore } } - async fn create_ip_pool(&self, name: &str, range: IpRange) { + async fn create_ip_pool( + &self, + name: &str, + range: IpRange, + is_default: bool, + ) { + let silo_id = self.opctx.authn.silo_required().unwrap().id(); let pool = IpPool::new( &IdentityMetadataCreateParams { name: String::from(name).parse().unwrap(), description: format!("ip pool {}", name), }, - Some(*INTERNAL_SILO_ID), - true, + Some(silo_id), + is_default, ); use crate::db::schema::ip_pool::dsl as ip_pool_dsl; @@ -1709,7 +1714,7 @@ mod tests { Ipv4Addr::new(10, 0, 0, 6), )) .unwrap(); - context.create_ip_pool("p1", second_range).await; + context.create_ip_pool("p1", second_range, /*default*/ false).await; // Allocating an address on an instance in the second pool should be // respected, even though there are IPs available in the first. @@ -1752,7 +1757,7 @@ mod tests { let last_address = Ipv4Addr::new(10, 0, 0, 6); let second_range = IpRange::try_from((first_address, last_address)).unwrap(); - context.create_ip_pool("p1", second_range).await; + context.create_ip_pool("p1", second_range, /* default */ false).await; // Allocate all available addresses in the second pool. 
let instance_id = Uuid::new_v4(); From 54060f4ea0013aa2a4b1dce69572371d1bc98871 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 30 Aug 2023 11:07:45 -0500 Subject: [PATCH 11/17] remove too-strict auth check on getting default pool --- .../src/db/datastore/external_ip.rs | 3 -- nexus/db-queries/src/db/datastore/ip_pool.rs | 29 +++++++++++-------- .../db-queries/src/db/queries/external_ip.rs | 7 +---- nexus/src/app/sagas/instance_create.rs | 9 ++---- nexus/tests/integration_tests/endpoints.rs | 1 - nexus/tests/integration_tests/instances.rs | 3 -- 6 files changed, 20 insertions(+), 32 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index 45eb3b9af2..e0d9ebc0a9 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -5,7 +5,6 @@ //! [`DataStore`] methods on [`ExternalIp`]s. use super::DataStore; -use crate::authz; use crate::context::OpContext; use crate::db; use crate::db::error::public_error_from_diesel_pool; @@ -62,7 +61,6 @@ impl DataStore { Some(name) => { self.ip_pools_fetch_for( &opctx, - authz::Action::CreateChild, // TODO: wtf &name, current_silo.id(), None, // TODO: include current project ID @@ -73,7 +71,6 @@ impl DataStore { None => { self.ip_pools_fetch_default_for( &opctx, - authz::Action::CreateChild, // TODO: wtf Some(current_silo.id()), None, // TODO: include current project ID ) diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 122aca0e90..9f1aa295c7 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -79,12 +79,19 @@ impl DataStore { pub async fn ip_pools_fetch_default_for( &self, opctx: &OpContext, - action: authz::Action, silo_id: Option, project_id: Option, ) -> LookupResult { use db::schema::ip_pool::dsl; - opctx.authorize(action, &authz::IP_POOL_LIST).await?; + + // TODO: Need auth check here. Only fleet viewers can list children on + // IP_POOL_LIST, so if we check that, nobody can make instances. This + // used to check CreateChild on an individual IP pool, but now we're not + // looking up by name so the check is more complicated + // + // opctx + // .authorize(authz::Action::ListChildren, &authz::IP_POOL_LIST) + // .await?; dsl::ip_pool .filter(dsl::silo_id.eq(silo_id).or(dsl::silo_id.is_null())) @@ -114,7 +121,6 @@ impl DataStore { pub(crate) async fn ip_pools_fetch_for( &self, opctx: &OpContext, - action: authz::Action, name: &Name, silo_id: Uuid, // TODO: project_id should always be defined, this is temporary @@ -122,7 +128,9 @@ impl DataStore { ) -> LookupResult { let (.., authz_pool, pool) = LookupPath::new(opctx, &self) .ip_pool_name(&name) - .fetch_for(action) + // any authenticated user can CreateChild on an IP pool. 
this is + // meant to represent allocating an IP + .fetch_for(authz::Action::CreateChild) .await?; // You can't look up a pool by name if it conflicts with your current @@ -472,7 +480,6 @@ impl DataStore { #[cfg(test)] mod test { - use crate::authz; use crate::db::datastore::datastore_test; use crate::db::model::IpPool; use assert_matches::assert_matches; @@ -487,12 +494,10 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - let action = authz::Action::ListChildren; - // we start out with the default fleet-level pool already created, // so when we ask for the fleet default (no silo or project) we get it back let fleet_default_pool = datastore - .ip_pools_fetch_default_for(&opctx, action, None, None) + .ip_pools_fetch_default_for(&opctx, None, None) .await .unwrap(); @@ -523,7 +528,7 @@ mod test { // has no default of its own let silo_id = opctx.authn.silo_required().unwrap().id(); let ip_pool = datastore - .ip_pools_fetch_default_for(&opctx, action, Some(silo_id), None) + .ip_pools_fetch_default_for(&opctx, Some(silo_id), None) .await .expect("Failed to get silo's default IP pool"); assert_eq!(ip_pool.id(), fleet_default_pool.id()); @@ -543,7 +548,7 @@ mod test { // because that one was not a default, when we ask for silo default // pool, we still get the fleet default let ip_pool = datastore - .ip_pools_fetch_default_for(&opctx, action, Some(silo_id), None) + .ip_pools_fetch_default_for(&opctx, Some(silo_id), None) .await .expect("Failed to get fleet default IP pool"); assert_eq!(ip_pool.id(), fleet_default_pool.id()); @@ -559,14 +564,14 @@ mod test { // now when we ask for the silo default pool, we get the one we just made let ip_pool = datastore - .ip_pools_fetch_default_for(&opctx, action, Some(silo_id), None) + .ip_pools_fetch_default_for(&opctx, Some(silo_id), None) .await .expect("Failed to get silo's default IP pool"); assert_eq!(ip_pool.name().as_str(), "default-for-silo"); // and of course, if we ask for the fleet default again we still get that one let ip_pool = datastore - .ip_pools_fetch_default_for(&opctx, action, None, None) + .ip_pools_fetch_default_for(&opctx, None, None) .await .expect("Failed to get fleet default IP pool"); assert_eq!(ip_pool.id(), fleet_default_pool.id()); diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index a66f74e8c1..f463f9cf57 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -924,12 +924,7 @@ mod tests { async fn default_pool_id(&self) -> Uuid { let pool = self .db_datastore - .ip_pools_fetch_default_for( - &self.opctx, - crate::authz::Action::ListChildren, - None, - None, - ) + .ip_pools_fetch_default_for(&self.opctx, None, None) .await .expect("Failed to lookup default ip pool"); pool.identity.id diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index 92c5cd67af..2bb37d7475 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -827,13 +827,8 @@ async fn sic_allocate_instance_snat_ip( let ip_id = sagactx.lookup::("snat_ip_id")?; let pool = datastore - .ip_pools_fetch_default_for( - &opctx, - authz::Action::CreateChild, - // TODO: get these values right - None, - None, - ) + // TODO: pass silo ID and project ID + .ip_pools_fetch_default_for(&opctx, None, None) .await .map_err(ActionError::action_failed)?; let pool_id = pool.identity.id; diff --git 
a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index 52d57cc844..e140ce3620 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -455,7 +455,6 @@ lazy_static! { description: String::from("an IP pool"), }, silo: None, - // TODO: this might error due to the unique index is_default: true, }; pub static ref DEMO_IP_POOL_PROJ_URL: String = diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index a3f4fe0e9a..d9fb161def 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -3434,7 +3434,6 @@ async fn test_instance_ephemeral_ip_from_correct_pool( ); } -// TODO: this test fails #[nexus_test] async fn test_instance_create_in_silo(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; @@ -3503,8 +3502,6 @@ async fn test_instance_create_in_silo(cptestctx: &ControlPlaneTestContext) { .authn_as(AuthnMode::SiloUser(user_id)) .execute() .await - // fails here with 403 that comes from the default IP pool query, - // which checks ListChildren on IP_POOL_LIST .expect("Failed to create instance") .parsed_body::() .expect("Failed to parse instance"); From b296e9925466aeaeafb56b21cba8323b46e18b2c Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 30 Aug 2023 11:49:51 -0500 Subject: [PATCH 12/17] don't pass around the silo ID, it's always the current authz silo --- .../src/db/datastore/external_ip.rs | 23 +++---------- nexus/db-queries/src/db/datastore/ip_pool.rs | 33 ++++++++++--------- .../db-queries/src/db/queries/external_ip.rs | 2 +- nexus/src/app/sagas/instance_create.rs | 4 +-- 4 files changed, 25 insertions(+), 37 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index e0d9ebc0a9..3e51df9837 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -52,30 +52,15 @@ impl DataStore { instance_id: Uuid, pool_name: Option, ) -> CreateResult { - let current_silo = opctx.authn.silo_required()?; - // If we have a pool name, look up the pool by name and return it // as long as its scopes don't conflict with the current scope. // Otherwise, not found. let pool = match pool_name { - Some(name) => { - self.ip_pools_fetch_for( - &opctx, - &name, - current_silo.id(), - None, // TODO: include current project ID - ) - .await? - } + // TODO: include current project ID + Some(name) => self.ip_pools_fetch_for(&opctx, &name, None).await?, // If no name given, use the default logic - None => { - self.ip_pools_fetch_default_for( - &opctx, - Some(current_silo.id()), - None, // TODO: include current project ID - ) - .await? - } + // TODO: include current project ID + None => self.ip_pools_fetch_default_for(&opctx, None).await?, }; let pool_id = pool.identity.id; diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 9f1aa295c7..4c984a75a3 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -79,11 +79,12 @@ impl DataStore { pub async fn ip_pools_fetch_default_for( &self, opctx: &OpContext, - silo_id: Option, project_id: Option, ) -> LookupResult { use db::schema::ip_pool::dsl; + let authz_silo_id = opctx.authn.silo_required()?.id(); + // TODO: Need auth check here. 
Only fleet viewers can list children on // IP_POOL_LIST, so if we check that, nobody can make instances. This // used to check CreateChild on an individual IP pool, but now we're not @@ -94,7 +95,7 @@ impl DataStore { // .await?; dsl::ip_pool - .filter(dsl::silo_id.eq(silo_id).or(dsl::silo_id.is_null())) + .filter(dsl::silo_id.eq(authz_silo_id).or(dsl::silo_id.is_null())) .filter( dsl::project_id.eq(project_id).or(dsl::project_id.is_null()), ) @@ -122,10 +123,11 @@ impl DataStore { &self, opctx: &OpContext, name: &Name, - silo_id: Uuid, // TODO: project_id should always be defined, this is temporary project_id: Option, ) -> LookupResult { + let authz_silo_id = opctx.authn.silo_required()?.id(); + let (.., authz_pool, pool) = LookupPath::new(opctx, &self) .ip_pool_name(&name) // any authenticated user can CreateChild on an IP pool. this is @@ -138,7 +140,7 @@ impl DataStore { // your current silo or project if let Some(pool_silo_id) = pool.silo_id { - if pool_silo_id != silo_id { + if pool_silo_id != authz_silo_id { return Err(authz_pool.not_found()); } } @@ -483,10 +485,12 @@ mod test { use crate::db::datastore::datastore_test; use crate::db::model::IpPool; use assert_matches::assert_matches; + use nexus_db_model::Name; use nexus_test_utils::db::test_setup_database; use nexus_types::identity::Resource; use omicron_common::api::external::{Error, IdentityMetadataCreateParams}; use omicron_test_utils::dev; + use uuid::Uuid; #[tokio::test] async fn test_default_ip_pools() { @@ -496,10 +500,8 @@ mod test { // we start out with the default fleet-level pool already created, // so when we ask for the fleet default (no silo or project) we get it back - let fleet_default_pool = datastore - .ip_pools_fetch_default_for(&opctx, None, None) - .await - .unwrap(); + let fleet_default_pool = + datastore.ip_pools_fetch_default_for(&opctx, None).await.unwrap(); assert_eq!(fleet_default_pool.identity.name.as_str(), "default"); assert!(fleet_default_pool.is_default); @@ -524,15 +526,16 @@ mod test { // a particular silo or a particular project, if those scopes do not // have a default IP pool, we will still get back the fleet default - // default for "current" silo is still the fleet default one because it + // default for passed in project is still the fleet default one because it // has no default of its own - let silo_id = opctx.authn.silo_required().unwrap().id(); let ip_pool = datastore - .ip_pools_fetch_default_for(&opctx, Some(silo_id), None) + .ip_pools_fetch_default_for(&opctx, Some(Uuid::new_v4())) .await .expect("Failed to get silo's default IP pool"); assert_eq!(ip_pool.id(), fleet_default_pool.id()); + let silo_id = opctx.authn.silo_required().unwrap().id(); + // create a non-default pool for the silo let identity = IdentityMetadataCreateParams { name: "non-default-for-silo".parse().unwrap(), @@ -548,7 +551,7 @@ mod test { // because that one was not a default, when we ask for silo default // pool, we still get the fleet default let ip_pool = datastore - .ip_pools_fetch_default_for(&opctx, Some(silo_id), None) + .ip_pools_fetch_default_for(&opctx, None) .await .expect("Failed to get fleet default IP pool"); assert_eq!(ip_pool.id(), fleet_default_pool.id()); @@ -564,14 +567,14 @@ mod test { // now when we ask for the silo default pool, we get the one we just made let ip_pool = datastore - .ip_pools_fetch_default_for(&opctx, Some(silo_id), None) + .ip_pools_fetch_default_for(&opctx, None) .await .expect("Failed to get silo's default IP pool"); assert_eq!(ip_pool.name().as_str(), "default-for-silo"); 
- // and of course, if we ask for the fleet default again we still get that one + // if we ask for the fleet default by name, we can still get that one let ip_pool = datastore - .ip_pools_fetch_default_for(&opctx, None, None) + .ip_pools_fetch_for(&opctx, &Name("default".parse().unwrap()), None) .await .expect("Failed to get fleet default IP pool"); assert_eq!(ip_pool.id(), fleet_default_pool.id()); diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index f463f9cf57..23f6369288 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -924,7 +924,7 @@ mod tests { async fn default_pool_id(&self) -> Uuid { let pool = self .db_datastore - .ip_pools_fetch_default_for(&self.opctx, None, None) + .ip_pools_fetch_default_for(&self.opctx, None) .await .expect("Failed to lookup default ip pool"); pool.identity.id diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index 2bb37d7475..c503f39d11 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -827,8 +827,8 @@ async fn sic_allocate_instance_snat_ip( let ip_id = sagactx.lookup::("snat_ip_id")?; let pool = datastore - // TODO: pass silo ID and project ID - .ip_pools_fetch_default_for(&opctx, None, None) + // TODO: pass project ID + .ip_pools_fetch_default_for(&opctx, None) .await .map_err(ActionError::action_failed)?; let pool_id = pool.identity.id; From e591c123b049660a0f1bfabf340a132eb3524df7 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 30 Aug 2023 12:29:15 -0500 Subject: [PATCH 13/17] plumb through project_id, but you still can't make a project-scoped pool --- .../src/db/datastore/external_ip.rs | 12 ++++--- nexus/db-queries/src/db/datastore/ip_pool.rs | 36 +++++++++++++------ .../db-queries/src/db/queries/external_ip.rs | 10 ++++++ nexus/src/app/sagas/instance_create.rs | 13 +++++-- 4 files changed, 53 insertions(+), 18 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index 3e51df9837..cf55d7e32e 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -50,17 +50,21 @@ impl DataStore { opctx: &OpContext, ip_id: Uuid, instance_id: Uuid, + project_id: Uuid, pool_name: Option, ) -> CreateResult { // If we have a pool name, look up the pool by name and return it // as long as its scopes don't conflict with the current scope. // Otherwise, not found. let pool = match pool_name { - // TODO: include current project ID - Some(name) => self.ip_pools_fetch_for(&opctx, &name, None).await?, + Some(name) => { + self.ip_pools_fetch_for(&opctx, &name, Some(project_id)).await? + } // If no name given, use the default logic - // TODO: include current project ID - None => self.ip_pools_fetch_default_for(&opctx, None).await?, + None => { + self.ip_pools_fetch_default_for(&opctx, Some(project_id)) + .await? + } }; let pool_id = pool.identity.id; diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 4c984a75a3..6dacafc165 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -73,12 +73,15 @@ impl DataStore { } /// Looks up the default IP pool for a given scope, i.e., a given - /// combination of silo and project ID (or none). If there is no default at - /// a given scope, fall back up a level. 
There should always be a default at - /// fleet level, though this query can theoretically fail. + /// combination of silo and project ID. If there is no default at a given + /// scope, fall back up a level. There should always be a default at fleet + /// level, though this query can theoretically fail. pub async fn ip_pools_fetch_default_for( &self, opctx: &OpContext, + // Optional primarily because there are test contexts where we don't care + // about the project. If project ID is None, we will only get back pools + // that themselves have no project. project_id: Option, ) -> LookupResult { use db::schema::ip_pool::dsl; @@ -123,7 +126,6 @@ impl DataStore { &self, opctx: &OpContext, name: &Name, - // TODO: project_id should always be defined, this is temporary project_id: Option, ) -> LookupResult { let authz_silo_id = opctx.authn.silo_required()?.id(); @@ -498,8 +500,11 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; + // it doesn't matter whether the project exists + let project_id = Uuid::new_v4(); + // we start out with the default fleet-level pool already created, - // so when we ask for the fleet default (no silo or project) we get it back + // so when we ask for a default silo, we get it back let fleet_default_pool = datastore.ip_pools_fetch_default_for(&opctx, None).await.unwrap(); @@ -529,7 +534,7 @@ mod test { // default for passed in project is still the fleet default one because it // has no default of its own let ip_pool = datastore - .ip_pools_fetch_default_for(&opctx, Some(Uuid::new_v4())) + .ip_pools_fetch_default_for(&opctx, Some(project_id)) .await .expect("Failed to get silo's default IP pool"); assert_eq!(ip_pool.id(), fleet_default_pool.id()); @@ -548,12 +553,17 @@ mod test { ) .await; - // because that one was not a default, when we ask for silo default + // because that one was not a default, when we ask for silo or project default // pool, we still get the fleet default let ip_pool = datastore .ip_pools_fetch_default_for(&opctx, None) .await - .expect("Failed to get fleet default IP pool"); + .expect("Failed to get silo default IP pool"); + assert_eq!(ip_pool.id(), fleet_default_pool.id()); + let ip_pool = datastore + .ip_pools_fetch_default_for(&opctx, Some(project_id)) + .await + .expect("Failed to get project default IP pool"); assert_eq!(ip_pool.id(), fleet_default_pool.id()); // now create a default pool for the silo @@ -565,16 +575,20 @@ mod test { .ip_pool_create(&opctx, IpPool::new(&identity, Some(silo_id), true)) .await; - // now when we ask for the silo default pool, we get the one we just made + // now when we ask for the default pool, we get the one we just made let ip_pool = datastore - .ip_pools_fetch_default_for(&opctx, None) + .ip_pools_fetch_default_for(&opctx, Some(project_id)) .await .expect("Failed to get silo's default IP pool"); assert_eq!(ip_pool.name().as_str(), "default-for-silo"); // if we ask for the fleet default by name, we can still get that one let ip_pool = datastore - .ip_pools_fetch_for(&opctx, &Name("default".parse().unwrap()), None) + .ip_pools_fetch_for( + &opctx, + &Name("default".parse().unwrap()), + Some(project_id), + ) .await .expect("Failed to get fleet default IP pool"); assert_eq!(ip_pool.id(), fleet_default_pool.id()); diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index 23f6369288..7f55ee778d 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ 
b/nexus/db-queries/src/db/queries/external_ip.rs @@ -995,6 +995,7 @@ mod tests { let context = TestContext::new("test_ephemeral_and_snat_ips_do_not_overlap") .await; + let project_id = Uuid::new_v4(); let range = IpRange::try_from(( Ipv4Addr::new(10, 0, 0, 1), Ipv4Addr::new(10, 0, 0, 1), @@ -1011,6 +1012,7 @@ mod tests { &context.opctx, Uuid::new_v4(), instance_id, + project_id, /* pool_name = */ None, ) .await @@ -1050,6 +1052,7 @@ mod tests { &context.opctx, Uuid::new_v4(), instance_id, + project_id, /* pool_name = */ None, ) .await; @@ -1184,6 +1187,7 @@ mod tests { .unwrap(); context.initialize_ip_pool("default", range).await; + let project_id = Uuid::new_v4(); let instance_id = Uuid::new_v4(); let id = Uuid::new_v4(); let pool_name = None; @@ -1194,6 +1198,7 @@ mod tests { &context.opctx, id, instance_id, + project_id, pool_name, ) .await @@ -1713,6 +1718,7 @@ mod tests { // Allocating an address on an instance in the second pool should be // respected, even though there are IPs available in the first. + let project_id = Uuid::new_v4(); let instance_id = Uuid::new_v4(); let id = Uuid::new_v4(); let pool_name = Some(Name("p1".parse().unwrap())); @@ -1723,6 +1729,7 @@ mod tests { &context.opctx, id, instance_id, + project_id, pool_name, ) .await @@ -1742,6 +1749,7 @@ mod tests { ) .await; + let project_id = Uuid::new_v4(); let first_range = IpRange::try_from(( Ipv4Addr::new(10, 0, 0, 1), Ipv4Addr::new(10, 0, 0, 3), @@ -1766,6 +1774,7 @@ mod tests { &context.opctx, Uuid::new_v4(), instance_id, + project_id, pool_name.clone(), ) .await @@ -1785,6 +1794,7 @@ mod tests { &context.opctx, Uuid::new_v4(), instance_id, + project_id, pool_name, ) .await diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index c503f39d11..aeb3374a9f 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -825,10 +825,10 @@ async fn sic_allocate_instance_snat_ip( ); let instance_id = sagactx.lookup::("instance_id")?; let ip_id = sagactx.lookup::("snat_ip_id")?; + let project_id = saga_params.project_id; let pool = datastore - // TODO: pass project ID - .ip_pools_fetch_default_for(&opctx, None) + .ip_pools_fetch_default_for(&opctx, Some(project_id)) .await .map_err(ActionError::action_failed)?; let pool_id = pool.identity.id; @@ -877,6 +877,7 @@ async fn sic_allocate_instance_external_ip( &sagactx, &saga_params.serialized_authn, ); + let project_id = saga_params.project_id; let instance_id = repeat_saga_params.instance_id; let ip_id = repeat_saga_params.new_id; @@ -887,7 +888,13 @@ async fn sic_allocate_instance_external_ip( } }; datastore - .allocate_instance_ephemeral_ip(&opctx, ip_id, instance_id, pool_name) + .allocate_instance_ephemeral_ip( + &opctx, + ip_id, + instance_id, + project_id, + pool_name, + ) .await .map_err(ActionError::action_failed)?; Ok(()) From 06b1af7d2823f17865247b651078cf89952cbf42 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 30 Aug 2023 13:18:09 -0500 Subject: [PATCH 14/17] PR suggestions around comments and deps --- nexus/Cargo.toml | 2 +- nexus/db-model/src/ip_pool.rs | 2 +- nexus/db-queries/src/db/datastore/ip_pool.rs | 16 +++++++++------- nexus/types/src/external_api/params.rs | 5 +++++ openapi/nexus.json | 1 + 5 files changed, 17 insertions(+), 9 deletions(-) diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index 33abdab366..0e0ce47fbc 100644 --- a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -68,7 +68,6 @@ serde.workspace = true serde_json.workspace = true serde_urlencoded.workspace = true 
serde_with.workspace = true -similar-asserts.workspace = true sled-agent-client.workspace = true slog.workspace = true slog-async.workspace = true @@ -118,6 +117,7 @@ petgraph.workspace = true pretty_assertions.workspace = true rcgen.workspace = true regex.workspace = true +similar-asserts.workspace = true rustls = { workspace = true } subprocess.workspace = true term.workspace = true diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs index 837f9b87e0..86f2b3c878 100644 --- a/nexus/db-model/src/ip_pool.rs +++ b/nexus/db-model/src/ip_pool.rs @@ -39,7 +39,7 @@ pub struct IpPool { /// Silo, if IP pool is associated with a particular silo. One special use /// for this is associating a pool with the internal silo oxide-internal, /// which is used for internal services. If there is no silo ID, the - /// pool is considered a fleet-wide silo and will be used for allocating + /// pool is considered a fleet-wide pool and will be used for allocating /// instance IPs in silos that don't have their own pool. Must be non- /// null if project_id is non-null (this is enforced as a DB constraint). /// When project_id is non-null, silo_id will (naturally) be the ID of the diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 6dacafc165..51706e187c 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -72,16 +72,18 @@ impl DataStore { .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) } - /// Looks up the default IP pool for a given scope, i.e., a given - /// combination of silo and project ID. If there is no default at a given - /// scope, fall back up a level. There should always be a default at fleet - /// level, though this query can theoretically fail. + /// Look up the default IP pool for a given scope, i.e., a given combination + /// of silo and project ID. If there is no default at a given scope, fall + /// back to the next level up. There should always be a default pool at the + /// fleet level, though this query can theoretically fail if someone is able + /// to delete that pool or make another one the default and delete that. pub async fn ip_pools_fetch_default_for( &self, opctx: &OpContext, - // Optional primarily because there are test contexts where we don't care - // about the project. If project ID is None, we will only get back pools - // that themselves have no project. + // Optional primarily because there are test contexts where we don't + // care about the project. If project ID is None, we will only get back + // pools that themselves have no associated project, i.e., are fleet- + // scoped or silo-scoped. project_id: Option, ) -> LookupResult { use db::schema::ip_pool::dsl; diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 1f65431932..431f6951cc 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -736,6 +736,11 @@ pub struct IpPoolCreate { /// silo can draw from that pool. pub silo: Option, + /// Whether the IP pool is considered a default pool for its scope (fleet, + /// silo, or project). If a pool is marked default and is associated with a + /// silo and there is no default pool for the project, instances created in + /// that silo will draw IPs from that pool unless another pool is specified + /// at instance create time. 
#[serde(default)] pub is_default: bool, } diff --git a/openapi/nexus.json b/openapi/nexus.json index e1ab32406c..53f5788910 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -10173,6 +10173,7 @@ "type": "string" }, "is_default": { + "description": "Whether the IP pool is considered a default pool for its scope (fleet, silo, or project). If a pool is marked default and is associated with a silo and there is no default pool for the project, instances created in that silo will draw IPs from that pool unless another pool is specified at instance create time.", "default": false, "type": "boolean" }, From e3c56c45faabe21076da60488f283aeef2b9a5c5 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 30 Aug 2023 14:34:15 -0500 Subject: [PATCH 15/17] don't edit an existing migration! --- nexus/db-model/src/schema.rs | 2 +- schema/crdb/3.0.0/up.sql | 39 +++++++++++++----------------------- schema/crdb/3.0.1/up.sql | 32 +++++++++++++++++++++-------- schema/crdb/3.0.2/up.sql | 12 +++++++++-- schema/crdb/3.0.3/up.sql | 17 ++++++++++++++++ schema/crdb/dbinit.sql | 7 ++++--- 6 files changed, 70 insertions(+), 39 deletions(-) create mode 100644 schema/crdb/3.0.3/up.sql diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index b18c7dfddb..0ed8d9abc5 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -1131,7 +1131,7 @@ table! { /// /// This should be updated whenever the schema is changed. For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(3, 0, 2); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(3, 0, 3); allow_tables_to_appear_in_same_query!( system_update, diff --git a/schema/crdb/3.0.0/up.sql b/schema/crdb/3.0.0/up.sql index 8517d7aaed..b2883a933a 100644 --- a/schema/crdb/3.0.0/up.sql +++ b/schema/crdb/3.0.0/up.sql @@ -1,3 +1,11 @@ +-- CRDB documentation recommends the following: +-- "Execute schema changes either as single statements (as an implicit transaction), +-- or in an explicit transaction consisting of the single schema change statement." +-- +-- For each schema change, we transactionally: +-- 1. Check the current version +-- 2. 
Apply the idempotent update + BEGIN; SELECT CAST( @@ -12,36 +20,17 @@ SELECT CAST( ); ALTER TABLE omicron.public.ip_pool - ADD COLUMN IF NOT EXISTS is_default BOOLEAN NOT NULL DEFAULT FALSE, - ADD COLUMN IF NOT EXISTS silo_ID UUID, ADD COLUMN IF NOT EXISTS project_id UUID, - + -- if silo_id is null, then project_id must be null ADD CONSTRAINT IF NOT EXISTS project_implies_silo CHECK ( NOT ((silo_id IS NULL) AND (project_id IS NOT NULL)) - ); -COMMIT; - --- needs to be in its own transaction because of this thrilling bug --- https://github.com/cockroachdb/cockroach/issues/83593 -BEGIN; - -SELECT CAST( - IF( - ( - SELECT version = '2.0.0' and target_version = '3.0.0' - FROM omicron.public.db_metadata WHERE singleton = true - ), - 'true', - 'Invalid starting version for schema change' - ) AS BOOL -); + ), -CREATE UNIQUE INDEX IF NOT EXISTS one_default_pool_per_scope ON omicron.public.ip_pool ( - COALESCE(silo_id, '00000000-0000-0000-0000-000000000000'::uuid), - COALESCE(project_id, '00000000-0000-0000-0000-000000000000'::uuid) -) WHERE - is_default = true AND time_deleted IS NULL; + -- if internal = true, non-null silo_id and project_id are not allowed + ADD CONSTRAINT IF NOT EXISTS internal_pools_have_null_silo_and_project CHECK ( + NOT (INTERNAL AND ((silo_id IS NOT NULL) OR (project_id IS NOT NULL))) + ); COMMIT; diff --git a/schema/crdb/3.0.1/up.sql b/schema/crdb/3.0.1/up.sql index afcc8a4f2f..8c501754f7 100644 --- a/schema/crdb/3.0.1/up.sql +++ b/schema/crdb/3.0.1/up.sql @@ -11,15 +11,31 @@ SELECT CAST( ) AS BOOL ); --- to get ready to drop the internal column, take any IP pools with internal = --- true and set silo_id = INTERNAL_SILO_ID +ALTER TABLE omicron.public.ip_pool + ADD COLUMN IF NOT EXISTS is_default BOOLEAN NOT NULL DEFAULT FALSE, + DROP CONSTRAINT IF EXISTS internal_pools_have_null_silo_and_project; -UPDATE omicron.public.ip_pool - SET silo_id = '001de000-5110-4000-8000-000000000001' - WHERE internal = true and time_deleted is null; +COMMIT; + +-- needs to be in its own transaction because of this thrilling bug +-- https://github.com/cockroachdb/cockroach/issues/83593 +BEGIN; + +SELECT CAST( + IF( + ( + SELECT version = '3.0.0' and target_version = '3.0.1' + FROM omicron.public.db_metadata WHERE singleton = true + ), + 'true', + 'Invalid starting version for schema change' + ) AS BOOL +); -UPDATE omicron.public.ip_pool - SET is_default = true - WHERE name = 'default' and time_deleted is null; +CREATE UNIQUE INDEX IF NOT EXISTS one_default_pool_per_scope ON omicron.public.ip_pool ( + COALESCE(silo_id, '00000000-0000-0000-0000-000000000000'::uuid), + COALESCE(project_id, '00000000-0000-0000-0000-000000000000'::uuid) +) WHERE + is_default = true AND time_deleted IS NULL; COMMIT; diff --git a/schema/crdb/3.0.2/up.sql b/schema/crdb/3.0.2/up.sql index 70314a00b9..0bca7678ff 100644 --- a/schema/crdb/3.0.2/up.sql +++ b/schema/crdb/3.0.2/up.sql @@ -11,7 +11,15 @@ SELECT CAST( ) AS BOOL ); -ALTER TABLE omicron.public.ip_pool - DROP COLUMN IF EXISTS internal; +-- to get ready to drop the internal column, take any IP pools with internal = +-- true and set silo_id = INTERNAL_SILO_ID + +UPDATE omicron.public.ip_pool + SET silo_id = '001de000-5110-4000-8000-000000000001' + WHERE internal = true and time_deleted is null; + +UPDATE omicron.public.ip_pool + SET is_default = true + WHERE name = 'default' and time_deleted is null; COMMIT; diff --git a/schema/crdb/3.0.3/up.sql b/schema/crdb/3.0.3/up.sql new file mode 100644 index 0000000000..d7ed8097ef --- /dev/null +++ b/schema/crdb/3.0.3/up.sql 
@@ -0,0 +1,17 @@ +BEGIN; + +SELECT CAST( + IF( + ( + SELECT version = '3.0.2' and target_version = '3.0.3' + FROM omicron.public.db_metadata WHERE singleton = true + ), + 'true', + 'Invalid starting version for schema change' + ) AS BOOL +); + +ALTER TABLE omicron.public.ip_pool + DROP COLUMN IF EXISTS internal; + +COMMIT; diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 9d916c6f1f..a748cbac65 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -1483,8 +1483,6 @@ CREATE TABLE IF NOT EXISTS omicron.public.ip_pool ( /* The collection's child-resource generation number */ rcgen INT8 NOT NULL, - is_default BOOLEAN NOT NULL DEFAULT FALSE, - /* * Fields representating association with a silo or project. silo_id must * be non-null if project_id is non-null. silo_id is also used to mark an IP @@ -1493,6 +1491,9 @@ CREATE TABLE IF NOT EXISTS omicron.public.ip_pool ( silo_id UUID, project_id UUID, + /* Is this the default pool for its scope (fleet, silo, or project) */ + is_default BOOLEAN NOT NULL DEFAULT FALSE, + -- if silo_id is null, then project_id must be null CONSTRAINT project_implies_silo CHECK ( NOT ((silo_id IS NULL) AND (project_id IS NOT NULL)) @@ -2566,7 +2567,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '3.0.2', NULL) + ( TRUE, NOW(), NOW(), '3.0.3', NULL) ON CONFLICT DO NOTHING; COMMIT; From 2d77525c095663ae966319e4d876c85906abf299 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 30 Aug 2023 16:13:46 -0500 Subject: [PATCH 16/17] remove half-baked project_id stuff for now --- nexus/db-model/src/ip_pool.rs | 9 +-- nexus/db-model/src/schema.rs | 1 - .../src/db/datastore/external_ip.rs | 10 +-- nexus/db-queries/src/db/datastore/ip_pool.rs | 77 ++++--------------- .../db-queries/src/db/queries/external_ip.rs | 12 +-- nexus/src/app/sagas/instance_create.rs | 12 +-- nexus/types/src/external_api/params.rs | 7 +- openapi/nexus.json | 2 +- schema/crdb/3.0.1/up.sql | 5 +- schema/crdb/dbinit.sql | 25 +++--- 10 files changed, 37 insertions(+), 123 deletions(-) diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs index 86f2b3c878..e67a513235 100644 --- a/nexus/db-model/src/ip_pool.rs +++ b/nexus/db-model/src/ip_pool.rs @@ -40,15 +40,9 @@ pub struct IpPool { /// for this is associating a pool with the internal silo oxide-internal, /// which is used for internal services. If there is no silo ID, the /// pool is considered a fleet-wide pool and will be used for allocating - /// instance IPs in silos that don't have their own pool. Must be non- - /// null if project_id is non-null (this is enforced as a DB constraint). - /// When project_id is non-null, silo_id will (naturally) be the ID of the - /// project's silo. + /// instance IPs in silos that don't have their own pool. pub silo_id: Option, - /// Project, if IP pool is associated with a particular project - pub project_id: Option, - pub is_default: bool, } @@ -65,7 +59,6 @@ impl IpPool { ), rcgen: 0, silo_id, - project_id: None, is_default, } } diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 0ed8d9abc5..78ebb527cd 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -454,7 +454,6 @@ table! 
{ time_deleted -> Nullable, rcgen -> Int8, silo_id -> Nullable, - project_id -> Nullable, is_default -> Bool, } } diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index cf55d7e32e..9ddbeebb0b 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -50,21 +50,15 @@ impl DataStore { opctx: &OpContext, ip_id: Uuid, instance_id: Uuid, - project_id: Uuid, pool_name: Option, ) -> CreateResult { // If we have a pool name, look up the pool by name and return it // as long as its scopes don't conflict with the current scope. // Otherwise, not found. let pool = match pool_name { - Some(name) => { - self.ip_pools_fetch_for(&opctx, &name, Some(project_id)).await? - } + Some(name) => self.ip_pools_fetch_for(&opctx, &name).await?, // If no name given, use the default logic - None => { - self.ip_pools_fetch_default_for(&opctx, Some(project_id)) - .await? - } + None => self.ip_pools_fetch_default_for(&opctx).await?, }; let pool_id = pool.identity.id; diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 51706e187c..166237eabe 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -72,19 +72,14 @@ impl DataStore { .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) } - /// Look up the default IP pool for a given scope, i.e., a given combination - /// of silo and project ID. If there is no default at a given scope, fall - /// back to the next level up. There should always be a default pool at the - /// fleet level, though this query can theoretically fail if someone is able - /// to delete that pool or make another one the default and delete that. + /// Look up the default IP pool for the current silo. If there is no default + /// at silo scope, fall back to the next level up, namely the fleet default. + /// There should always be a default pool at the fleet level, though this + /// query can theoretically fail if someone is able to delete that pool or + /// make another one the default and delete that. pub async fn ip_pools_fetch_default_for( &self, opctx: &OpContext, - // Optional primarily because there are test contexts where we don't - // care about the project. If project ID is None, we will only get back - // pools that themselves have no associated project, i.e., are fleet- - // scoped or silo-scoped. - project_id: Option, ) -> LookupResult { use db::schema::ip_pool::dsl; @@ -101,22 +96,15 @@ impl DataStore { dsl::ip_pool .filter(dsl::silo_id.eq(authz_silo_id).or(dsl::silo_id.is_null())) - .filter( - dsl::project_id.eq(project_id).or(dsl::project_id.is_null()), - ) .filter(dsl::is_default.eq(true)) .filter(dsl::time_deleted.is_null()) // this will sort by most specific first, i.e., // - // (silo, project) - // (silo, null) - // (null, null) + // (silo) + // (null) // // then by only taking the first result, we get the most specific one - .order(( - dsl::project_id.asc().nulls_last(), - dsl::silo_id.asc().nulls_last(), - )) + .order(dsl::silo_id.asc().nulls_last()) .select(IpPool::as_select()) .first_async::(self.pool_authorized(opctx).await?) 
.await
@@ -128,7 +116,6 @@ impl DataStore {
         &self,
         opctx: &OpContext,
         name: &Name,
-        project_id: Option,
     ) -> LookupResult {
         let authz_silo_id = opctx.authn.silo_required()?.id();
 
@@ -140,24 +127,13 @@ impl DataStore {
             .await?;
 
         // You can't look up a pool by name if it conflicts with your current
-        // scope, i.e., if it has a silo or project and those are different from
-        // your current silo or project
-
+        // scope, i.e., if it has a silo that is different from your current silo
         if let Some(pool_silo_id) = pool.silo_id {
             if pool_silo_id != authz_silo_id {
                 return Err(authz_pool.not_found());
             }
         }
 
-        match (pool.project_id, project_id) {
-            (Some(pool_project_id), Some(current_project_id)) => {
-                if pool_project_id != current_project_id {
-                    return Err(authz_pool.not_found());
-                }
-            }
-            _ => {}
-        }
-
         Ok(pool)
     }
 
@@ -494,7 +470,6 @@ mod test {
     use nexus_types::identity::Resource;
     use omicron_common::api::external::{Error, IdentityMetadataCreateParams};
     use omicron_test_utils::dev;
-    use uuid::Uuid;
 
     #[tokio::test]
     async fn test_default_ip_pools() {
@@ -502,18 +477,14 @@ mod test {
         let mut db = test_setup_database(&logctx.log).await;
         let (opctx, datastore) = datastore_test(&logctx, &db).await;
 
-        // it doesn't matter whether the project exists
-        let project_id = Uuid::new_v4();
-
         // we start out with the default fleet-level pool already created,
         // so when we ask for a default silo, we get it back
         let fleet_default_pool =
-            datastore.ip_pools_fetch_default_for(&opctx, None).await.unwrap();
+            datastore.ip_pools_fetch_default_for(&opctx).await.unwrap();
 
         assert_eq!(fleet_default_pool.identity.name.as_str(), "default");
         assert!(fleet_default_pool.is_default);
         assert_eq!(fleet_default_pool.silo_id, None);
-        assert_eq!(fleet_default_pool.project_id, None);
 
         // unique index prevents second fleet-level default
         let identity = IdentityMetadataCreateParams {
@@ -529,18 +500,9 @@ mod test {
             .expect_err("Failed to fail to create a second default fleet pool");
         assert_matches!(err, Error::ObjectAlreadyExists { ..
});
 
-        // now the interesting thing is that when we fetch the default pool for
-        // a particular silo or a particular project, if those scopes do not
+        // when we fetch the default pool for a silo, if that silo does not
         // have a default IP pool, we will still get back the fleet default
 
-        // default for passed in project is still the fleet default one because it
-        // has no default of its own
-        let ip_pool = datastore
-            .ip_pools_fetch_default_for(&opctx, Some(project_id))
-            .await
-            .expect("Failed to get silo's default IP pool");
-        assert_eq!(ip_pool.id(), fleet_default_pool.id());
-
         let silo_id = opctx.authn.silo_required().unwrap().id();
 
         // create a non-default pool for the silo
@@ -555,18 +517,13 @@ mod test {
         )
         .await;
 
-        // because that one was not a default, when we ask for silo or project default
+        // because that one was not a default, when we ask for the silo default
         // pool, we still get the fleet default
         let ip_pool = datastore
-            .ip_pools_fetch_default_for(&opctx, None)
+            .ip_pools_fetch_default_for(&opctx)
             .await
             .expect("Failed to get silo default IP pool");
         assert_eq!(ip_pool.id(), fleet_default_pool.id());
-        let ip_pool = datastore
-            .ip_pools_fetch_default_for(&opctx, Some(project_id))
-            .await
-            .expect("Failed to get project default IP pool");
-        assert_eq!(ip_pool.id(), fleet_default_pool.id());
 
         // now create a default pool for the silo
         let identity = IdentityMetadataCreateParams {
@@ -579,18 +536,14 @@ mod test {
 
         // now when we ask for the default pool, we get the one we just made
         let ip_pool = datastore
-            .ip_pools_fetch_default_for(&opctx, Some(project_id))
+            .ip_pools_fetch_default_for(&opctx)
             .await
             .expect("Failed to get silo's default IP pool");
         assert_eq!(ip_pool.name().as_str(), "default-for-silo");
 
         // if we ask for the fleet default by name, we can still get that one
         let ip_pool = datastore
-            .ip_pools_fetch_for(
-                &opctx,
-                &Name("default".parse().unwrap()),
-                Some(project_id),
-            )
+            .ip_pools_fetch_for(&opctx, &Name("default".parse().unwrap()))
             .await
             .expect("Failed to get fleet default IP pool");
         assert_eq!(ip_pool.id(), fleet_default_pool.id());
diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs
index 7f55ee778d..e305291d53 100644
--- a/nexus/db-queries/src/db/queries/external_ip.rs
+++ b/nexus/db-queries/src/db/queries/external_ip.rs
@@ -924,7 +924,7 @@ mod tests {
     async fn default_pool_id(&self) -> Uuid {
         let pool = self
             .db_datastore
-            .ip_pools_fetch_default_for(&self.opctx, None)
+            .ip_pools_fetch_default_for(&self.opctx)
             .await
             .expect("Failed to lookup default ip pool");
         pool.identity.id
@@ -995,7 +995,6 @@ mod tests {
         let context =
             TestContext::new("test_ephemeral_and_snat_ips_do_not_overlap")
                 .await;
-        let project_id = Uuid::new_v4();
         let range = IpRange::try_from((
             Ipv4Addr::new(10, 0, 0, 1),
             Ipv4Addr::new(10, 0, 0, 1),
@@ -1012,7 +1011,6 @@
                 &context.opctx,
                 Uuid::new_v4(),
                 instance_id,
-                project_id,
                 /* pool_name = */ None,
             )
             .await
@@ -1052,7 +1050,6 @@
                 &context.opctx,
                 Uuid::new_v4(),
                 instance_id,
-                project_id,
                 /* pool_name = */ None,
             )
             .await;
@@ -1187,7 +1184,6 @@
             .unwrap();
         context.initialize_ip_pool("default", range).await;
 
-        let project_id = Uuid::new_v4();
         let instance_id = Uuid::new_v4();
         let id = Uuid::new_v4();
         let pool_name = None;
@@ -1198,7 +1194,6 @@
                 &context.opctx,
                 id,
                 instance_id,
-                project_id,
                 pool_name,
             )
             .await
@@ -1718,7 +1713,6 @@ mod tests {
 
         // Allocating an address on an instance in the second pool should be
        //
respected, even though there are IPs available in the first. - let project_id = Uuid::new_v4(); let instance_id = Uuid::new_v4(); let id = Uuid::new_v4(); let pool_name = Some(Name("p1".parse().unwrap())); @@ -1729,7 +1723,6 @@ mod tests { &context.opctx, id, instance_id, - project_id, pool_name, ) .await @@ -1749,7 +1742,6 @@ mod tests { ) .await; - let project_id = Uuid::new_v4(); let first_range = IpRange::try_from(( Ipv4Addr::new(10, 0, 0, 1), Ipv4Addr::new(10, 0, 0, 3), @@ -1774,7 +1766,6 @@ mod tests { &context.opctx, Uuid::new_v4(), instance_id, - project_id, pool_name.clone(), ) .await @@ -1794,7 +1785,6 @@ mod tests { &context.opctx, Uuid::new_v4(), instance_id, - project_id, pool_name, ) .await diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index aeb3374a9f..46271c56bf 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -825,10 +825,9 @@ async fn sic_allocate_instance_snat_ip( ); let instance_id = sagactx.lookup::("instance_id")?; let ip_id = sagactx.lookup::("snat_ip_id")?; - let project_id = saga_params.project_id; let pool = datastore - .ip_pools_fetch_default_for(&opctx, Some(project_id)) + .ip_pools_fetch_default_for(&opctx) .await .map_err(ActionError::action_failed)?; let pool_id = pool.identity.id; @@ -877,7 +876,6 @@ async fn sic_allocate_instance_external_ip( &sagactx, &saga_params.serialized_authn, ); - let project_id = saga_params.project_id; let instance_id = repeat_saga_params.instance_id; let ip_id = repeat_saga_params.new_id; @@ -888,13 +886,7 @@ async fn sic_allocate_instance_external_ip( } }; datastore - .allocate_instance_ephemeral_ip( - &opctx, - ip_id, - instance_id, - project_id, - pool_name, - ) + .allocate_instance_ephemeral_ip(&opctx, ip_id, instance_id, pool_name) .await .map_err(ActionError::action_failed)?; Ok(()) diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 431f6951cc..f3acf7757b 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -737,10 +737,9 @@ pub struct IpPoolCreate { pub silo: Option, /// Whether the IP pool is considered a default pool for its scope (fleet, - /// silo, or project). If a pool is marked default and is associated with a - /// silo and there is no default pool for the project, instances created in - /// that silo will draw IPs from that pool unless another pool is specified - /// at instance create time. + /// or silo). If a pool is marked default and is associated with a silo, + /// instances created in that silo will draw IPs from that pool unless + /// another pool is specified at instance create time. #[serde(default)] pub is_default: bool, } diff --git a/openapi/nexus.json b/openapi/nexus.json index 53f5788910..b0f60a97c8 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -10173,7 +10173,7 @@ "type": "string" }, "is_default": { - "description": "Whether the IP pool is considered a default pool for its scope (fleet, silo, or project). If a pool is marked default and is associated with a silo and there is no default pool for the project, instances created in that silo will draw IPs from that pool unless another pool is specified at instance create time.", + "description": "Whether the IP pool is considered a default pool for its scope (fleet, or silo). 
If a pool is marked default and is associated with a silo, instances created in that silo will draw IPs from that pool unless another pool is specified at instance create time.",
          "default": false,
          "type": "boolean"
        },
diff --git a/schema/crdb/3.0.1/up.sql b/schema/crdb/3.0.1/up.sql
index 8c501754f7..a630c170e9 100644
--- a/schema/crdb/3.0.1/up.sql
+++ b/schema/crdb/3.0.1/up.sql
@@ -12,7 +12,9 @@ SELECT CAST(
 );
 
 ALTER TABLE omicron.public.ip_pool
+    DROP COLUMN IF EXISTS project_id,
     ADD COLUMN IF NOT EXISTS is_default BOOLEAN NOT NULL DEFAULT FALSE,
+    DROP CONSTRAINT IF EXISTS project_implies_silo,
     DROP CONSTRAINT IF EXISTS internal_pools_have_null_silo_and_project;
 
 COMMIT;
@@ -33,8 +35,7 @@ SELECT CAST(
 );
 
 CREATE UNIQUE INDEX IF NOT EXISTS one_default_pool_per_scope ON omicron.public.ip_pool (
-    COALESCE(silo_id, '00000000-0000-0000-0000-000000000000'::uuid),
-    COALESCE(project_id, '00000000-0000-0000-0000-000000000000'::uuid)
+    COALESCE(silo_id, '00000000-0000-0000-0000-000000000000'::uuid)
 ) WHERE
     is_default = true AND time_deleted IS NULL;
diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql
index a748cbac65..0d612e36ac 100644
--- a/schema/crdb/dbinit.sql
+++ b/schema/crdb/dbinit.sql
@@ -1484,30 +1484,23 @@ CREATE TABLE IF NOT EXISTS omicron.public.ip_pool (
     rcgen INT8 NOT NULL,
 
     /*
-     * Fields representating association with a silo or project. silo_id must
-     * be non-null if project_id is non-null. silo_id is also used to mark an IP
-     * pool as "internal" by associating it with the oxide-internal silo.
+     * Association with a silo. silo_id is also used to mark an IP pool as
+     * "internal" by associating it with the oxide-internal silo. Null silo_id
+     * means the pool can be used fleet-wide.
      */
     silo_id UUID,
-    project_id UUID,
-
-    /* Is this the default pool for its scope (fleet, silo, or project) */
-    is_default BOOLEAN NOT NULL DEFAULT FALSE,
-
-    -- if silo_id is null, then project_id must be null
-    CONSTRAINT project_implies_silo CHECK (
-        NOT ((silo_id IS NULL) AND (project_id IS NOT NULL))
-    )
+    /* Is this the default pool for its scope (fleet or silo) */
+    is_default BOOLEAN NOT NULL DEFAULT FALSE
 );
 
 /*
- * Ensure there can only be one default pool for the fleet or a given silo or
- * project. Coalesce is needed because otherwise different nulls are considered
- * to be distinct from each other.
+ * Ensure there can only be one default pool for the fleet or a given silo.
+ * Coalesce is needed because otherwise different nulls are considered to be
+ * distinct from each other.
*/ CREATE UNIQUE INDEX IF NOT EXISTS one_default_pool_per_scope ON omicron.public.ip_pool ( - COALESCE(silo_id, '00000000-0000-0000-0000-000000000000'::uuid), - COALESCE(project_id, '00000000-0000-0000-0000-000000000000'::uuid) + COALESCE(silo_id, '00000000-0000-0000-0000-000000000000'::uuid) ) WHERE is_default = true AND time_deleted IS NULL; From 8ab9920c0775c762af8c1e39f2cd1741f1193c0d Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 30 Aug 2023 16:48:57 -0500 Subject: [PATCH 17/17] remove superfluous _for on method names --- nexus/db-queries/src/db/datastore/external_ip.rs | 4 ++-- nexus/db-queries/src/db/datastore/ip_pool.rs | 12 ++++++------ nexus/db-queries/src/db/queries/external_ip.rs | 2 +- nexus/src/app/sagas/instance_create.rs | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index 9ddbeebb0b..c7b9852611 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -56,9 +56,9 @@ impl DataStore { // as long as its scopes don't conflict with the current scope. // Otherwise, not found. let pool = match pool_name { - Some(name) => self.ip_pools_fetch_for(&opctx, &name).await?, + Some(name) => self.ip_pools_fetch(&opctx, &name).await?, // If no name given, use the default logic - None => self.ip_pools_fetch_default_for(&opctx).await?, + None => self.ip_pools_fetch_default(&opctx).await?, }; let pool_id = pool.identity.id; diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 166237eabe..f2e1d4a821 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -77,7 +77,7 @@ impl DataStore { /// There should always be a default pool at the fleet level, though this /// query can theoretically fail if someone is able to delete that pool or /// make another one the default and delete that. - pub async fn ip_pools_fetch_default_for( + pub async fn ip_pools_fetch_default( &self, opctx: &OpContext, ) -> LookupResult { @@ -112,7 +112,7 @@ impl DataStore { } /// Looks up an IP pool by name if it does not conflict with your current scope. 
- pub(crate) async fn ip_pools_fetch_for( + pub(crate) async fn ip_pools_fetch( &self, opctx: &OpContext, name: &Name, @@ -480,7 +480,7 @@ mod test { // we start out with the default fleet-level pool already created, // so when we ask for a default silo, we get it back let fleet_default_pool = - datastore.ip_pools_fetch_default_for(&opctx).await.unwrap(); + datastore.ip_pools_fetch_default(&opctx).await.unwrap(); assert_eq!(fleet_default_pool.identity.name.as_str(), "default"); assert!(fleet_default_pool.is_default); @@ -520,7 +520,7 @@ mod test { // because that one was not a default, when we ask for the silo default // pool, we still get the fleet default let ip_pool = datastore - .ip_pools_fetch_default_for(&opctx) + .ip_pools_fetch_default(&opctx) .await .expect("Failed to get silo default IP pool"); assert_eq!(ip_pool.id(), fleet_default_pool.id()); @@ -536,14 +536,14 @@ mod test { // now when we ask for the default pool, we get the one we just made let ip_pool = datastore - .ip_pools_fetch_default_for(&opctx) + .ip_pools_fetch_default(&opctx) .await .expect("Failed to get silo's default IP pool"); assert_eq!(ip_pool.name().as_str(), "default-for-silo"); // if we ask for the fleet default by name, we can still get that one let ip_pool = datastore - .ip_pools_fetch_for(&opctx, &Name("default".parse().unwrap())) + .ip_pools_fetch(&opctx, &Name("default".parse().unwrap())) .await .expect("Failed to get fleet default IP pool"); assert_eq!(ip_pool.id(), fleet_default_pool.id()); diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index e305291d53..e5f57181fa 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -924,7 +924,7 @@ mod tests { async fn default_pool_id(&self) -> Uuid { let pool = self .db_datastore - .ip_pools_fetch_default_for(&self.opctx) + .ip_pools_fetch_default(&self.opctx) .await .expect("Failed to lookup default ip pool"); pool.identity.id diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index 46271c56bf..b520081de8 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -827,7 +827,7 @@ async fn sic_allocate_instance_snat_ip( let ip_id = sagactx.lookup::("snat_ip_id")?; let pool = datastore - .ip_pools_fetch_default_for(&opctx) + .ip_pools_fetch_default(&opctx) .await .map_err(ActionError::action_failed)?; let pool_id = pool.identity.id;
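
A note on the one_default_pool_per_scope index used above: within a unique
index, NULL values are treated as distinct from one another, so without the
COALESCE wrapper two fleet-wide default pools (both with silo_id = NULL)
would not collide. The following standalone sketch reproduces the trick on a
hypothetical scratch table (illustrative only; the real index lives on
omicron.public.ip_pool exactly as written in dbinit.sql):

    -- toy stand-in for omicron.public.ip_pool (hypothetical table)
    CREATE TABLE scratch_ip_pool (
        id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
        silo_id UUID,  -- NULL means the pool is fleet-scoped
        is_default BOOL NOT NULL DEFAULT false
    );

    -- Map NULL silo_id to a sentinel UUID so that all fleet-scoped defaults
    -- share one index key and therefore conflict with each other.
    CREATE UNIQUE INDEX one_default_per_scope ON scratch_ip_pool (
        COALESCE(silo_id, '00000000-0000-0000-0000-000000000000'::uuid)
    ) WHERE is_default = true;

    INSERT INTO scratch_ip_pool (silo_id, is_default)
        VALUES (NULL, true);  -- ok: first fleet-wide default
    INSERT INTO scratch_ip_pool (silo_id, is_default)
        VALUES (NULL, true);  -- fails: duplicate key on the sentinel
    INSERT INTO scratch_ip_pool (silo_id, is_default)
        VALUES (NULL, false); -- ok: the partial index ignores non-defaults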