From ae77727d291194860127f0b631ad8e01474361fe Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 16 May 2022 09:38:42 -0400 Subject: [PATCH 01/29] a compiling (... but wrong) version of attach --- nexus/src/db/collection_attach.rs | 809 ++++++++++++++++++++++++++++++ nexus/src/db/mod.rs | 1 + 2 files changed, 810 insertions(+) create mode 100644 nexus/src/db/collection_attach.rs diff --git a/nexus/src/db/collection_attach.rs b/nexus/src/db/collection_attach.rs new file mode 100644 index 00000000000..b762242df14 --- /dev/null +++ b/nexus/src/db/collection_attach.rs @@ -0,0 +1,809 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! CTE implementation for updating a row representing a child resource of a +//! collection. This atomically +//! 1) Checks if the collection exists and is not soft deleted, and fails +//! otherwise +//! 2) Updates the collection's child resource generation number +//! 3) Updates the child resource row + +use super::pool::DbConnection; +use async_bb8_diesel::{ + AsyncRunQueryDsl, ConnectionError, ConnectionManager, PoolError, +}; +use diesel::associations::HasTable; +use diesel::helper_types::*; +use diesel::pg::Pg; +use diesel::prelude::*; +use diesel::query_builder::*; +use diesel::query_dsl::methods as query_methods; +use diesel::query_source::Table; +use diesel::sql_types::SingleValue; +use std::fmt::Debug; +use std::marker::PhantomData; + +/// Trait to be implemented by structs representing an attachable collection. +/// +/// For example, since Instances have a one-to-many relationship with +/// Disks, the Instance datatype should implement this trait. +/// ``` +/// # use diesel::prelude::*; +/// # use omicron_nexus::db::collection_insert::DatastoreAttachable; +/// # use omicron_nexus::db::model::Generation; +/// # +/// # table! 
{ +/// # test_schema.instance (id) { +/// # id -> Uuid, +/// # time_deleted -> Nullable, +/// # rcgen -> Int8, +/// # } +/// # } +/// # +/// # table! { +/// # test_schema.disk (id) { +/// # id -> Uuid, +/// # time_deleted -> Nullable, +/// # rcgen -> Int8, +/// # instance_id -> Nullable, +/// # } +/// # } +/// +/// #[derive(Queryable, Insertable, Debug, Selectable)] +/// #[diesel(table_name = disk)] +/// struct Disk { +/// pub id: uuid::Uuid, +/// pub time_deleted: Option>, +/// pub rcgen: Generation, +/// pub instance_id: Option, +/// } +/// +/// #[derive(Queryable, Insertable, Debug, Selectable)] +/// #[diesel(table_name = instance)] +/// struct Instance { +/// pub id: uuid::Uuid, +/// pub time_deleted: Option>, +/// pub rcgen: Generation, +/// } +/// +/// impl DatastoreAttachable for Instance { +/// // Type of Instance::identity::id and the "Some" variant of the optional +/// // Disk::instance_id. +/// type CollectionId = uuid::Uuid; +/// +/// type ParentGenerationColumn = instance::dsl::rcgen; +/// type ParentTimeDeletedColumn = instance::dsl::time_deleted; +/// +/// type ChildGenerationColumn = disk::dsl::rcgen; +/// type ChildTimeDeletedColumn = disk::dsl::time_deleted; +/// type ChildCollectionIdColumn = disk::dsl::instance_id; +/// } +/// ``` +pub trait DatastoreAttachable { + /// The Rust type of the collection id (typically Uuid for us) + type CollectionId: Copy + Debug; + + /// The column in the CollectionTable that acts as a generation number. + /// This is the "child-resource-generation-number" in RFD 192. + type ParentGenerationColumn: Column + Default; + + /// The time deleted column in the CollectionTable + // We enforce that this column comes from the same table as + // ParentGenerationColumn when defining insert_resource() below. + type ParentTimeDeletedColumn: Column + Default; + + /// The column in the ResourceType that acts as a generation number. 
+ type ChildGenerationColumn: Column + Default; + type ChildTimeDeletedColumn: Column + Default; + + /// The column in the ResourceType that acts as a foreign key into + /// the CollectionTable + type ChildCollectionIdColumn: Column; + + /// Create a statement for inserting a resource into the given collection. + /// + /// The ISR type is the same type as the second generic argument to + /// InsertStatement, and should generally be inferred rather than explicitly + /// specified. + /// + /// CAUTION: The API does not currently enforce that `key` matches the value + /// of the collection id within the inserted row. + fn insert_resource( + key: Self::CollectionId, + // Note that InsertStatement's fourth argument defaults to Ret = + // NoReturningClause. This enforces that the given input statement does + // not have a RETURNING clause. + insert: InsertStatement, ISR>, + ) -> InsertIntoCollectionStatement + where + ( + ::Table, + ::Table, + ): TypesAreSame, + Self: Sized, + // Enables the "table()" method. + CollectionTable: HasTable> + + 'static + + Send + + Table + // Allows calling ".into_boxed()" on the table. + + query_methods::BoxedDsl< + 'static, + Pg, + Output = BoxedDslOutput>, + >, + // Allows treating "filter_subquery" as a boxed "dyn QueryFragment". + as QuerySource>::FromClause: + QueryFragment + Send, + // Allows sending "filter_subquery" between threads. + as AsQuery>::SqlType: Send, + // Allows calling ".filter()" on the boxed table. + BoxedQuery>: + query_methods::FilterDsl< + Eq< + CollectionPrimaryKey, + CollectionId, + >, + Output = BoxedQuery>, + > + query_methods::FilterDsl< + IsNull>, + Output = BoxedQuery>, + >, + // Allows using "key" in in ".eq(...)". + CollectionId: diesel::expression::AsExpression< + SerializedCollectionPrimaryKey, + >, + as Expression>::SqlType: + SingleValue, + // Allows calling "is_null()" on the time deleted column. 
+ ParentTimeDeletedColumn: ExpressionMethods, + // Necessary for output type (`InsertIntoCollectionStatement`). + ResourceType: Selectable, + { + let filter_subquery = Box::new( + as HasTable>::table() + .into_boxed() + .filter( + as HasTable>::table() + .primary_key() + .eq(key), + ) + .filter(Self::ParentTimeDeletedColumn::default().is_null()), + ); + + let from_clause = + as HasTable>::table() + .from_clause(); + let returning_clause = ResourceType::as_returning(); + InsertIntoCollectionStatement { + insert_statement: insert, + filter_subquery, + from_clause, + returning_clause, + query_type: PhantomData, + } + } +} + +/// Utility type to make trait bounds below easier to read. +type CollectionId = + >::CollectionId; +/// The table representing the collection. The resource references +/// this table. +type CollectionTable = <>::ParentGenerationColumn as Column>::Table; +/// The table representing the resource. This table contains an +/// ID acting as a foreign key into the collection table. 
+type ResourceTable = <>::ChildCollectionIdColumn as Column>::Table; +type ParentTimeDeletedColumn = + >::ParentTimeDeletedColumn; +type ParentGenerationColumn = + >::ParentGenerationColumn; + +// Trick to check that columns come from the same table +pub trait TypesAreSame {} +impl TypesAreSame for (T, T) {} + +/// The CTE described in the module docs +#[must_use = "Queries must be executed"] +pub struct InsertIntoCollectionStatement +where + ResourceType: Selectable, + C: DatastoreAttachable, +{ + insert_statement: InsertStatement, ISR>, + filter_subquery: Box + Send>, + from_clause: as QuerySource>::FromClause, + returning_clause: AsSelect, + query_type: PhantomData, +} + +impl QueryId + for InsertIntoCollectionStatement +where + C: DatastoreAttachable, + ResourceType: Selectable, +{ + type QueryId = (); + const HAS_STATIC_QUERY_ID: bool = false; +} + +/// Result of [`InsertIntoCollectionStatement`] when executed asynchronously +pub type AsyncInsertIntoCollectionResult = Result; + +/// Result of [`InsertIntoCollectionStatement`] when executed synchronously +pub type SyncInsertIntoCollectionResult = Result; + +/// Errors returned by [`InsertIntoCollectionStatement`]. +#[derive(Debug)] +pub enum AsyncInsertError { + /// The collection that the query was inserting into does not exist + CollectionNotFound, + /// Other database error + DatabaseError(PoolError), +} + +/// Errors returned by [`InsertIntoCollectionStatement`]. 
+#[derive(Debug)] +pub enum SyncInsertError { + /// The collection that the query was inserting into does not exist + CollectionNotFound, + /// Other database error + DatabaseError(diesel::result::Error), +} + +impl InsertIntoCollectionStatement +where + ResourceType: 'static + Debug + Send + Selectable, + C: 'static + DatastoreAttachable + Send, + CollectionId: 'static + PartialEq + Send, + ResourceTable: 'static + Table + Send + Copy + Debug, + ISR: 'static + Send, + InsertIntoCollectionStatement: Send, +{ + /// Issues the CTE asynchronously and parses the result. + /// + /// The three outcomes are: + /// - Ok(new row) + /// - Error(collection not found) + /// - Error(other diesel error) + pub async fn insert_and_get_result_async( + self, + pool: &bb8::Pool>, + ) -> AsyncInsertIntoCollectionResult + where + // We require this bound to ensure that "Self" is runnable as query. + Self: query_methods::LoadQuery<'static, DbConnection, ResourceType>, + { + self.get_result_async::(pool) + .await + .map_err(Self::translate_async_error) + } + + /// Issues the CTE asynchronously and parses the result. + /// + /// The three outcomes are: + /// - Ok(Vec of new rows) + /// - Error(collection not found) + /// - Error(other diesel error) + pub async fn insert_and_get_results_async( + self, + pool: &bb8::Pool>, + ) -> AsyncInsertIntoCollectionResult> + where + // We require this bound to ensure that "Self" is runnable as query. + Self: query_methods::LoadQuery<'static, DbConnection, ResourceType>, + { + self.get_results_async::(pool) + .await + .map_err(Self::translate_async_error) + } + + /// Issues the CTE synchronously and parses the result. + /// + /// The three outcomes are: + /// - Ok(new row) + /// - Error(collection not found) + /// - Error(other diesel error) + pub fn insert_and_get_result( + self, + conn: &mut DbConnection, + ) -> SyncInsertIntoCollectionResult + where + // We require this bound to ensure that "Self" is runnable as query. 
+ Self: query_methods::LoadQuery<'static, DbConnection, ResourceType>, + { + self.get_result::(conn) + .map_err(Self::translate_sync_error) + } + + /// Issues the CTE synchronously and parses the result. + /// + /// The three outcomes are: + /// - Ok(Vec of new rows) + /// - Error(collection not found) + /// - Error(other diesel error) + pub fn insert_and_get_results( + self, + conn: &mut DbConnection, + ) -> SyncInsertIntoCollectionResult> + where + // We require this bound to ensure that "Self" is runnable as query. + Self: query_methods::LoadQuery<'static, DbConnection, ResourceType>, + { + self.get_results::(conn) + .map_err(Self::translate_sync_error) + } + + /// Check for the intentional division by zero error + fn error_is_division_by_zero(err: &diesel::result::Error) -> bool { + match err { + // See + // https://rfd.shared.oxide.computer/rfd/0192#_dueling_administrators + // for a full explanation of why we're checking for this. In + // summary, the CTE generates a division by zero intentionally + // if the collection doesn't exist in the database. + diesel::result::Error::DatabaseError( + diesel::result::DatabaseErrorKind::Unknown, + info, + ) if info.message() == "division by zero" => true, + _ => false, + } + } + + /// Translate from diesel errors into AsyncInsertError, handling the + /// intentional division-by-zero error in the CTE. + fn translate_async_error(err: PoolError) -> AsyncInsertError { + match err { + PoolError::Connection(ConnectionError::Query(err)) + if Self::error_is_division_by_zero(&err) => + { + AsyncInsertError::CollectionNotFound + } + other => AsyncInsertError::DatabaseError(other), + } + } + + /// Translate from diesel errors into SyncInsertError, handling the + /// intentional division-by-zero error in the CTE. 
+ fn translate_sync_error(err: diesel::result::Error) -> SyncInsertError { + if Self::error_is_division_by_zero(&err) { + SyncInsertError::CollectionNotFound + } else { + SyncInsertError::DatabaseError(err) + } + } +} + +type SelectableSqlType = + <>::SelectExpression as Expression>::SqlType; + +impl Query + for InsertIntoCollectionStatement +where + ResourceType: Selectable, + C: DatastoreAttachable, +{ + type SqlType = SelectableSqlType; +} + +impl RunQueryDsl + for InsertIntoCollectionStatement +where + ResourceType: Selectable, + C: DatastoreAttachable, +{ +} + +// Representation of Primary Key in Rust. +type CollectionPrimaryKey = + as Table>::PrimaryKey; +// Representation of Primary Key in SQL. +type SerializedCollectionPrimaryKey = + as diesel::Expression>::SqlType; + +type TableSqlType = ::SqlType; + +type BoxedQuery = diesel::helper_types::IntoBoxed<'static, T, Pg>; +type BoxedDslOutput = diesel::internal::table_macro::BoxedSelectStatement< + 'static, + TableSqlType, + diesel::internal::table_macro::FromClause, + Pg, +>; + +/// This implementation uses the following CTE: +/// +/// ```text +/// // WITH found_row AS MATERIALIZED ( +/// // SELECT FROM C WHERE = AND +/// // IS NULL FOR UPDATE), +/// // dummy AS MATERIALIZED ( +/// // SELECT IF(EXISTS(SELECT FROM found_row), TRUE, +/// // CAST(1/0 AS BOOL))), +/// // updated_row AS MATERIALIZED ( +/// // UPDATE C SET = + 1 WHERE +/// // IN (SELECT FROM found_row) RETURNING 1), +/// // inserted_row AS ( +/// // RETURNING ) +/// // SELECT * FROM inserted_row; +/// ``` +/// +/// This CTE is equivalent in desired behavior to the one specified in +/// [RFD 192](https://rfd.shared.oxide.computer/rfd/0192#_dueling_administrators). +/// +/// The general idea is that the first clause of the CTE (the "dummy" table) +/// will generate a divison-by-zero error and rollback the transaction if the +/// target collection is not found in its table. 
It simultaneously locks the +/// row for update, to allow us to subsequently use the "updated_row" query to +/// increase the child-resource generation count for the collection. In the same +/// transaction, it performs the provided insert query, which should +/// insert a new resource into its table with its collection id column set +/// to the collection we just checked for. +/// +/// NOTE: It is important that the WHERE clauses on the SELECT and UPDATE +/// against the collection table must match, or else we will not get the desired +/// behavior described in RFD 192. +/// NOTE: It is important that the WITH clauses have MATERIALIZED, since under +/// some conditions, clauses may be inlined (and potentially eliminated by +/// consequence of being unused). At the time of writing this, this happens +/// for the "dummy" table, preventing the division-by-zero error from occuring. +/// The MATERIALIZED keyword forces the queries that are not referenced +/// to be materialized instead. +impl QueryFragment + for InsertIntoCollectionStatement +where + ResourceType: Selectable, + C: DatastoreAttachable, + CollectionPrimaryKey: diesel::Column, + // Necessary to "walk_ast" over "select.from_clause". + as QuerySource>::FromClause: + QueryFragment, + // Necessary to "walk_ast" over "self.insert_statement". + InsertStatement, ISR>: QueryFragment, + // Necessary to "walk_ast" over "self.returning_clause". 
+ AsSelect: QueryFragment, +{ + fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, Pg>) -> QueryResult<()> { + out.push_sql("WITH found_row AS MATERIALIZED ("); + self.filter_subquery.walk_ast(out.reborrow())?; + // Manually add the FOR_UPDATE, since .for_update() is incompatible with + // BoxedQuery + out.push_sql(" FOR UPDATE), "); + out.push_sql( + "dummy AS MATERIALIZED (\ + SELECT IF(EXISTS(SELECT ", + ); + out.push_identifier(CollectionPrimaryKey::::NAME)?; + out.push_sql(" FROM found_row), TRUE, CAST(1/0 AS BOOL))), "); + + // Write the update manually instead of with the dsl, to avoid the + // explosion in complexity of type traits + out.push_sql("updated_row AS MATERIALIZED (UPDATE "); + self.from_clause.walk_ast(out.reborrow())?; + out.push_sql(" SET "); + out.push_identifier(ParentGenerationColumn::::NAME)?; + out.push_sql(" = "); + out.push_identifier(ParentGenerationColumn::::NAME)?; + out.push_sql(" + 1 WHERE "); + out.push_identifier(CollectionPrimaryKey::::NAME)?; + out.push_sql(" IN (SELECT "); + // We must include "RETURNING 1" since all CTE clauses must return + // something + out.push_identifier(CollectionPrimaryKey::::NAME)?; + out.push_sql(" FROM found_row) RETURNING 1), "); + + out.push_sql("inserted_row AS ("); + // TODO: Check or force the insert_statement to have + // C::ChildCollectionIdColumn set + self.insert_statement.walk_ast(out.reborrow())?; + out.push_sql(" RETURNING "); + // We manually write the RETURNING clause here because the wrapper type + // used for InsertStatement's Ret generic is private to diesel and so we + // cannot express it. 
+ self.returning_clause.walk_ast(out.reborrow())?; + + out.push_sql(") SELECT * FROM inserted_row"); + Ok(()) + } +} + +#[cfg(test)] +mod test { + use super::{AsyncInsertError, DatastoreAttachable, SyncInsertError}; + use crate::db::{ + self, error::TransactionError, identity::Resource as IdentityResource, + }; + use async_bb8_diesel::{ + AsyncConnection, AsyncRunQueryDsl, AsyncSimpleConnection, + }; + use chrono::{DateTime, NaiveDateTime, Utc}; + use db_macros::Resource; + use diesel::expression_methods::ExpressionMethods; + use diesel::pg::Pg; + use diesel::QueryDsl; + use nexus_test_utils::db::test_setup_database; + use omicron_test_utils::dev; + + table! { + test_schema.collection (id) { + id -> Uuid, + name -> Text, + description -> Text, + time_created -> Timestamptz, + time_modified -> Timestamptz, + time_deleted -> Nullable, + rcgen -> Int8, + } + } + + table! { + test_schema.resource (id) { + id -> Uuid, + name -> Text, + description -> Text, + time_created -> Timestamptz, + time_modified -> Timestamptz, + time_deleted -> Nullable, + rcgen -> Int8, + collection_id -> Nullable, + } + } + + async fn setup_db(pool: &crate::db::Pool) { + let connection = pool.pool().get().await.unwrap(); + (*connection) + .batch_execute_async( + "CREATE SCHEMA IF NOT EXISTS test_schema; \ + CREATE TABLE IF NOT EXISTS test_schema.collection ( \ + id UUID PRIMARY KEY, \ + name STRING(63) NOT NULL, \ + description STRING(512) NOT NULL, \ + time_created TIMESTAMPTZ NOT NULL, \ + time_modified TIMESTAMPTZ NOT NULL, \ + time_deleted TIMESTAMPTZ, \ + rcgen INT NOT NULL); \ + CREATE TABLE IF NOT EXISTS test_schema.resource( \ + id UUID PRIMARY KEY, \ + name STRING(63) NOT NULL, \ + description STRING(512) NOT NULL, \ + time_created TIMESTAMPTZ NOT NULL, \ + time_modified TIMESTAMPTZ NOT NULL, \ + time_deleted TIMESTAMPTZ, \ + rcgen INT NOT NULL, \ + collection_id UUID); \ + TRUNCATE test_schema.collection; \ + TRUNCATE test_schema.resource", + ) + .await + .unwrap(); + } + + /// 
Describes a resource within the database. + #[derive(Queryable, Insertable, Debug, Resource, Selectable)] + #[diesel(table_name = resource)] + struct Resource { + #[diesel(embed)] + pub identity: ResourceIdentity, + + pub rcgen: i64, + pub collection_id: Option, + } + + struct Collection; + impl DatastoreAttachable for Collection { + type CollectionId = uuid::Uuid; + + type ParentGenerationColumn = collection::dsl::rcgen; + type ParentTimeDeletedColumn = collection::dsl::time_deleted; + + type ChildGenerationColumn = resource::dsl::rcgen; + type ChildTimeDeletedColumn = resource::dsl::time_deleted; + type ChildCollectionIdColumn = resource::dsl::collection_id; + } + + #[test] + fn test_verify_query() { + let collection_id = + uuid::Uuid::parse_str("223cb7f7-0d3a-4a4e-a5e1-ad38ecb785d0") + .unwrap(); + let resource_id = + uuid::Uuid::parse_str("223cb7f7-0d3a-4a4e-a5e1-ad38ecb785d8") + .unwrap(); + let create_time = + DateTime::::from_utc(NaiveDateTime::from_timestamp(0, 0), Utc); + let modify_time = + DateTime::::from_utc(NaiveDateTime::from_timestamp(1, 0), Utc); + let insert = Collection::insert_resource( + collection_id, + diesel::insert_into(resource::table).values(vec![( + resource::dsl::id.eq(resource_id), + resource::dsl::name.eq("test"), + resource::dsl::description.eq("desc"), + resource::dsl::time_created.eq(create_time), + resource::dsl::time_modified.eq(modify_time), + resource::dsl::rcgen.eq(0), + resource::dsl::collection_id.eq(Some(collection_id)), + )]), + ); + let query = diesel::debug_query::(&insert).to_string(); + + let expected_query = "WITH \ + found_row AS MATERIALIZED (SELECT \ + \"test_schema\".\"collection\".\"id\", \ + \"test_schema\".\"collection\".\"name\", \ + \"test_schema\".\"collection\".\"description\", \ + \"test_schema\".\"collection\".\"time_created\", \ + \"test_schema\".\"collection\".\"time_modified\", \ + \"test_schema\".\"collection\".\"time_deleted\", \ + \"test_schema\".\"collection\".\"rcgen\" \ + FROM 
\"test_schema\".\"collection\" WHERE (\ + (\"test_schema\".\"collection\".\"id\" = $1) AND \ + (\"test_schema\".\"collection\".\"time_deleted\" IS NULL)\ + ) FOR UPDATE), \ + dummy AS MATERIALIZED (SELECT IF(\ + EXISTS(SELECT \"id\" FROM found_row), \ + TRUE, CAST(1/0 AS BOOL))), \ + updated_row AS MATERIALIZED (UPDATE \ + \"test_schema\".\"collection\" SET \"rcgen\" = \"rcgen\" + 1 \ + WHERE \"id\" IN (SELECT \"id\" FROM found_row) RETURNING 1), \ + inserted_row AS (INSERT INTO \"test_schema\".\"resource\" \ + (\"id\", \"name\", \"description\", \"time_created\", \ + \"time_modified\", \"collection_id\") \ + VALUES ($2, $3, $4, $5, $6, $7) \ + RETURNING \"test_schema\".\"resource\".\"id\", \ + \"test_schema\".\"resource\".\"name\", \ + \"test_schema\".\"resource\".\"description\", \ + \"test_schema\".\"resource\".\"time_created\", \ + \"test_schema\".\"resource\".\"time_modified\", \ + \"test_schema\".\"resource\".\"time_deleted\", \ + \"test_schema\".\"resource\".\"collection_id\") \ + SELECT * FROM inserted_row \ + -- binds: [223cb7f7-0d3a-4a4e-a5e1-ad38ecb785d0, \ + 223cb7f7-0d3a-4a4e-a5e1-ad38ecb785d8, \ + \"test\", \ + \"desc\", \ + 1970-01-01T00:00:00Z, \ + 1970-01-01T00:00:01Z, \ + 223cb7f7-0d3a-4a4e-a5e1-ad38ecb785d0]"; + + assert_eq!(query, expected_query); + } + + #[tokio::test] + async fn test_collection_not_present() { + let logctx = dev::test_setup_log("test_collection_not_present"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + + setup_db(&pool).await; + + let collection_id = uuid::Uuid::new_v4(); + let resource_id = uuid::Uuid::new_v4(); + let insert = Collection::insert_resource( + collection_id, + diesel::insert_into(resource::table).values(( + resource::dsl::id.eq(resource_id), + resource::dsl::name.eq("test"), + resource::dsl::description.eq("desc"), + resource::dsl::time_created.eq(Utc::now()), + resource::dsl::time_modified.eq(Utc::now()), + 
resource::dsl::rcgen.eq(0), + resource::dsl::collection_id.eq(Some(collection_id)), + )), + ) + .insert_and_get_result_async(pool.pool()) + .await; + assert!(matches!(insert, Err(AsyncInsertError::CollectionNotFound))); + + let insert_query = Collection::insert_resource( + collection_id, + diesel::insert_into(resource::table).values(( + resource::dsl::id.eq(resource_id), + resource::dsl::name.eq("test"), + resource::dsl::description.eq("desc"), + resource::dsl::time_created.eq(Utc::now()), + resource::dsl::time_modified.eq(Utc::now()), + resource::dsl::rcgen.eq(0), + resource::dsl::collection_id.eq(Some(collection_id)), + )), + ); + + #[derive(Debug)] + enum CollectionError { + NotFound, + } + type TxnError = TransactionError; + + let result = pool + .pool() + .transaction(move |conn| { + insert_query.insert_and_get_result(conn).map_err(|e| match e { + SyncInsertError::CollectionNotFound => { + TxnError::CustomError(CollectionError::NotFound) + } + SyncInsertError::DatabaseError(e) => TxnError::from(e), + }) + }) + .await; + + assert!(matches!( + result, + Err(TxnError::CustomError(CollectionError::NotFound)) + )); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_collection_present() { + let logctx = dev::test_setup_log("test_collection_present"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + + setup_db(&pool).await; + + let collection_id = uuid::Uuid::new_v4(); + let resource_id = uuid::Uuid::new_v4(); + + // Insert the collection so it's present later + diesel::insert_into(collection::table) + .values(vec![( + collection::dsl::id.eq(collection_id), + collection::dsl::name.eq("test"), + collection::dsl::description.eq("desc"), + collection::dsl::time_created.eq(Utc::now()), + collection::dsl::time_modified.eq(Utc::now()), + collection::dsl::rcgen.eq(1), + )]) + .execute_async(pool.pool()) + .await + .unwrap(); + 
+ let create_time = + DateTime::::from_utc(NaiveDateTime::from_timestamp(0, 0), Utc); + let modify_time = + DateTime::::from_utc(NaiveDateTime::from_timestamp(1, 0), Utc); + let resource = Collection::insert_resource( + collection_id, + diesel::insert_into(resource::table).values(vec![( + resource::dsl::id.eq(resource_id), + resource::dsl::name.eq("test"), + resource::dsl::description.eq("desc"), + resource::dsl::time_created.eq(create_time), + resource::dsl::time_modified.eq(modify_time), + resource::dsl::rcgen.eq(0), + resource::dsl::collection_id.eq(Some(collection_id)), + )]), + ) + .insert_and_get_result_async(pool.pool()) + .await + .unwrap(); + assert_eq!(resource.id(), resource_id); + assert_eq!(resource.name().as_str(), "test"); + assert_eq!(resource.description(), "desc"); + assert_eq!(resource.time_created(), create_time); + assert_eq!(resource.time_modified(), modify_time); + assert_eq!(resource.collection_id.unwrap(), collection_id); + + let collection_rcgen = collection::table + .find(collection_id) + .select(collection::dsl::rcgen) + .first_async::(pool.pool()) + .await + .unwrap(); + + // Make sure rcgen got incremented + assert_eq!(collection_rcgen, 2); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } +} diff --git a/nexus/src/db/mod.rs b/nexus/src/db/mod.rs index 1ea4d28bb05..8da5a6aef51 100644 --- a/nexus/src/db/mod.rs +++ b/nexus/src/db/mod.rs @@ -6,6 +6,7 @@ // This is not intended to be public, but this is necessary to use it from // doctests +pub mod collection_attach; pub mod collection_insert; mod config; From 1f1230bb1bce01915a3d5c61f0d3a4d26e3acd2c Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 16 May 2022 15:57:26 -0400 Subject: [PATCH 02/29] wip collection attach --- nexus/src/app/instance.rs | 4 + nexus/src/db/collection_attach.rs | 275 ++++++++++++++---------------- nexus/src/db/model/disk.rs | 11 ++ 3 files changed, 144 insertions(+), 146 deletions(-) diff --git a/nexus/src/app/instance.rs 
b/nexus/src/app/instance.rs index 47cbb4ba352..9e3e0fc29e6 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -654,6 +654,10 @@ impl super::Nexus { // TODO this will probably involve volume construction requests as // well! InstanceState::Running | InstanceState::Starting => { + + // TODO: set state as "attaching". + // TODO: also can we check rcgens + self.disk_set_runtime( opctx, &authz_disk, diff --git a/nexus/src/db/collection_attach.rs b/nexus/src/db/collection_attach.rs index b762242df14..62952776f39 100644 --- a/nexus/src/db/collection_attach.rs +++ b/nexus/src/db/collection_attach.rs @@ -30,7 +30,7 @@ use std::marker::PhantomData; /// Disks, the Instance datatype should implement this trait. /// ``` /// # use diesel::prelude::*; -/// # use omicron_nexus::db::collection_insert::DatastoreAttachable; +/// # use omicron_nexus::db::collection_attach::DatastoreAttachTarget; /// # use omicron_nexus::db::model::Generation; /// # /// # table! { @@ -50,7 +50,7 @@ use std::marker::PhantomData; /// # } /// # } /// -/// #[derive(Queryable, Insertable, Debug, Selectable)] +/// #[derive(Queryable, Debug, Selectable)] /// #[diesel(table_name = disk)] /// struct Disk { /// pub id: uuid::Uuid, @@ -59,7 +59,7 @@ use std::marker::PhantomData; /// pub instance_id: Option, /// } /// -/// #[derive(Queryable, Insertable, Debug, Selectable)] +/// #[derive(Queryable, Debug, Selectable)] /// #[diesel(table_name = instance)] /// struct Instance { /// pub id: uuid::Uuid, @@ -67,20 +67,17 @@ use std::marker::PhantomData; /// pub rcgen: Generation, /// } /// -/// impl DatastoreAttachable for Instance { -/// // Type of Instance::identity::id and the "Some" variant of the optional -/// // Disk::instance_id. +/// impl DatastoreAttachTarget for Instance { +/// // Type of Instance::identity::id. 
/// type CollectionId = uuid::Uuid; /// /// type ParentGenerationColumn = instance::dsl::rcgen; /// type ParentTimeDeletedColumn = instance::dsl::time_deleted; /// -/// type ChildGenerationColumn = disk::dsl::rcgen; -/// type ChildTimeDeletedColumn = disk::dsl::time_deleted; /// type ChildCollectionIdColumn = disk::dsl::instance_id; /// } /// ``` -pub trait DatastoreAttachable { +pub trait DatastoreAttachTarget { /// The Rust type of the collection id (typically Uuid for us) type CollectionId: Copy + Debug; @@ -90,32 +87,31 @@ pub trait DatastoreAttachable { /// The time deleted column in the CollectionTable // We enforce that this column comes from the same table as - // ParentGenerationColumn when defining insert_resource() below. + // ParentGenerationColumn when defining attach_resource() below. type ParentTimeDeletedColumn: Column + Default; - /// The column in the ResourceType that acts as a generation number. - type ChildGenerationColumn: Column + Default; - type ChildTimeDeletedColumn: Column + Default; - /// The column in the ResourceType that acts as a foreign key into /// the CollectionTable type ChildCollectionIdColumn: Column; - /// Create a statement for inserting a resource into the given collection. + /// Create a statement for attaching a resource to the given collection. /// - /// The ISR type is the same type as the second generic argument to - /// InsertStatement, and should generally be inferred rather than explicitly + /// The U, V types are the same type as the 3rd and 4th generic arguments to + /// UpdateStatement, and should generally be inferred rather than explicitly /// specified. /// /// CAUTION: The API does not currently enforce that `key` matches the value - /// of the collection id within the inserted row. - fn insert_resource( + /// of the collection id within the attached row. 
+ fn attach_resource( key: Self::CollectionId, - // Note that InsertStatement's fourth argument defaults to Ret = + // TODO: I'd like to be able to add some filters on the parent type too. + // For example, checking the instance state. + + // Note that UpdateStatement's fourth argument defaults to Ret = // NoReturningClause. This enforces that the given input statement does // not have a RETURNING clause. - insert: InsertStatement, ISR>, - ) -> InsertIntoCollectionStatement + update: UpdateStatement, U, V>, + ) -> AttachToCollectionStatement where ( ::Table, @@ -158,7 +154,7 @@ pub trait DatastoreAttachable { SingleValue, // Allows calling "is_null()" on the time deleted column. ParentTimeDeletedColumn: ExpressionMethods, - // Necessary for output type (`InsertIntoCollectionStatement`). + // Necessary for output type (`AttachToCollectionStatement`). ResourceType: Selectable, { let filter_subquery = Box::new( @@ -176,8 +172,8 @@ pub trait DatastoreAttachable { as HasTable>::table() .from_clause(); let returning_clause = ResourceType::as_returning(); - InsertIntoCollectionStatement { - insert_statement: insert, + AttachToCollectionStatement { + update_statement: update, filter_subquery, from_clause, returning_clause, @@ -188,21 +184,21 @@ pub trait DatastoreAttachable { /// Utility type to make trait bounds below easier to read. type CollectionId = - >::CollectionId; + >::CollectionId; /// The table representing the collection. The resource references /// this table. -type CollectionTable = < = <>::ParentGenerationColumn as Column>::Table; /// The table representing the resource. This table contains an /// ID acting as a foreign key into the collection table. 
-type ResourceTable = < = <>::ChildCollectionIdColumn as Column>::Table; type ParentTimeDeletedColumn = - >::ParentTimeDeletedColumn; + >::ParentTimeDeletedColumn; type ParentGenerationColumn = - >::ParentGenerationColumn; + >::ParentGenerationColumn; // Trick to check that columns come from the same table pub trait TypesAreSame {} @@ -210,35 +206,35 @@ impl TypesAreSame for (T, T) {} /// The CTE described in the module docs #[must_use = "Queries must be executed"] -pub struct InsertIntoCollectionStatement +pub struct AttachToCollectionStatement where ResourceType: Selectable, - C: DatastoreAttachable, + C: DatastoreAttachTarget, { - insert_statement: InsertStatement, ISR>, + update_statement: UpdateStatement, U, V>, filter_subquery: Box + Send>, from_clause: as QuerySource>::FromClause, returning_clause: AsSelect, query_type: PhantomData, } -impl QueryId - for InsertIntoCollectionStatement +impl QueryId + for AttachToCollectionStatement where - C: DatastoreAttachable, ResourceType: Selectable, + C: DatastoreAttachTarget, { type QueryId = (); const HAS_STATIC_QUERY_ID: bool = false; } -/// Result of [`InsertIntoCollectionStatement`] when executed asynchronously -pub type AsyncInsertIntoCollectionResult = Result; +/// Result of [`AttachToCollectionStatement`] when executed asynchronously +pub type AsyncAttachToCollectionResult = Result; -/// Result of [`InsertIntoCollectionStatement`] when executed synchronously -pub type SyncInsertIntoCollectionResult = Result; +/// Result of [`AttachToCollectionStatement`] when executed synchronously +pub type SyncAttachToCollectionResult = Result; -/// Errors returned by [`InsertIntoCollectionStatement`]. +/// Errors returned by [`AttachToCollectionStatement`]. #[derive(Debug)] pub enum AsyncInsertError { /// The collection that the query was inserting into does not exist @@ -247,7 +243,7 @@ pub enum AsyncInsertError { DatabaseError(PoolError), } -/// Errors returned by [`InsertIntoCollectionStatement`]. 
+/// Errors returned by [`AttachToCollectionStatement`]. #[derive(Debug)] pub enum SyncInsertError { /// The collection that the query was inserting into does not exist @@ -256,14 +252,15 @@ pub enum SyncInsertError { DatabaseError(diesel::result::Error), } -impl InsertIntoCollectionStatement +impl AttachToCollectionStatement where ResourceType: 'static + Debug + Send + Selectable, - C: 'static + DatastoreAttachable + Send, + C: 'static + DatastoreAttachTarget + Send, CollectionId: 'static + PartialEq + Send, ResourceTable: 'static + Table + Send + Copy + Debug, - ISR: 'static + Send, - InsertIntoCollectionStatement: Send, + U: 'static + Send, + V: 'static + Send, + AttachToCollectionStatement: Send, { /// Issues the CTE asynchronously and parses the result. /// @@ -271,10 +268,10 @@ where /// - Ok(new row) /// - Error(collection not found) /// - Error(other diesel error) - pub async fn insert_and_get_result_async( + pub async fn attach_and_get_result_async( self, pool: &bb8::Pool>, - ) -> AsyncInsertIntoCollectionResult + ) -> AsyncAttachToCollectionResult where // We require this bound to ensure that "Self" is runnable as query. Self: query_methods::LoadQuery<'static, DbConnection, ResourceType>, @@ -290,10 +287,10 @@ where /// - Ok(Vec of new rows) /// - Error(collection not found) /// - Error(other diesel error) - pub async fn insert_and_get_results_async( + pub async fn attach_and_get_results_async( self, pool: &bb8::Pool>, - ) -> AsyncInsertIntoCollectionResult> + ) -> AsyncAttachToCollectionResult> where // We require this bound to ensure that "Self" is runnable as query. 
Self: query_methods::LoadQuery<'static, DbConnection, ResourceType>, @@ -309,10 +306,10 @@ where /// - Ok(new row) /// - Error(collection not found) /// - Error(other diesel error) - pub fn insert_and_get_result( + pub fn attach_and_get_result( self, conn: &mut DbConnection, - ) -> SyncInsertIntoCollectionResult + ) -> SyncAttachToCollectionResult where // We require this bound to ensure that "Self" is runnable as query. Self: query_methods::LoadQuery<'static, DbConnection, ResourceType>, @@ -327,10 +324,10 @@ where /// - Ok(Vec of new rows) /// - Error(collection not found) /// - Error(other diesel error) - pub fn insert_and_get_results( + pub fn attach_and_get_results( self, conn: &mut DbConnection, - ) -> SyncInsertIntoCollectionResult> + ) -> SyncAttachToCollectionResult> where // We require this bound to ensure that "Self" is runnable as query. Self: query_methods::LoadQuery<'static, DbConnection, ResourceType>, @@ -382,20 +379,20 @@ where type SelectableSqlType = <>::SelectExpression as Expression>::SqlType; -impl Query - for InsertIntoCollectionStatement +impl Query + for AttachToCollectionStatement where ResourceType: Selectable, - C: DatastoreAttachable, + C: DatastoreAttachTarget, { type SqlType = SelectableSqlType; } -impl RunQueryDsl - for InsertIntoCollectionStatement +impl RunQueryDsl + for AttachToCollectionStatement where ResourceType: Selectable, - C: DatastoreAttachable, + C: DatastoreAttachTarget, { } @@ -425,12 +422,12 @@ type BoxedDslOutput = diesel::internal::table_macro::BoxedSelectStatement< /// // dummy AS MATERIALIZED ( /// // SELECT IF(EXISTS(SELECT FROM found_row), TRUE, /// // CAST(1/0 AS BOOL))), -/// // updated_row AS MATERIALIZED ( +/// // updated_parent_row AS MATERIALIZED ( /// // UPDATE C SET = + 1 WHERE /// // IN (SELECT FROM found_row) RETURNING 1), -/// // inserted_row AS ( +/// // updated_resource_row AS ( /// // RETURNING ) -/// // SELECT * FROM inserted_row; +/// // SELECT * FROM updated_resource_row; /// ``` /// /// This 
CTE is equivalent in desired behavior to the one specified in @@ -439,11 +436,11 @@ type BoxedDslOutput = diesel::internal::table_macro::BoxedSelectStatement< /// The general idea is that the first clause of the CTE (the "dummy" table) /// will generate a divison-by-zero error and rollback the transaction if the /// target collection is not found in its table. It simultaneously locks the -/// row for update, to allow us to subsequently use the "updated_row" query to +/// row for update, to allow us to subsequently use the "updated_parent_row" query to /// increase the child-resource generation count for the collection. In the same -/// transaction, it performs the provided insert query, which should -/// insert a new resource into its table with its collection id column set -/// to the collection we just checked for. +/// transaction, it performs the provided update statement, which should +/// update the child resource, referencing the collection ID to the parent +/// collection we just checked for. /// /// NOTE: It is important that the WHERE clauses on the SELECT and UPDATE /// against the collection table must match, or else we will not get the desired @@ -454,17 +451,17 @@ type BoxedDslOutput = diesel::internal::table_macro::BoxedSelectStatement< /// for the "dummy" table, preventing the division-by-zero error from occuring. /// The MATERIALIZED keyword forces the queries that are not referenced /// to be materialized instead. -impl QueryFragment - for InsertIntoCollectionStatement +impl QueryFragment + for AttachToCollectionStatement where ResourceType: Selectable, - C: DatastoreAttachable, + C: DatastoreAttachTarget, CollectionPrimaryKey: diesel::Column, // Necessary to "walk_ast" over "select.from_clause". as QuerySource>::FromClause: QueryFragment, - // Necessary to "walk_ast" over "self.insert_statement". - InsertStatement, ISR>: QueryFragment, + // Necessary to "walk_ast" over "self.update_statement". 
+ UpdateStatement, U, V>: QueryFragment, // Necessary to "walk_ast" over "self.returning_clause". AsSelect: QueryFragment, { @@ -483,7 +480,7 @@ where // Write the update manually instead of with the dsl, to avoid the // explosion in complexity of type traits - out.push_sql("updated_row AS MATERIALIZED (UPDATE "); + out.push_sql("updated_parent_row AS MATERIALIZED (UPDATE "); self.from_clause.walk_ast(out.reborrow())?; out.push_sql(" SET "); out.push_identifier(ParentGenerationColumn::::NAME)?; @@ -497,24 +494,24 @@ where out.push_identifier(CollectionPrimaryKey::::NAME)?; out.push_sql(" FROM found_row) RETURNING 1), "); - out.push_sql("inserted_row AS ("); - // TODO: Check or force the insert_statement to have + out.push_sql("updated_resource_row AS ("); + // TODO: Check or force the update_statement to have // C::ChildCollectionIdColumn set - self.insert_statement.walk_ast(out.reborrow())?; + self.update_statement.walk_ast(out.reborrow())?; out.push_sql(" RETURNING "); // We manually write the RETURNING clause here because the wrapper type - // used for InsertStatement's Ret generic is private to diesel and so we + // used for UpdateStatement's Ret generic is private to diesel and so we // cannot express it. 
self.returning_clause.walk_ast(out.reborrow())?; - out.push_sql(") SELECT * FROM inserted_row"); + out.push_sql(") SELECT * FROM updated_resource_row"); Ok(()) } } #[cfg(test)] mod test { - use super::{AsyncInsertError, DatastoreAttachable, SyncInsertError}; + use super::{AsyncInsertError, DatastoreAttachTarget, SyncInsertError}; use crate::db::{ self, error::TransactionError, identity::Resource as IdentityResource, }; @@ -595,14 +592,12 @@ mod test { } struct Collection; - impl DatastoreAttachable for Collection { + impl DatastoreAttachTarget for Collection { type CollectionId = uuid::Uuid; type ParentGenerationColumn = collection::dsl::rcgen; type ParentTimeDeletedColumn = collection::dsl::time_deleted; - type ChildGenerationColumn = resource::dsl::rcgen; - type ChildTimeDeletedColumn = resource::dsl::time_deleted; type ChildCollectionIdColumn = resource::dsl::collection_id; } @@ -614,23 +609,12 @@ mod test { let resource_id = uuid::Uuid::parse_str("223cb7f7-0d3a-4a4e-a5e1-ad38ecb785d8") .unwrap(); - let create_time = - DateTime::::from_utc(NaiveDateTime::from_timestamp(0, 0), Utc); - let modify_time = - DateTime::::from_utc(NaiveDateTime::from_timestamp(1, 0), Utc); - let insert = Collection::insert_resource( + let attach = Collection::attach_resource( collection_id, - diesel::insert_into(resource::table).values(vec![( - resource::dsl::id.eq(resource_id), - resource::dsl::name.eq("test"), - resource::dsl::description.eq("desc"), - resource::dsl::time_created.eq(create_time), - resource::dsl::time_modified.eq(modify_time), - resource::dsl::rcgen.eq(0), - resource::dsl::collection_id.eq(Some(collection_id)), - )]), + diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) + .set(resource::dsl::collection_id.eq(collection_id)) ); - let query = diesel::debug_query::(&insert).to_string(); + let query = diesel::debug_query::(&attach).to_string(); let expected_query = "WITH \ found_row AS MATERIALIZED (SELECT \ @@ -648,28 +632,24 @@ mod test { dummy 
AS MATERIALIZED (SELECT IF(\ EXISTS(SELECT \"id\" FROM found_row), \ TRUE, CAST(1/0 AS BOOL))), \ - updated_row AS MATERIALIZED (UPDATE \ + updated_parent_row AS MATERIALIZED (UPDATE \ \"test_schema\".\"collection\" SET \"rcgen\" = \"rcgen\" + 1 \ WHERE \"id\" IN (SELECT \"id\" FROM found_row) RETURNING 1), \ - inserted_row AS (INSERT INTO \"test_schema\".\"resource\" \ - (\"id\", \"name\", \"description\", \"time_created\", \ - \"time_modified\", \"collection_id\") \ - VALUES ($2, $3, $4, $5, $6, $7) \ + updated_resource_row AS (UPDATE \"test_schema\".\"resource\" \ + SET \"collection_id\" = $2 \ + WHERE (\"test_schema\".\"resource\".\"id\" = $3) \ RETURNING \"test_schema\".\"resource\".\"id\", \ \"test_schema\".\"resource\".\"name\", \ \"test_schema\".\"resource\".\"description\", \ \"test_schema\".\"resource\".\"time_created\", \ \"test_schema\".\"resource\".\"time_modified\", \ \"test_schema\".\"resource\".\"time_deleted\", \ + \"test_schema\".\"resource\".\"rcgen\", \ \"test_schema\".\"resource\".\"collection_id\") \ - SELECT * FROM inserted_row \ + SELECT * FROM updated_resource_row \ -- binds: [223cb7f7-0d3a-4a4e-a5e1-ad38ecb785d0, \ - 223cb7f7-0d3a-4a4e-a5e1-ad38ecb785d8, \ - \"test\", \ - \"desc\", \ - 1970-01-01T00:00:00Z, \ - 1970-01-01T00:00:01Z, \ - 223cb7f7-0d3a-4a4e-a5e1-ad38ecb785d0]"; + 223cb7f7-0d3a-4a4e-a5e1-ad38ecb785d0, \ + 223cb7f7-0d3a-4a4e-a5e1-ad38ecb785d8]"; assert_eq!(query, expected_query); } @@ -685,33 +665,19 @@ mod test { let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); - let insert = Collection::insert_resource( + let attach = Collection::attach_resource( collection_id, - diesel::insert_into(resource::table).values(( - resource::dsl::id.eq(resource_id), - resource::dsl::name.eq("test"), - resource::dsl::description.eq("desc"), - resource::dsl::time_created.eq(Utc::now()), - resource::dsl::time_modified.eq(Utc::now()), - resource::dsl::rcgen.eq(0), - 
resource::dsl::collection_id.eq(Some(collection_id)), - )), + diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) + .set(resource::dsl::collection_id.eq(collection_id)) ) - .insert_and_get_result_async(pool.pool()) + .attach_and_get_result_async(pool.pool()) .await; - assert!(matches!(insert, Err(AsyncInsertError::CollectionNotFound))); + assert!(matches!(attach, Err(AsyncInsertError::CollectionNotFound))); - let insert_query = Collection::insert_resource( + let attach_query = Collection::attach_resource( collection_id, - diesel::insert_into(resource::table).values(( - resource::dsl::id.eq(resource_id), - resource::dsl::name.eq("test"), - resource::dsl::description.eq("desc"), - resource::dsl::time_created.eq(Utc::now()), - resource::dsl::time_modified.eq(Utc::now()), - resource::dsl::rcgen.eq(0), - resource::dsl::collection_id.eq(Some(collection_id)), - )), + diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) + .set(resource::dsl::collection_id.eq(collection_id)) ); #[derive(Debug)] @@ -723,7 +689,7 @@ mod test { let result = pool .pool() .transaction(move |conn| { - insert_query.insert_and_get_result(conn).map_err(|e| match e { + attach_query.attach_and_get_result(conn).map_err(|e| match e { SyncInsertError::CollectionNotFound => { TxnError::CustomError(CollectionError::NotFound) } @@ -767,31 +733,48 @@ mod test { .await .unwrap(); - let create_time = + // Insert the resource so it's present later + let insert_time = DateTime::::from_utc(NaiveDateTime::from_timestamp(0, 0), Utc); - let modify_time = - DateTime::::from_utc(NaiveDateTime::from_timestamp(1, 0), Utc); - let resource = Collection::insert_resource( - collection_id, - diesel::insert_into(resource::table).values(vec![( + diesel::insert_into(resource::table) + .values(vec![( resource::dsl::id.eq(resource_id), resource::dsl::name.eq("test"), resource::dsl::description.eq("desc"), - resource::dsl::time_created.eq(create_time), - 
resource::dsl::time_modified.eq(modify_time), - resource::dsl::rcgen.eq(0), - resource::dsl::collection_id.eq(Some(collection_id)), - )]), + resource::dsl::time_created.eq(insert_time), + resource::dsl::time_modified.eq(insert_time), + resource::dsl::rcgen.eq(1), + resource::dsl::collection_id.eq(Option::::None), + )]) + .execute_async(pool.pool()) + .await + .unwrap(); + + // Attempt to attach the resource. + let update_time = + DateTime::::from_utc(NaiveDateTime::from_timestamp(1, 0), Utc); + let resource = Collection::attach_resource( + collection_id, + diesel::update( + resource::table + .filter(resource::dsl::id.eq(resource_id)) + .filter(resource::dsl::time_deleted.is_null()) + ).set(( + resource::dsl::collection_id.eq(collection_id), + resource::dsl::time_modified.eq(update_time), + resource::dsl::rcgen.eq(resource::dsl::rcgen + 1), + )) ) - .insert_and_get_result_async(pool.pool()) + .attach_and_get_result_async(pool.pool()) .await .unwrap(); assert_eq!(resource.id(), resource_id); assert_eq!(resource.name().as_str(), "test"); assert_eq!(resource.description(), "desc"); - assert_eq!(resource.time_created(), create_time); - assert_eq!(resource.time_modified(), modify_time); + assert_eq!(resource.time_created(), insert_time); + assert_eq!(resource.time_modified(), update_time); assert_eq!(resource.collection_id.unwrap(), collection_id); + assert_eq!(resource.rcgen, 2); let collection_rcgen = collection::table .find(collection_id) diff --git a/nexus/src/db/model/disk.rs b/nexus/src/db/model/disk.rs index d4aa2f20e16..b7516557e93 100644 --- a/nexus/src/db/model/disk.rs +++ b/nexus/src/db/model/disk.rs @@ -163,6 +163,17 @@ impl DiskRuntimeState { } } + pub fn attaching(self, instance_id: Uuid) -> Self { + Self { + disk_state: external::DiskState::Attaching(instance_id) + .label() + .to_string(), + attach_instance_id: Some(instance_id), + gen: self.gen.next().into(), + time_updated: Utc::now(), + } + } + pub fn attach(self, instance_id: Uuid) -> Self { Self { 
disk_state: external::DiskState::Attached(instance_id) From ddcd66e8e4f9b7eb9a969b307bda1883ef22a430 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 17 May 2022 14:34:00 -0400 Subject: [PATCH 03/29] more wip --- nexus/src/db/collection_attach.rs | 82 ++++++++++++++++++++++--------- 1 file changed, 60 insertions(+), 22 deletions(-) diff --git a/nexus/src/db/collection_attach.rs b/nexus/src/db/collection_attach.rs index 62952776f39..509f5e0ffe3 100644 --- a/nexus/src/db/collection_attach.rs +++ b/nexus/src/db/collection_attach.rs @@ -9,18 +9,20 @@ //! 2) Updates the collection's child resource generation number //! 3) Updates the child resource row +use crate::db::model::Generation; use super::pool::DbConnection; use async_bb8_diesel::{ AsyncRunQueryDsl, ConnectionError, ConnectionManager, PoolError, }; use diesel::associations::HasTable; +use diesel::expression::{AsExpression, Expression}; use diesel::helper_types::*; use diesel::pg::Pg; use diesel::prelude::*; use diesel::query_builder::*; use diesel::query_dsl::methods as query_methods; use diesel::query_source::Table; -use diesel::sql_types::SingleValue; +use diesel::sql_types::{BigInt, SingleValue}; use std::fmt::Debug; use std::marker::PhantomData; @@ -83,7 +85,7 @@ pub trait DatastoreAttachTarget { /// The column in the CollectionTable that acts as a generation number. /// This is the "child-resource-generation-number" in RFD 192. - type ParentGenerationColumn: Column + Default; + type ParentGenerationColumn: Column + Default + Expression; /// The time deleted column in the CollectionTable // We enforce that this column comes from the same table as @@ -104,6 +106,7 @@ pub trait DatastoreAttachTarget { /// of the collection id within the attached row. fn attach_resource( key: Self::CollectionId, + rcgen: Generation, // TODO: I'd like to be able to add some filters on the parent type too. // For example, checking the instance state. 
@@ -145,7 +148,16 @@ pub trait DatastoreAttachTarget { > + query_methods::FilterDsl< IsNull>, Output = BoxedQuery>, + > + query_methods::FilterDsl< + Eq< + ParentGenerationColumn, +// Expression, +// AsExpr>, + >::Expression, + >, + Output = BoxedQuery>, >, + // Allows using "key" in in ".eq(...)". CollectionId: diesel::expression::AsExpression< SerializedCollectionPrimaryKey, @@ -154,23 +166,27 @@ pub trait DatastoreAttachTarget { SingleValue, // Allows calling "is_null()" on the time deleted column. ParentTimeDeletedColumn: ExpressionMethods, + + ParentGenerationColumn: ExpressionMethods, // Necessary for output type (`AttachToCollectionStatement`). ResourceType: Selectable, + + // XXX ? + as Expression>::SqlType: + SingleValue, { - let filter_subquery = Box::new( + let parent_table = || { as HasTable>::table() + }; + let filter_subquery = Box::new( + parent_table() .into_boxed() - .filter( - as HasTable>::table() - .primary_key() - .eq(key), - ) - .filter(Self::ParentTimeDeletedColumn::default().is_null()), + .filter(parent_table().primary_key().eq(key)) + .filter(Self::ParentTimeDeletedColumn::default().is_null()) + .filter(Self::ParentGenerationColumn::default().eq(rcgen)) ); - let from_clause = - as HasTable>::table() - .from_clause(); + let from_clause = parent_table().from_clause(); let returning_clause = ResourceType::as_returning(); AttachToCollectionStatement { update_statement: update, @@ -416,22 +432,39 @@ type BoxedDslOutput = diesel::internal::table_macro::BoxedSelectStatement< /// This implementation uses the following CTE: /// /// ```text -/// // WITH found_row AS MATERIALIZED ( -/// // SELECT FROM C WHERE = AND -/// // IS NULL FOR UPDATE), +/// // WITH +/// // /* Look up the parent collection */ +/// // found_row AS MATERIALIZED ( +/// // SELECT FROM C WHERE +/// // = AND +/// // IS NULL AND +/// // = +/// // FOR UPDATE +/// // ), +/// // /* Return an error if the parent collection does not exist */ /// // dummy AS MATERIALIZED ( -/// // SELECT 
IF(EXISTS(SELECT FROM found_row), TRUE, -/// // CAST(1/0 AS BOOL))), +/// // SELECT IF( +/// // EXISTS(SELECT FROM found_row), +/// // TRUE, +/// // CAST(1/0 AS BOOL)) +/// // ), +/// // /* Update the generation number of the parent row */ /// // updated_parent_row AS MATERIALIZED ( /// // UPDATE C SET = + 1 WHERE -/// // IN (SELECT FROM found_row) RETURNING 1), -/// // updated_resource_row AS ( -/// // RETURNING ) +/// // IN (SELECT FROM found_row) +/// // RETURNING 1 +/// // ), +/// // /* Update the resource row */ +/// // updated_resource_row AS ( +/// // +/// // RETURNING +/// // ) /// // SELECT * FROM updated_resource_row; /// ``` /// -/// This CTE is equivalent in desired behavior to the one specified in -/// [RFD 192](https://rfd.shared.oxide.computer/rfd/0192#_dueling_administrators). +/// This CTE is similar in desired behavior to the one specified in +/// [RFD 192](https://rfd.shared.oxide.computer/rfd/0192#_dueling_administrators), +/// but tuned to the case of modifying an associated resource. 
/// /// The general idea is that the first clause of the CTE (the "dummy" table) /// will generate a divison-by-zero error and rollback the transaction if the @@ -514,6 +547,7 @@ mod test { use super::{AsyncInsertError, DatastoreAttachTarget, SyncInsertError}; use crate::db::{ self, error::TransactionError, identity::Resource as IdentityResource, + model::Generation, }; use async_bb8_diesel::{ AsyncConnection, AsyncRunQueryDsl, AsyncSimpleConnection, @@ -611,6 +645,7 @@ mod test { .unwrap(); let attach = Collection::attach_resource( collection_id, + Generation::new(), diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) .set(resource::dsl::collection_id.eq(collection_id)) ); @@ -667,6 +702,7 @@ mod test { let resource_id = uuid::Uuid::new_v4(); let attach = Collection::attach_resource( collection_id, + Generation::new(), diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) .set(resource::dsl::collection_id.eq(collection_id)) ) @@ -676,6 +712,7 @@ mod test { let attach_query = Collection::attach_resource( collection_id, + Generation::new(), diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) .set(resource::dsl::collection_id.eq(collection_id)) ); @@ -755,6 +792,7 @@ mod test { DateTime::::from_utc(NaiveDateTime::from_timestamp(1, 0), Utc); let resource = Collection::attach_resource( collection_id, + Generation::new(), diesel::update( resource::table .filter(resource::dsl::id.eq(resource_id)) From bee020b0db295119d8f790ece007bb0be95ce694 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 18 May 2022 18:08:59 -0400 Subject: [PATCH 04/29] collection attach - at least the query builds --- nexus/src/app/sagas/disk_attach.rs | 130 +++++ nexus/src/app/sagas/mod.rs | 6 + nexus/src/db/collection_attach.rs | 814 ++++++++++++++++++++--------- 3 files changed, 689 insertions(+), 261 deletions(-) create mode 100644 nexus/src/app/sagas/disk_attach.rs diff --git a/nexus/src/app/sagas/disk_attach.rs 
b/nexus/src/app/sagas/disk_attach.rs new file mode 100644 index 00000000000..402493c8693 --- /dev/null +++ b/nexus/src/app/sagas/disk_attach.rs @@ -0,0 +1,130 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use crate::external_api::params; +use crate::saga_interface::SagaContext; +use crate::{authn, db}; +use lazy_static::lazy_static; +use serde::Deserialize; +use serde::Serialize; +use std::sync::Arc; +use steno::new_action_noop_undo; +use steno::ActionContext; +use steno::ActionError; +use steno::ActionFunc; +use steno::SagaTemplate; +use steno::SagaTemplateBuilder; +use steno::SagaType; +use uuid::Uuid; + +pub const SAGA_NAME: &'static str = "disk-attach"; + +lazy_static! { + pub static ref SAGA_TEMPLATE: Arc> = + Arc::new(saga_disk_attach()); +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct Params { + pub serialized_authn: authn::saga::Serialized, + pub instance_id: Uuid, + pub attach_params: params::InstanceDiskAttach, +} + +#[derive(Debug)] +pub struct SagaDiskAttach; +impl SagaType for SagaDiskAttach { + type SagaParamsType = Arc; + type ExecContextType = Arc; +} + +fn saga_disk_attach() -> SagaTemplate { + let mut template_builder = SagaTemplateBuilder::new(); + + template_builder.append( + "attaching_disk", + "SetDiskStateAttaching", + ActionFunc::new_action( + sda_set_disk_record_attaching, + sda_set_disk_record_attaching_undo, + ), + ); + + template_builder.append( + "sled_reported_runtime", + "UpdateSledAgent", + ActionFunc::new_action( + sda_update_sled_agent, + sda_update_sled_agent_undo, + ), + ); + + template_builder.append( + "disk_runtime", + "SetDiskStateAttached", + new_action_noop_undo(sda_set_disk_record_attached), + ); + + template_builder.build() +} + +async fn sda_set_disk_record_attaching( + sagactx: ActionContext, +) -> Result { + let _osagactx = sagactx.user_data(); 
+ let _params = sagactx.saga_params(); + + // TODO: Issue CTE + // + // To actually perform the update: + // + // - Disk State must be: + // Attaching (w/Instance ID = ID | Attached (w/Instance ID = ID) | Detached + // + // - Instance state must be: + // Running | Starting | Rebooting | Migrating -> Issue attach to sled + // Stopping | Stopped | Repairing -> Update DB + // _ -> Error + // + // - # of attached disks must be less than capacity + + todo!(); +} + +async fn sda_set_disk_record_attaching_undo( + sagactx: ActionContext, +) -> Result<(), anyhow::Error> { + let _osagactx = sagactx.user_data(); + + // TODO: If we get here, we must have attached the disk. + // Ergo, set the state to "detached"? + todo!(); +} + +async fn sda_update_sled_agent( + sagactx: ActionContext, +) -> Result { + let _log = sagactx.user_data().log(); + + // TODO: call "disk_put" + todo!(); +} + +async fn sda_update_sled_agent_undo( + _sagactx: ActionContext, +) -> Result<(), anyhow::Error> { + + // TODO: Undo the "disk_put". 
+ todo!(); +} + +async fn sda_set_disk_record_attached( + sagactx: ActionContext, +) -> Result { + let _osagactx = sagactx.user_data(); + + // TODO: Move the disk state from "Attaching" -> "Attached" + + todo!(); +} diff --git a/nexus/src/app/sagas/mod.rs b/nexus/src/app/sagas/mod.rs index ba3a88e7575..1b97ecb7e69 100644 --- a/nexus/src/app/sagas/mod.rs +++ b/nexus/src/app/sagas/mod.rs @@ -20,6 +20,7 @@ use steno::SagaTemplateGeneric; use steno::SagaType; use uuid::Uuid; +pub mod disk_attach; pub mod disk_create; pub mod disk_delete; pub mod instance_create; @@ -44,6 +45,11 @@ fn all_templates( Arc::clone(&instance_migrate::SAGA_TEMPLATE) as Arc>>, ), + ( + disk_attach::SAGA_NAME, + Arc::clone(&disk_attach::SAGA_TEMPLATE) + as Arc>>, + ), ( disk_create::SAGA_NAME, Arc::clone(&disk_create::SAGA_TEMPLATE) diff --git a/nexus/src/db/collection_attach.rs b/nexus/src/db/collection_attach.rs index 509f5e0ffe3..dba3e1fbd7f 100644 --- a/nexus/src/db/collection_attach.rs +++ b/nexus/src/db/collection_attach.rs @@ -2,30 +2,50 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! CTE implementation for updating a row representing a child resource of a -//! collection. This atomically -//! 1) Checks if the collection exists and is not soft deleted, and fails -//! otherwise -//! 2) Updates the collection's child resource generation number -//! 3) Updates the child resource row - -use crate::db::model::Generation; +//! CTE for attaching a resource to a collection. +//! +//! This atomically: +//! - Checks if the collection exists and is not soft deleted +//! - Updates the collection's resource generation number +//! 
- Updates the resource row + use super::pool::DbConnection; use async_bb8_diesel::{ - AsyncRunQueryDsl, ConnectionError, ConnectionManager, PoolError, + AsyncRunQueryDsl, ConnectionManager, PoolError, }; use diesel::associations::HasTable; -use diesel::expression::{AsExpression, Expression}; +use diesel::expression::Expression; use diesel::helper_types::*; use diesel::pg::Pg; use diesel::prelude::*; use diesel::query_builder::*; use diesel::query_dsl::methods as query_methods; use diesel::query_source::Table; -use diesel::sql_types::{BigInt, SingleValue}; +use diesel::sql_types::{BigInt, Nullable, SingleValue}; use std::fmt::Debug; use std::marker::PhantomData; +/// The table representing the collection. The resource references +/// this table. +type CollectionTable = <>::CollectionGenerationColumn as Column>::Table; +/// The table representing the resource. This table contains an +/// ID acting as a foreign key into the collection table. +type ResourceTable = <>::ResourceCollectionIdColumn as Column>::Table; +type CollectionGenerationColumn = + >::CollectionGenerationColumn; +type CollectionIdColumn = + >::CollectionIdColumn; +type ResourceIdColumn = + >::ResourceIdColumn; + +/// Trick to check that columns come from the same table +pub trait TypesAreSame {} +impl TypesAreSame for (T, T) {} + /// Trait to be implemented by structs representing an attachable collection. /// /// For example, since Instances have a one-to-many relationship with @@ -47,7 +67,6 @@ use std::marker::PhantomData; /// # test_schema.disk (id) { /// # id -> Uuid, /// # time_deleted -> Nullable, -/// # rcgen -> Int8, /// # instance_id -> Nullable, /// # } /// # } @@ -57,7 +76,6 @@ use std::marker::PhantomData; /// struct Disk { /// pub id: uuid::Uuid, /// pub time_deleted: Option>, -/// pub rcgen: Generation, /// pub instance_id: Option, /// } /// @@ -70,31 +88,41 @@ use std::marker::PhantomData; /// } /// /// impl DatastoreAttachTarget for Instance { -/// // Type of Instance::identity::id. 
-/// type CollectionId = uuid::Uuid; +/// // Type of instance::id and disk::id. +/// type Id = uuid::Uuid; /// -/// type ParentGenerationColumn = instance::dsl::rcgen; -/// type ParentTimeDeletedColumn = instance::dsl::time_deleted; +/// type CollectionIdColumn = instance::dsl::id; +/// type CollectionGenerationColumn = instance::dsl::rcgen; +/// type CollectionTimeDeletedColumn = instance::dsl::time_deleted; /// -/// type ChildCollectionIdColumn = disk::dsl::instance_id; +/// type ResourceIdColumn = disk::dsl::id; +/// type ResourceCollectionIdColumn = disk::dsl::instance_id; +/// type ResourceTimeDeletedColumn = disk::dsl::time_deleted; /// } /// ``` -pub trait DatastoreAttachTarget { - /// The Rust type of the collection id (typically Uuid for us) - type CollectionId: Copy + Debug; +pub trait DatastoreAttachTarget : Selectable { + /// The Rust type of the collection and resource ids (typically Uuid). + type Id: Copy + Debug + PartialEq + Send + 'static; + + type CollectionIdColumn: Column; /// The column in the CollectionTable that acts as a generation number. - /// This is the "child-resource-generation-number" in RFD 192. - type ParentGenerationColumn: Column + Default + Expression; + /// This is the "resource-generation-number" in RFD 192. + type CollectionGenerationColumn: Column + Default + Expression; /// The time deleted column in the CollectionTable // We enforce that this column comes from the same table as - // ParentGenerationColumn when defining attach_resource() below. - type ParentTimeDeletedColumn: Column + Default; + // CollectionGenerationColumn when defining attach_resource() below. 
+ type CollectionTimeDeletedColumn: Column + Default; + + type ResourceIdColumn: Column; /// The column in the ResourceType that acts as a foreign key into /// the CollectionTable - type ChildCollectionIdColumn: Column; + type ResourceCollectionIdColumn: Column + Default; + + /// The time deleted column in the ResourceTable + type ResourceTimeDeletedColumn: Column + Default; /// Create a statement for attaching a resource to the given collection. /// @@ -105,10 +133,13 @@ pub trait DatastoreAttachTarget { /// CAUTION: The API does not currently enforce that `key` matches the value /// of the collection id within the attached row. fn attach_resource( - key: Self::CollectionId, - rcgen: Generation, - // TODO: I'd like to be able to add some filters on the parent type too. - // For example, checking the instance state. + collection_id: Self::Id, + resource_id: Self::Id, + + collection_query: BoxedQuery>, + resource_query: BoxedQuery>, + + max_attached_resources: usize, // Note that UpdateStatement's fourth argument defaults to Ret = // NoReturningClause. This enforces that the given input statement does @@ -116,9 +147,14 @@ pub trait DatastoreAttachTarget { update: UpdateStatement, U, V>, ) -> AttachToCollectionStatement where + // TODO: More of this? ( - ::Table, - ::Table, + ::Table, + ::Table, + ): TypesAreSame, + ( + ::Table, + ::Table, ): TypesAreSame, Self: Sized, // Enables the "table()" method. @@ -132,94 +168,147 @@ pub trait DatastoreAttachTarget { Pg, Output = BoxedDslOutput>, >, - // Allows treating "filter_subquery" as a boxed "dyn QueryFragment". + // Enables the "table()" method. + ResourceTable: HasTable
> + + 'static + + Send + + Table + // Allows calling ".into_boxed()" on the table. + + query_methods::BoxedDsl< + 'static, + Pg, + Output = BoxedDslOutput>, + >, + // Allows treating "collection_exists_query" as a boxed "dyn QueryFragment". as QuerySource>::FromClause: QueryFragment + Send, - // Allows sending "filter_subquery" between threads. + // Allows treating "resource_exists_query" as a boxed "dyn QueryFragment". + as QuerySource>::FromClause: + QueryFragment + Send, + // Allows sending "collection_exists_query" between threads. as AsQuery>::SqlType: Send, + // Allows sending "resource_exists_query" between threads. + as AsQuery>::SqlType: Send, // Allows calling ".filter()" on the boxed table. BoxedQuery>: query_methods::FilterDsl< Eq< CollectionPrimaryKey, - CollectionId, + Self::Id, >, Output = BoxedQuery>, > + query_methods::FilterDsl< - IsNull>, + IsNull, Output = BoxedQuery>, - > + query_methods::FilterDsl< + >, + BoxedQuery>: + query_methods::FilterDsl< Eq< - ParentGenerationColumn, -// Expression, -// AsExpr>, - >::Expression, + ResourcePrimaryKey, + Self::Id, >, - Output = BoxedQuery>, - >, + Output = BoxedQuery>, + > + query_methods::FilterDsl< + Eq< + Self::ResourceCollectionIdColumn, + Self::Id, + >, + Output = BoxedQuery>, + > + query_methods::FilterDsl< + IsNull, + Output = BoxedQuery>, + >, - // Allows using "key" in in ".eq(...)". - CollectionId: diesel::expression::AsExpression< + // Allows using "id" in expressions (e.g. ".eq(...)") with... + Self::Id: diesel::expression::AsExpression< + // ... The Collection table's PK SerializedCollectionPrimaryKey, + > + diesel::expression::AsExpression< + // ... The Resource table's PK + SerializedResourcePrimaryKey, + > + diesel::expression::AsExpression< + // ... The Resource table's FK to the Collection table + SerializedResourceForeignKey >, as Expression>::SqlType: SingleValue, + as Expression>::SqlType: + SingleValue, + // Allows calling "is_null()" on the time deleted column. 
- ParentTimeDeletedColumn: ExpressionMethods, + Self::CollectionTimeDeletedColumn: ExpressionMethods, + Self::ResourceTimeDeletedColumn: ExpressionMethods, + Self::ResourceCollectionIdColumn: ExpressionMethods, - ParentGenerationColumn: ExpressionMethods, + Self::CollectionGenerationColumn: ExpressionMethods, // Necessary for output type (`AttachToCollectionStatement`). ResourceType: Selectable, // XXX ? - as Expression>::SqlType: + ::SqlType: + SingleValue, + ::SqlType: SingleValue, { - let parent_table = || { + let collection_table = || { as HasTable>::table() }; - let filter_subquery = Box::new( - parent_table() + let resource_table = || { + as HasTable>::table() + }; + + let collection_exists_query = Box::new( + collection_table() + .into_boxed() + .filter(collection_table().primary_key().eq(collection_id)) + .filter(Self::CollectionTimeDeletedColumn::default().is_null()) + ); + let resource_exists_query = Box::new( + resource_table() + .into_boxed() + .filter(resource_table().primary_key().eq(resource_id)) + .filter(Self::ResourceTimeDeletedColumn::default().is_null()) + ); + + let resource_count_query = Box::new( + resource_table() .into_boxed() - .filter(parent_table().primary_key().eq(key)) - .filter(Self::ParentTimeDeletedColumn::default().is_null()) - .filter(Self::ParentGenerationColumn::default().eq(rcgen)) + .filter(Self::ResourceCollectionIdColumn::default().eq(collection_id)) + .filter(Self::ResourceTimeDeletedColumn::default().is_null()) + .count() + ); + + let collection_query = Box::new( + collection_query + .filter(collection_table().primary_key().eq(collection_id)) + .filter(Self::CollectionTimeDeletedColumn::default().is_null()) ); - let from_clause = parent_table().from_clause(); - let returning_clause = ResourceType::as_returning(); + let resource_query = Box::new( + resource_query + .filter(resource_table().primary_key().eq(resource_id)) + .filter(Self::ResourceTimeDeletedColumn::default().is_null()) + ); + + let collection_from_clause = 
collection_table().from_clause(); + let collection_returning_clause = Self::as_returning(); + let resource_returning_clause = ResourceType::as_returning(); AttachToCollectionStatement { - update_statement: update, - filter_subquery, - from_clause, - returning_clause, + collection_exists_query, + resource_exists_query, + resource_count_query, + collection_query, + resource_query, + max_attached_resources, + update_resource_statement: update, + collection_from_clause, + collection_returning_clause, + resource_returning_clause, query_type: PhantomData, } } } -/// Utility type to make trait bounds below easier to read. -type CollectionId = - >::CollectionId; -/// The table representing the collection. The resource references -/// this table. -type CollectionTable = <>::ParentGenerationColumn as Column>::Table; -/// The table representing the resource. This table contains an -/// ID acting as a foreign key into the collection table. -type ResourceTable = <>::ChildCollectionIdColumn as Column>::Table; -type ParentTimeDeletedColumn = - >::ParentTimeDeletedColumn; -type ParentGenerationColumn = - >::ParentGenerationColumn; - -// Trick to check that columns come from the same table -pub trait TypesAreSame {} -impl TypesAreSame for (T, T) {} - /// The CTE described in the module docs #[must_use = "Queries must be executed"] pub struct AttachToCollectionStatement @@ -227,10 +316,25 @@ where ResourceType: Selectable, C: DatastoreAttachTarget, { - update_statement: UpdateStatement, U, V>, - filter_subquery: Box + Send>, - from_clause: as QuerySource>::FromClause, - returning_clause: AsSelect, + // Query which answers: "Does the collection exist?" + collection_exists_query: Box + Send>, + // Query which answers: "Does the resource exist?" + resource_exists_query: Box + Send>, + // Query which answers: "How many resources are associated with the + // collection?" + resource_count_query: Box + Send>, + // A (mostly) user-provided query for validating the collection. 
+ collection_query: Box + Send>, + // A (mostly) user-provided query for validating the resource. + resource_query: Box + Send>, + // The maximum number of resources which may be attached to the collection. + max_attached_resources: usize, + + // Update statement for the resource. + update_resource_statement: UpdateStatement, U, V>, + collection_from_clause: as QuerySource>::FromClause, + collection_returning_clause: AsSelect, + resource_returning_clause: AsSelect, query_type: PhantomData, } @@ -245,34 +349,49 @@ where } /// Result of [`AttachToCollectionStatement`] when executed asynchronously -pub type AsyncAttachToCollectionResult = Result; +pub type AsyncAttachToCollectionResult = Result>; +/* /// Result of [`AttachToCollectionStatement`] when executed synchronously -pub type SyncAttachToCollectionResult = Result; +pub type SyncAttachToCollectionResult = Result; +*/ /// Errors returned by [`AttachToCollectionStatement`]. #[derive(Debug)] -pub enum AsyncInsertError { +pub enum AttachError { /// The collection that the query was inserting into does not exist CollectionNotFound, + /// The resource being attached does not exist + ResourceNotFound, + /// Too many resources are currently attached to the collection + TooManyAttached, + /// Although the resource and collection exist, the update did not occur + /// + /// The unchanged resource and collection are returned as a part of this + /// error; it is the responsibility of the caller to determine which + /// condition was not met. + NoUpdate(ResourceType, C), /// Other database error DatabaseError(PoolError), } +/* /// Errors returned by [`AttachToCollectionStatement`]. 
#[derive(Debug)] -pub enum SyncInsertError { +pub enum SyncAttachError { /// The collection that the query was inserting into does not exist CollectionNotFound, /// Other database error DatabaseError(diesel::result::Error), } +*/ + +type RawOutput = (i64, Option, Option, Option, Option); impl AttachToCollectionStatement where ResourceType: 'static + Debug + Send + Selectable, C: 'static + DatastoreAttachTarget + Send, - CollectionId: 'static + PartialEq + Send, ResourceTable: 'static + Table + Send + Copy + Debug, U: 'static + Send, V: 'static + Send, @@ -287,34 +406,21 @@ where pub async fn attach_and_get_result_async( self, pool: &bb8::Pool>, - ) -> AsyncAttachToCollectionResult + ) -> AsyncAttachToCollectionResult where // We require this bound to ensure that "Self" is runnable as query. - Self: query_methods::LoadQuery<'static, DbConnection, ResourceType>, + Self: query_methods::LoadQuery<'static, DbConnection, RawOutput>, { - self.get_result_async::(pool) + let capacity = self.max_attached_resources; + self.get_result_async::>(pool) .await - .map_err(Self::translate_async_error) + // If the database returns an error, propagate it right away. + .map_err(|e| AttachError::DatabaseError(e)) + // Otherwise, parse the output to determine if the CTE succeeded. + .and_then(|r| Self::parse_result(r, capacity)) } - /// Issues the CTE asynchronously and parses the result. - /// - /// The three outcomes are: - /// - Ok(Vec of new rows) - /// - Error(collection not found) - /// - Error(other diesel error) - pub async fn attach_and_get_results_async( - self, - pool: &bb8::Pool>, - ) -> AsyncAttachToCollectionResult> - where - // We require this bound to ensure that "Self" is runnable as query. - Self: query_methods::LoadQuery<'static, DbConnection, ResourceType>, - { - self.get_results_async::(pool) - .await - .map_err(Self::translate_async_error) - } + /* /// Issues the CTE synchronously and parses the result. 
/// @@ -332,64 +438,67 @@ where { self.get_result::(conn) .map_err(Self::translate_sync_error) + .map(parse_result) } - /// Issues the CTE synchronously and parses the result. - /// - /// The three outcomes are: - /// - Ok(Vec of new rows) - /// - Error(collection not found) - /// - Error(other diesel error) - pub fn attach_and_get_results( - self, - conn: &mut DbConnection, - ) -> SyncAttachToCollectionResult> - where - // We require this bound to ensure that "Self" is runnable as query. - Self: query_methods::LoadQuery<'static, DbConnection, ResourceType>, - { - self.get_results::(conn) - .map_err(Self::translate_sync_error) - } + */ + + fn parse_result( + result: RawOutput, + capacity: usize, + ) -> Result> { + let ( + attached_count, + collection_before_update, + resource_before_update, + collection_after_update, + resource_after_update + ) = result; + + // TODO: avoid unwrap here + if attached_count >= capacity.try_into().unwrap() { + return Err(AttachError::TooManyAttached); + } - /// Check for the intentional division by zero error - fn error_is_division_by_zero(err: &diesel::result::Error) -> bool { - match err { - // See - // https://rfd.shared.oxide.computer/rfd/0192#_dueling_administrators - // for a full explanation of why we're checking for this. In - // summary, the CTE generates a division by zero intentionally - // if the collection doesn't exist in the database. 
- diesel::result::Error::DatabaseError( - diesel::result::DatabaseErrorKind::Unknown, - info, - ) if info.message() == "division by zero" => true, - _ => false, + let collection_before_update = + collection_before_update.ok_or_else(|| AttachError::CollectionNotFound)?; + + let resource_before_update = + resource_before_update.ok_or_else(|| AttachError::ResourceNotFound)?; + + match (collection_after_update, resource_after_update) { + (Some(_), Some(resource)) => Ok(resource), + (None, None) => Err(AttachError::NoUpdate(resource_before_update, collection_before_update)), + _ => panic!("Partial update applied - This is a CTE bug"), } } - /// Translate from diesel errors into AsyncInsertError, handling the + /* + /// Translate from diesel errors into AttachError, handling the /// intentional division-by-zero error in the CTE. - fn translate_async_error(err: PoolError) -> AsyncInsertError { + fn translate_async_error(err: PoolError) -> AttachError { match err { PoolError::Connection(ConnectionError::Query(err)) if Self::error_is_division_by_zero(&err) => { - AsyncInsertError::CollectionNotFound + AttachError::CollectionNotFound } - other => AsyncInsertError::DatabaseError(other), + other => AttachError::DatabaseError(other), } } + */ - /// Translate from diesel errors into SyncInsertError, handling the + /* + /// Translate from diesel errors into SyncAttachError, handling the /// intentional division-by-zero error in the CTE. 
- fn translate_sync_error(err: diesel::result::Error) -> SyncInsertError { + fn translate_sync_error(err: diesel::result::Error) -> SyncAttachError { if Self::error_is_division_by_zero(&err) { - SyncInsertError::CollectionNotFound + SyncAttachError::CollectionNotFound } else { - SyncInsertError::DatabaseError(err) + SyncAttachError::DatabaseError(err) } } + */ } type SelectableSqlType = @@ -401,7 +510,18 @@ where ResourceType: Selectable, C: DatastoreAttachTarget, { - type SqlType = SelectableSqlType; + type SqlType = ( + // The number of resources attached to the collection before update. + BigInt, + // If the collection exists, the value before update. + Nullable>, + // If the resource exists, the value before update. + Nullable>, + // If the collection was updated, the new value. + Nullable>, + // If the resource was updated, the new value. + Nullable>, + ); } impl RunQueryDsl @@ -415,9 +535,19 @@ where // Representation of Primary Key in Rust. type CollectionPrimaryKey = as Table>::PrimaryKey; +type ResourcePrimaryKey = + as Table>::PrimaryKey; +type ResourceForeignKey = >::ResourceCollectionIdColumn; + // Representation of Primary Key in SQL. type SerializedCollectionPrimaryKey = as diesel::Expression>::SqlType; +type SerializedResourcePrimaryKey = + as diesel::Expression>::SqlType; +type SerializedResourceForeignKey = + as diesel::Expression>::SqlType; type TableSqlType = ::SqlType; @@ -433,33 +563,63 @@ type BoxedDslOutput = diesel::internal::table_macro::BoxedSelectStatement< /// /// ```text /// // WITH -/// // /* Look up the parent collection */ -/// // found_row AS MATERIALIZED ( -/// // SELECT FROM C WHERE -/// // = AND -/// // IS NULL AND -/// // = +/// // /* Look up the collection - Check for existence only! 
*/ +/// // collection_by_id AS ( +/// // SELECT * FROM C +/// // WHERE = AND IS NULL /// // FOR UPDATE /// // ), -/// // /* Return an error if the parent collection does not exist */ -/// // dummy AS MATERIALIZED ( +/// // /* Look up the resource - Check for existence only! */ +/// // resource_by_id AS ( +/// // SELECT * FROM R +/// // WHERE = AND IS NULL +/// // FOR UPDATE +/// // ), +/// // /* Count the number of attached resources */ +/// // resource_count AS ( +/// // SELECT COUNT(*) FROM R +/// // WHERE = AND IS NULL +/// // ), +/// // /* Look up the collection - Check for additional constraints */ +/// // collection_info AS ( +/// // SELECT * FROM C +/// // WHERE = AND IS NULL AND +/// // +/// // FOR UPDATE +/// // ), +/// // /* Look up the resource - Check for additional constraints */ +/// // resource_info AS ( +/// // SELECT * FROM R +/// // WHERE = AND IS NULL AND +/// // +/// // FOR UPDATE +/// // ), +/// // /* Make a decision on whether or not to apply ANY updates */ +/// // do_update AS ( /// // SELECT IF( -/// // EXISTS(SELECT FROM found_row), -/// // TRUE, -/// // CAST(1/0 AS BOOL)) +/// // EXISTS(SELECT id FROM collection_info) AND +/// // EXISTS(SELECT id FROM resource_info) AND +/// // (SELECT * FROM resource_count) < , +/// // TRUE, FALSE), /// // ), -/// // /* Update the generation number of the parent row */ -/// // updated_parent_row AS MATERIALIZED ( -/// // UPDATE C SET = + 1 WHERE -/// // IN (SELECT FROM found_row) -/// // RETURNING 1 +/// // /* Update the generation number of the collection row */ +/// // updated_collection AS ( +/// // UPDATE C SET = + 1 +/// // WHERE IN (SELECT FROM collection_info) AND (SELECT * FROM do_update) +/// // RETURNING * /// // ), -/// // /* Update the resource row */ -/// // updated_resource_row AS ( -/// // -/// // RETURNING +/// // /* Update the resource */ +/// // updated_resource AS ( +/// // UPDATE R SET +/// // WHERE IN (SELECT FROM resource_info) AND (SELECT * FROM do_update) +/// // RETURNING * /// // 
)
-/// // SELECT * FROM updated_resource_row;
+/// // SELECT
+/// //   (SELECT * FROM resource_count),
+/// //   COALESCE((SELECT * FROM collection_by_id)),
+/// //   COALESCE((SELECT * FROM resource_by_id)),
+/// //   COALESCE((SELECT * FROM updated_collection)),
+/// //   COALESCE((SELECT * FROM updated_resource));
 /// ```
 ///
 /// This CTE is similar in desired behavior to the one specified in
@@ -467,12 +627,12 @@ type BoxedDslOutput = diesel::internal::table_macro::BoxedSelectStatement<
 /// but tuned to the case of modifying an associated resource.
 ///
 /// The general idea is that the first clause of the CTE (the "dummy" table)
-/// will generate a divison-by-zero error and rollback the transaction if the
+/// will generate a division-by-zero error and rollback the transaction if the
 /// target collection is not found in its table. It simultaneously locks the
-/// row for update, to allow us to subsequently use the "updated_parent_row" query to
-/// increase the child-resource generation count for the collection. In the same
+/// row for update, to allow us to subsequently use the "updated_collection" query to
+/// increase the resource generation count for the collection. In the same
 /// transaction, it performs the provided update statement, which should
-/// update the child resource, referencing the collection ID to the parent
+/// update the resource, referencing the collection ID of the
 /// collection we just checked for.
 ///
 /// NOTE: It is important that the WHERE clauses on the SELECT and UPDATE
@@ -490,61 +650,86 @@ where
     ResourceType: Selectable,
     C: DatastoreAttachTarget,
     CollectionPrimaryKey: diesel::Column,
-    // Necessary to "walk_ast" over "select.from_clause".
+    // Necessary to "walk_ast" over "select.collection_from_clause".
     as QuerySource>::FromClause:
         QueryFragment,
-    // Necessary to "walk_ast" over "self.update_statement".
+    // Necessary to "walk_ast" over "self.update_resource_statement".
UpdateStatement, U, V>: QueryFragment, - // Necessary to "walk_ast" over "self.returning_clause". + // Necessary to "walk_ast" over "self.resource_returning_clause". AsSelect: QueryFragment, + // Necessary to "walk_ast" over "self.collection_returning_clause". + AsSelect: QueryFragment, { fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, Pg>) -> QueryResult<()> { - out.push_sql("WITH found_row AS MATERIALIZED ("); - self.filter_subquery.walk_ast(out.reborrow())?; - // Manually add the FOR_UPDATE, since .for_update() is incompatible with - // BoxedQuery + out.push_sql("WITH collection_by_id AS ("); + self.collection_exists_query.walk_ast(out.reborrow())?; + out.push_sql(" FOR UPDATE), "); + + out.push_sql("resource_by_id AS ("); + self.resource_exists_query.walk_ast(out.reborrow())?; out.push_sql(" FOR UPDATE), "); + + out.push_sql("resource_count AS ("); + self.resource_count_query.walk_ast(out.reborrow())?; + out.push_sql("), "); + + out.push_sql("collection_info AS ("); + self.collection_query.walk_ast(out.reborrow())?; + out.push_sql(" FOR UPDATE), "); + + out.push_sql("resource_info AS ("); + self.resource_query.walk_ast(out.reborrow())?; + out.push_sql(" FOR UPDATE), "); + + out.push_sql("do_update AS (SELECT IF(EXISTS(SELECT "); + out.push_identifier(CollectionIdColumn::::NAME)?; + out.push_sql(" FROM collection_info) AND EXISTS(SELECT "); + out.push_identifier(ResourceIdColumn::::NAME)?; out.push_sql( - "dummy AS MATERIALIZED (\ - SELECT IF(EXISTS(SELECT ", + &format!(" FROM resource_info) AND (SELECT * FROM resource_count) < {}, TRUE,FALSE)), ", + self.max_attached_resources) ); - out.push_identifier(CollectionPrimaryKey::::NAME)?; - out.push_sql(" FROM found_row), TRUE, CAST(1/0 AS BOOL))), "); - // Write the update manually instead of with the dsl, to avoid the - // explosion in complexity of type traits - out.push_sql("updated_parent_row AS MATERIALIZED (UPDATE "); - self.from_clause.walk_ast(out.reborrow())?; + out.push_sql("updated_collection AS 
(UPDATE "); + self.collection_from_clause.walk_ast(out.reborrow())?; out.push_sql(" SET "); - out.push_identifier(ParentGenerationColumn::::NAME)?; + out.push_identifier(CollectionGenerationColumn::::NAME)?; out.push_sql(" = "); - out.push_identifier(ParentGenerationColumn::::NAME)?; + out.push_identifier(CollectionGenerationColumn::::NAME)?; out.push_sql(" + 1 WHERE "); out.push_identifier(CollectionPrimaryKey::::NAME)?; out.push_sql(" IN (SELECT "); - // We must include "RETURNING 1" since all CTE clauses must return - // something out.push_identifier(CollectionPrimaryKey::::NAME)?; - out.push_sql(" FROM found_row) RETURNING 1), "); - - out.push_sql("updated_resource_row AS ("); - // TODO: Check or force the update_statement to have - // C::ChildCollectionIdColumn set - self.update_statement.walk_ast(out.reborrow())?; + out.push_sql(" FROM collection_info) AND (SELECT * FROM do_update) RETURNING "); + // TODO: You don't actually need to return anything here. We only care + // about the inserted resource... + self.collection_returning_clause.walk_ast(out.reborrow())?; + out.push_sql("), "); + + out.push_sql("updated_resource AS ("); + // TODO: Check or force the update_resource_statement to have + // C::ResourceCollectionIdColumn set + self.update_resource_statement.walk_ast(out.reborrow())?; + // TODO: Is this safe? There must be a WHERE clause for this to work. + out.push_sql(" AND (SELECT * FROM do_update)"); out.push_sql(" RETURNING "); - // We manually write the RETURNING clause here because the wrapper type - // used for UpdateStatement's Ret generic is private to diesel and so we - // cannot express it. 
- self.returning_clause.walk_ast(out.reborrow())?; + self.resource_returning_clause.walk_ast(out.reborrow())?; + out.push_sql(") "); + + out.push_sql("SELECT \ + (SELECT * FROM resource_count) as resource_count, \ + COALESCE((SELECT * FROM collection_by_id)) as collection_by_id, \ + COALESCE((SELECT * FROM resource_by_id)) as resource_by_id, \ + COALESCE((SELECT * FROM updated_collection)) as updated_collection, \ + COALESCE((SELECT * FROM updated_resource)) as updated_resource;"); - out.push_sql(") SELECT * FROM updated_resource_row"); Ok(()) } } #[cfg(test)] mod test { - use super::{AsyncInsertError, DatastoreAttachTarget, SyncInsertError}; + use super::{AttachError, DatastoreAttachTarget}; use crate::db::{ self, error::TransactionError, identity::Resource as IdentityResource, model::Generation, @@ -580,7 +765,6 @@ mod test { time_created -> Timestamptz, time_modified -> Timestamptz, time_deleted -> Nullable, - rcgen -> Int8, collection_id -> Nullable, } } @@ -605,8 +789,10 @@ mod test { time_created TIMESTAMPTZ NOT NULL, \ time_modified TIMESTAMPTZ NOT NULL, \ time_deleted TIMESTAMPTZ, \ - rcgen INT NOT NULL, \ collection_id UUID); \ + CREATE INDEX IF NOT EXISTS collection_index ON test_schema.resource ( \ + collection_id \ + ) WHERE collection_id IS NOT NULL AND time_deleted IS NULL; \ TRUNCATE test_schema.collection; \ TRUNCATE test_schema.resource", ) @@ -620,72 +806,165 @@ mod test { struct Resource { #[diesel(embed)] pub identity: ResourceIdentity, + pub collection_id: Option, + } + #[derive(Queryable, Insertable, Debug, Resource, Selectable)] + #[diesel(table_name = collection)] + struct Collection { + #[diesel(embed)] + pub identity: CollectionIdentity, pub rcgen: i64, - pub collection_id: Option, } - struct Collection; impl DatastoreAttachTarget for Collection { - type CollectionId = uuid::Uuid; + type Id = uuid::Uuid; - type ParentGenerationColumn = collection::dsl::rcgen; - type ParentTimeDeletedColumn = collection::dsl::time_deleted; + type 
CollectionIdColumn = collection::dsl::id; + type CollectionGenerationColumn = collection::dsl::rcgen; + type CollectionTimeDeletedColumn = collection::dsl::time_deleted; - type ChildCollectionIdColumn = resource::dsl::collection_id; + type ResourceIdColumn = resource::dsl::id; + type ResourceCollectionIdColumn = resource::dsl::collection_id; + type ResourceTimeDeletedColumn = resource::dsl::time_deleted; } #[test] fn test_verify_query() { let collection_id = - uuid::Uuid::parse_str("223cb7f7-0d3a-4a4e-a5e1-ad38ecb785d0") + uuid::Uuid::parse_str("cccccccc-cccc-cccc-cccc-cccccccccccc") .unwrap(); let resource_id = - uuid::Uuid::parse_str("223cb7f7-0d3a-4a4e-a5e1-ad38ecb785d8") + uuid::Uuid::parse_str("aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa") .unwrap(); let attach = Collection::attach_resource( collection_id, - Generation::new(), + resource_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + 12345, diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) .set(resource::dsl::collection_id.eq(collection_id)) ); let query = diesel::debug_query::(&attach).to_string(); let expected_query = "WITH \ - found_row AS MATERIALIZED (SELECT \ - \"test_schema\".\"collection\".\"id\", \ - \"test_schema\".\"collection\".\"name\", \ - \"test_schema\".\"collection\".\"description\", \ - \"test_schema\".\"collection\".\"time_created\", \ - \"test_schema\".\"collection\".\"time_modified\", \ - \"test_schema\".\"collection\".\"time_deleted\", \ - \"test_schema\".\"collection\".\"rcgen\" \ - FROM \"test_schema\".\"collection\" WHERE (\ - (\"test_schema\".\"collection\".\"id\" = $1) AND \ - (\"test_schema\".\"collection\".\"time_deleted\" IS NULL)\ - ) FOR UPDATE), \ - dummy AS MATERIALIZED (SELECT IF(\ - EXISTS(SELECT \"id\" FROM found_row), \ - TRUE, CAST(1/0 AS BOOL))), \ - updated_parent_row AS MATERIALIZED (UPDATE \ - \"test_schema\".\"collection\" SET \"rcgen\" = \"rcgen\" + 1 \ - WHERE \"id\" IN (SELECT \"id\" FROM found_row) RETURNING 1), \ - 
updated_resource_row AS (UPDATE \"test_schema\".\"resource\" \ - SET \"collection_id\" = $2 \ - WHERE (\"test_schema\".\"resource\".\"id\" = $3) \ - RETURNING \"test_schema\".\"resource\".\"id\", \ - \"test_schema\".\"resource\".\"name\", \ - \"test_schema\".\"resource\".\"description\", \ - \"test_schema\".\"resource\".\"time_created\", \ - \"test_schema\".\"resource\".\"time_modified\", \ - \"test_schema\".\"resource\".\"time_deleted\", \ - \"test_schema\".\"resource\".\"rcgen\", \ - \"test_schema\".\"resource\".\"collection_id\") \ - SELECT * FROM updated_resource_row \ - -- binds: [223cb7f7-0d3a-4a4e-a5e1-ad38ecb785d0, \ - 223cb7f7-0d3a-4a4e-a5e1-ad38ecb785d0, \ - 223cb7f7-0d3a-4a4e-a5e1-ad38ecb785d8]"; - + collection_by_id AS (\ + SELECT \ + \"test_schema\".\"collection\".\"id\", \ + \"test_schema\".\"collection\".\"name\", \ + \"test_schema\".\"collection\".\"description\", \ + \"test_schema\".\"collection\".\"time_created\", \ + \"test_schema\".\"collection\".\"time_modified\", \ + \"test_schema\".\"collection\".\"time_deleted\", \ + \"test_schema\".\"collection\".\"rcgen\" \ + FROM \"test_schema\".\"collection\" \ + WHERE (\ + (\"test_schema\".\"collection\".\"id\" = $1) AND \ + (\"test_schema\".\"collection\".\"time_deleted\" IS NULL)\ + ) FOR UPDATE\ + ), \ + resource_by_id AS (\ + SELECT \ + \"test_schema\".\"resource\".\"id\", \ + \"test_schema\".\"resource\".\"name\", \ + \"test_schema\".\"resource\".\"description\", \ + \"test_schema\".\"resource\".\"time_created\", \ + \"test_schema\".\"resource\".\"time_modified\", \ + \"test_schema\".\"resource\".\"time_deleted\", \ + \"test_schema\".\"resource\".\"collection_id\" \ + FROM \"test_schema\".\"resource\" \ + WHERE (\ + (\"test_schema\".\"resource\".\"id\" = $2) AND \ + (\"test_schema\".\"resource\".\"time_deleted\" IS NULL)\ + ) FOR UPDATE\ + ), \ + resource_count AS (\ + SELECT COUNT(*) \ + FROM \"test_schema\".\"resource\" \ + WHERE (\ + (\"test_schema\".\"resource\".\"collection_id\" = $3) AND \ + 
(\"test_schema\".\"resource\".\"time_deleted\" IS NULL)\ + )\ + ), \ + collection_info AS (\ + SELECT \ + \"test_schema\".\"collection\".\"id\", \ + \"test_schema\".\"collection\".\"name\", \ + \"test_schema\".\"collection\".\"description\", \ + \"test_schema\".\"collection\".\"time_created\", \ + \"test_schema\".\"collection\".\"time_modified\", \ + \"test_schema\".\"collection\".\"time_deleted\", \ + \"test_schema\".\"collection\".\"rcgen\" \ + FROM \"test_schema\".\"collection\" \ + WHERE (\ + (\"test_schema\".\"collection\".\"id\" = $4) AND \ + (\"test_schema\".\"collection\".\"time_deleted\" IS NULL)\ + ) FOR UPDATE\ + ), \ + resource_info AS (\ + SELECT \ + \"test_schema\".\"resource\".\"id\", \ + \"test_schema\".\"resource\".\"name\", \ + \"test_schema\".\"resource\".\"description\", \ + \"test_schema\".\"resource\".\"time_created\", \ + \"test_schema\".\"resource\".\"time_modified\", \ + \"test_schema\".\"resource\".\"time_deleted\", \ + \"test_schema\".\"resource\".\"collection_id\" \ + FROM \"test_schema\".\"resource\" \ + WHERE (\ + (\"test_schema\".\"resource\".\"id\" = $5) AND \ + (\"test_schema\".\"resource\".\"time_deleted\" IS NULL)\ + ) FOR UPDATE\ + ), \ + do_update AS (\ + SELECT IF(\ + EXISTS(SELECT \"id\" FROM collection_info) AND \ + EXISTS(SELECT \"id\" FROM resource_info) AND \ + (SELECT * FROM resource_count) < 12345, \ + TRUE,\ + FALSE)\ + ), \ + updated_collection AS (\ + UPDATE \ + \"test_schema\".\"collection\" \ + SET \ + \"rcgen\" = \"rcgen\" + 1 \ + WHERE \ + \"id\" IN (SELECT \"id\" FROM collection_info) AND \ + (SELECT * FROM do_update) \ + RETURNING \ + \"test_schema\".\"collection\".\"id\", \ + \"test_schema\".\"collection\".\"name\", \ + \"test_schema\".\"collection\".\"description\", \ + \"test_schema\".\"collection\".\"time_created\", \ + \"test_schema\".\"collection\".\"time_modified\", \ + \"test_schema\".\"collection\".\"time_deleted\", \ + \"test_schema\".\"collection\".\"rcgen\"\ + ), \ + updated_resource AS (\ + UPDATE \ 
+ \"test_schema\".\"resource\" \ + SET \ + \"collection_id\" = $6 \ + WHERE \ + (\"test_schema\".\"resource\".\"id\" = $7) AND \ + (SELECT * FROM do_update) \ + RETURNING \ + \"test_schema\".\"resource\".\"id\", \ + \"test_schema\".\"resource\".\"name\", \ + \"test_schema\".\"resource\".\"description\", \ + \"test_schema\".\"resource\".\"time_created\", \ + \"test_schema\".\"resource\".\"time_modified\", \ + \"test_schema\".\"resource\".\"time_deleted\", \ + \"test_schema\".\"resource\".\"collection_id\"\ + ) \ + SELECT \ + (SELECT * FROM resource_count) as resource_count, \ + COALESCE((SELECT * FROM collection_by_id)) as collection_by_id, \ + COALESCE((SELECT * FROM resource_by_id)) as resource_by_id, \ + COALESCE((SELECT * FROM updated_collection)) as updated_collection, \ + COALESCE((SELECT * FROM updated_resource)) as updated_resource; -- binds: [cccccccc-cccc-cccc-cccc-cccccccccccc, aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa, cccccccc-cccc-cccc-cccc-cccccccccccc, cccccccc-cccc-cccc-cccc-cccccccccccc, aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa, cccccccc-cccc-cccc-cccc-cccccccccccc, aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa]"; assert_eq!(query, expected_query); } @@ -702,17 +981,28 @@ mod test { let resource_id = uuid::Uuid::new_v4(); let attach = Collection::attach_resource( collection_id, - Generation::new(), + resource_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + 12345, diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) .set(resource::dsl::collection_id.eq(collection_id)) ) .attach_and_get_result_async(pool.pool()) .await; - assert!(matches!(attach, Err(AsyncInsertError::CollectionNotFound))); + + eprintln!("result: {:?}", attach); + + assert!(matches!(attach, Err(AttachError::CollectionNotFound))); + + /* let attach_query = Collection::attach_resource( collection_id, - Generation::new(), + resource_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + 12345, 
diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) .set(resource::dsl::collection_id.eq(collection_id)) ); @@ -727,10 +1017,10 @@ mod test { .pool() .transaction(move |conn| { attach_query.attach_and_get_result(conn).map_err(|e| match e { - SyncInsertError::CollectionNotFound => { + SyncAttachError::CollectionNotFound => { TxnError::CustomError(CollectionError::NotFound) } - SyncInsertError::DatabaseError(e) => TxnError::from(e), + SyncAttachError::DatabaseError(e) => TxnError::from(e), }) }) .await; @@ -740,10 +1030,13 @@ mod test { Err(TxnError::CustomError(CollectionError::NotFound)) )); + */ + db.cleanup().await.unwrap(); logctx.cleanup_successful(); } + /* #[tokio::test] async fn test_collection_present() { let logctx = dev::test_setup_log("test_collection_present"); @@ -780,7 +1073,6 @@ mod test { resource::dsl::description.eq("desc"), resource::dsl::time_created.eq(insert_time), resource::dsl::time_modified.eq(insert_time), - resource::dsl::rcgen.eq(1), resource::dsl::collection_id.eq(Option::::None), )]) .execute_async(pool.pool()) @@ -800,7 +1092,6 @@ mod test { ).set(( resource::dsl::collection_id.eq(collection_id), resource::dsl::time_modified.eq(update_time), - resource::dsl::rcgen.eq(resource::dsl::rcgen + 1), )) ) .attach_and_get_result_async(pool.pool()) @@ -812,7 +1103,6 @@ mod test { assert_eq!(resource.time_created(), insert_time); assert_eq!(resource.time_modified(), update_time); assert_eq!(resource.collection_id.unwrap(), collection_id); - assert_eq!(resource.rcgen, 2); let collection_rcgen = collection::table .find(collection_id) @@ -827,4 +1117,6 @@ mod test { db.cleanup().await.unwrap(); logctx.cleanup_successful(); } + + */ } From 7e3d52058974ba767a58a6701257ea78a5aac1e1 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 19 May 2022 11:10:38 -0400 Subject: [PATCH 05/29] ... it... works? 
--- nexus/src/db/collection_attach.rs | 69 +++++++++++++++++++++---------- 1 file changed, 47 insertions(+), 22 deletions(-) diff --git a/nexus/src/db/collection_attach.rs b/nexus/src/db/collection_attach.rs index dba3e1fbd7f..ef0b51b5b79 100644 --- a/nexus/src/db/collection_attach.rs +++ b/nexus/src/db/collection_attach.rs @@ -386,12 +386,12 @@ pub enum SyncAttachError { } */ -type RawOutput = (i64, Option, Option, Option, Option); +pub type RawOutput = (i64, Option, Option, Option, Option); impl AttachToCollectionStatement where ResourceType: 'static + Debug + Send + Selectable, - C: 'static + DatastoreAttachTarget + Send, + C: 'static + Debug + DatastoreAttachTarget + Send, ResourceTable: 'static + Table + Send + Copy + Debug, U: 'static + Send, V: 'static + Send, @@ -415,7 +415,10 @@ where self.get_result_async::>(pool) .await // If the database returns an error, propagate it right away. - .map_err(|e| AttachError::DatabaseError(e)) + .map_err(|e| { + eprintln!("ERROR from DB - not parsing result"); + AttachError::DatabaseError(e) + }) // Otherwise, parse the output to determine if the CTE succeeded. 
.and_then(|r| Self::parse_result(r, capacity)) } @@ -447,6 +450,8 @@ where result: RawOutput, capacity: usize, ) -> Result> { + eprintln!("Parsing DB result: {:?}", result); + let ( attached_count, collection_before_update, @@ -614,12 +619,12 @@ type BoxedDslOutput = diesel::internal::table_macro::BoxedSelectStatement< /// // WHERE IN (SELECT FROM resource_info) AND (SELECT * FROM do_update) /// // RETURNING * /// // ) -/// // SELECT -/// // (SELECT * FROM resource_count), -/// // COALESCE((SELECT * FROM collection_by_id)), -/// // COALESCE((SELECT * FROM resource_by_id)), -/// // COALESCE((SELECT * FROM updated_collection)); -/// // COALESCE((SELECT * FROM resource)); +/// // SELECT * FROM +/// // (SELECT * FROM resource_count) +/// // LEFT JOIN (SELECT * FROM collection_by_id) ON TRUE +/// // LEFT JOIN (SELECT * FROM resource_by_id) ON TRUE +/// // LEFT JOIN (SELECT * FROM updated_collection) ON TRUE +/// // LEFT JOIN (SELECT * FROM resource) ON TRUE; /// ``` /// /// This CTE is similar in desired behavior to the one specified in @@ -661,6 +666,7 @@ where AsSelect: QueryFragment, { fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, Pg>) -> QueryResult<()> { + out.unsafe_to_cache_prepared(); out.push_sql("WITH collection_by_id AS ("); self.collection_exists_query.walk_ast(out.reborrow())?; out.push_sql(" FOR UPDATE), "); @@ -716,12 +722,31 @@ where self.resource_returning_clause.walk_ast(out.reborrow())?; out.push_sql(") "); - out.push_sql("SELECT \ - (SELECT * FROM resource_count) as resource_count, \ - COALESCE((SELECT * FROM collection_by_id)) as collection_by_id, \ - COALESCE((SELECT * FROM resource_by_id)) as resource_by_id, \ - COALESCE((SELECT * FROM updated_collection)) as updated_collection, \ - COALESCE((SELECT * FROM updated_resource)) as updated_resource;"); + // Why do all these LEFT JOINs here? In short, to ensure that we are + // always returning a constant number of columns. 
+ // + // Diesel parses output "one column at a time", mapping to structs or + // tuples. For example, when deserializing an "Option<(A, B, C)>" object, + // Diesel checks nullability of the "A", "B", and "C" columns. + // If any of those columns unexpectedly return NULL, the entire object is + // treated as "None". + // + // In summary: + // - Without the LEFT JOINs, we'd occassionally be returning "zero + // rows", which would make the output entirely unparseable. + // - If we used an operation like COALESCE (which attempts to map the + // result of an expression to either "NULL" or a single tuple column), + // Diesel struggles to map the result back to a structure. + // + // By returning a static number of columns, each component of the + // "RawOutput" tuple can be parsed, regardless of nullability, without + // preventing later portions of the result from being parsed. + out.push_sql("SELECT * FROM \ + (SELECT * FROM resource_count) \ + LEFT JOIN (SELECT * FROM collection_by_id) ON TRUE \ + LEFT JOIN (SELECT * FROM resource_by_id) ON TRUE \ + LEFT JOIN (SELECT * FROM updated_collection) ON TRUE \ + LEFT JOIN (SELECT * FROM updated_resource) ON TRUE;"); Ok(()) } @@ -959,12 +984,12 @@ mod test { \"test_schema\".\"resource\".\"time_deleted\", \ \"test_schema\".\"resource\".\"collection_id\"\ ) \ - SELECT \ - (SELECT * FROM resource_count) as resource_count, \ - COALESCE((SELECT * FROM collection_by_id)) as collection_by_id, \ - COALESCE((SELECT * FROM resource_by_id)) as resource_by_id, \ - COALESCE((SELECT * FROM updated_collection)) as updated_collection, \ - COALESCE((SELECT * FROM updated_resource)) as updated_resource; -- binds: [cccccccc-cccc-cccc-cccc-cccccccccccc, aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa, cccccccc-cccc-cccc-cccc-cccccccccccc, cccccccc-cccc-cccc-cccc-cccccccccccc, aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa, cccccccc-cccc-cccc-cccc-cccccccccccc, aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa]"; + SELECT * FROM \ + (SELECT * FROM resource_count) \ + LEFT JOIN 
(SELECT * FROM collection_by_id) ON TRUE \ + LEFT JOIN (SELECT * FROM resource_by_id) ON TRUE \ + LEFT JOIN (SELECT * FROM updated_collection) ON TRUE \ + LEFT JOIN (SELECT * FROM updated_resource) ON TRUE; -- binds: [cccccccc-cccc-cccc-cccc-cccccccccccc, aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa, cccccccc-cccc-cccc-cccc-cccccccccccc, cccccccc-cccc-cccc-cccc-cccccccccccc, aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa, cccccccc-cccc-cccc-cccc-cccccccccccc, aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa]"; assert_eq!(query, expected_query); } @@ -991,7 +1016,7 @@ mod test { .attach_and_get_result_async(pool.pool()) .await; - eprintln!("result: {:?}", attach); + eprintln!("!!!! result: {:?}", attach); assert!(matches!(attach, Err(AttachError::CollectionNotFound))); From 2d79a1cc1d44b534fa6f0f706f2bec2d662418a2 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 19 May 2022 14:58:28 -0400 Subject: [PATCH 06/29] expand tests --- nexus/src/db/collection_attach.rs | 553 ++++++++++++++++++++++++++++-- 1 file changed, 529 insertions(+), 24 deletions(-) diff --git a/nexus/src/db/collection_attach.rs b/nexus/src/db/collection_attach.rs index ef0b51b5b79..405cab47805 100644 --- a/nexus/src/db/collection_attach.rs +++ b/nexus/src/db/collection_attach.rs @@ -214,6 +214,9 @@ pub trait DatastoreAttachTarget : Selectable { Self::Id, >, Output = BoxedQuery>, + > + query_methods::FilterDsl< + IsNull, + Output = BoxedQuery>, > + query_methods::FilterDsl< IsNull, Output = BoxedQuery>, @@ -257,6 +260,8 @@ pub trait DatastoreAttachTarget : Selectable { as HasTable>::table() }; + // Create new queries to determine if the collection and resources + // already exist. let collection_exists_query = Box::new( collection_table() .into_boxed() @@ -270,6 +275,8 @@ pub trait DatastoreAttachTarget : Selectable { .filter(Self::ResourceTimeDeletedColumn::default().is_null()) ); + // Additionally, construct a new query to count the number of + // already attached instances. 
let resource_count_query = Box::new( resource_table() .into_boxed() @@ -278,16 +285,23 @@ pub trait DatastoreAttachTarget : Selectable { .count() ); + // However, for the queries which decide whether or not we'll update, + // extend the user-provided arguments. + // + // We force these queries to: + // - Check against the primary key of the target objects + // - Ensure the objects are not deleted + // - (for the resource) Ensure it is not already attached let collection_query = Box::new( collection_query .filter(collection_table().primary_key().eq(collection_id)) .filter(Self::CollectionTimeDeletedColumn::default().is_null()) ); - let resource_query = Box::new( resource_query .filter(resource_table().primary_key().eq(resource_id)) .filter(Self::ResourceTimeDeletedColumn::default().is_null()) + .filter(Self::ResourceCollectionIdColumn::default().is_null()) ); let collection_from_clause = collection_table().from_clause(); @@ -363,14 +377,16 @@ pub enum AttachError { CollectionNotFound, /// The resource being attached does not exist ResourceNotFound, - /// Too many resources are currently attached to the collection - TooManyAttached, /// Although the resource and collection exist, the update did not occur /// /// The unchanged resource and collection are returned as a part of this /// error; it is the responsibility of the caller to determine which /// condition was not met. - NoUpdate(ResourceType, C), + NoUpdate { + attached_count: i64, + resource: ResourceType, + collection: C, + }, /// Other database error DatabaseError(PoolError), } @@ -411,7 +427,6 @@ where // We require this bound to ensure that "Self" is runnable as query. Self: query_methods::LoadQuery<'static, DbConnection, RawOutput>, { - let capacity = self.max_attached_resources; self.get_result_async::>(pool) .await // If the database returns an error, propagate it right away. @@ -420,7 +435,7 @@ where AttachError::DatabaseError(e) }) // Otherwise, parse the output to determine if the CTE succeeded. 
- .and_then(|r| Self::parse_result(r, capacity)) + .and_then(Self::parse_result) } /* @@ -448,7 +463,6 @@ where fn parse_result( result: RawOutput, - capacity: usize, ) -> Result> { eprintln!("Parsing DB result: {:?}", result); @@ -460,11 +474,6 @@ where resource_after_update ) = result; - // TODO: avoid unwrap here - if attached_count >= capacity.try_into().unwrap() { - return Err(AttachError::TooManyAttached); - } - let collection_before_update = collection_before_update.ok_or_else(|| AttachError::CollectionNotFound)?; @@ -473,7 +482,13 @@ where match (collection_after_update, resource_after_update) { (Some(_), Some(resource)) => Ok(resource), - (None, None) => Err(AttachError::NoUpdate(resource_before_update, collection_before_update)), + (None, None) => { + Err(AttachError::NoUpdate { + attached_count, + resource: resource_before_update, + collection: collection_before_update + }) + } _ => panic!("Partial update applied - This is a CTE bug"), } } @@ -596,7 +611,7 @@ type BoxedDslOutput = diesel::internal::table_macro::BoxedSelectStatement< /// // resource_info AS ( /// // SELECT * FROM R /// // WHERE = AND IS NULL AND -/// // +/// // IS NULL AND /// // FOR UPDATE /// // ), /// // /* Make a decision on whether or not to apply ANY updates */ @@ -767,8 +782,11 @@ mod test { use diesel::expression_methods::ExpressionMethods; use diesel::pg::Pg; use diesel::QueryDsl; + use diesel::SelectableHelper; use nexus_test_utils::db::test_setup_database; + use omicron_common::api::external::{IdentityMetadataCreateParams, Name}; use omicron_test_utils::dev; + use uuid::Uuid; table! { test_schema.collection (id) { @@ -826,7 +844,7 @@ mod test { } /// Describes a resource within the database. 
- #[derive(Queryable, Insertable, Debug, Resource, Selectable)] + #[derive(Clone, Queryable, Insertable, Debug, Resource, Selectable, PartialEq)] #[diesel(table_name = resource)] struct Resource { #[diesel(embed)] @@ -834,7 +852,7 @@ mod test { pub collection_id: Option, } - #[derive(Queryable, Insertable, Debug, Resource, Selectable)] + #[derive(Clone, Queryable, Insertable, Debug, Resource, Selectable, PartialEq)] #[diesel(table_name = collection)] struct Collection { #[diesel(embed)] @@ -854,6 +872,62 @@ mod test { type ResourceTimeDeletedColumn = resource::dsl::time_deleted; } + async fn insert_collection(id: Uuid, name: &str, pool: &db::Pool) -> Collection { + let create_params = IdentityMetadataCreateParams { + name: Name::try_from(name.to_string()).unwrap(), + description: "description".to_string(), + }; + let c = Collection { + identity: CollectionIdentity::new(id, create_params), + rcgen: 1, + }; + + diesel::insert_into(collection::table) + .values(c) + .execute_async(pool.pool()) + .await + .unwrap(); + + get_collection(id, &pool).await + } + + async fn get_collection(id: Uuid, pool: &db::Pool) -> Collection { + collection::table + .find(id) + .select(Collection::as_select()) + .first_async(pool.pool()) + .await + .unwrap() + } + + async fn insert_resource(id: Uuid, name: &str, pool: &db::Pool) -> Resource { + let create_params = IdentityMetadataCreateParams { + name: Name::try_from(name.to_string()).unwrap(), + description: "description".to_string(), + }; + let r = Resource { + identity: ResourceIdentity::new(id, create_params), + collection_id: None, + }; + + diesel::insert_into(resource::table) + .values(r) + .execute_async(pool.pool()) + .await + .unwrap(); + + get_resource(id, &pool).await + } + + async fn get_resource(id: Uuid, pool: &db::Pool) -> Resource { + resource::table + .find(id) + .select(Resource::as_select()) + .first_async(pool.pool()) + .await + .unwrap() + } + #[test] fn test_verify_query() { let collection_id = @@ -937,9 +1011,10 @@ 
mod test { \"test_schema\".\"resource\".\"time_deleted\", \ \"test_schema\".\"resource\".\"collection_id\" \ FROM \"test_schema\".\"resource\" \ - WHERE (\ + WHERE ((\ (\"test_schema\".\"resource\".\"id\" = $5) AND \ - (\"test_schema\".\"resource\".\"time_deleted\" IS NULL)\ + (\"test_schema\".\"resource\".\"time_deleted\" IS NULL)) AND \ + (\"test_schema\".\"resource\".\"collection_id\" IS NULL)\ ) FOR UPDATE\ ), \ do_update AS (\ @@ -994,8 +1069,8 @@ mod test { } #[tokio::test] - async fn test_collection_not_present() { - let logctx = dev::test_setup_log("test_collection_not_present"); + async fn test_attach_missing_collection_fails() { + let logctx = dev::test_setup_log("test_attach_missing_collection_fails"); let mut db = test_setup_database(&logctx.log).await; let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&cfg); @@ -1009,15 +1084,13 @@ mod test { resource_id, collection::table.into_boxed(), resource::table.into_boxed(), - 12345, + 10, diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) .set(resource::dsl::collection_id.eq(collection_id)) ) .attach_and_get_result_async(pool.pool()) .await; - eprintln!("!!!! 
result: {:?}", attach); - assert!(matches!(attach, Err(AttachError::CollectionNotFound))); /* @@ -1027,7 +1100,7 @@ mod test { resource_id, collection::table.into_boxed(), resource::table.into_boxed(), - 12345, + 10, diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) .set(resource::dsl::collection_id.eq(collection_id)) ); @@ -1061,6 +1134,438 @@ mod test { logctx.cleanup_successful(); } + #[tokio::test] + async fn test_attach_missing_resource_fails() { + let logctx = dev::test_setup_log("test_attach_missing_resource_fails"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + + setup_db(&pool).await; + + let collection_id = uuid::Uuid::new_v4(); + let resource_id = uuid::Uuid::new_v4(); + + // Create the collection + let collection = insert_collection(collection_id, "collection", &pool).await; + + // Attempt to attach - even though the resource does not exist. + let attach = Collection::attach_resource( + collection_id, + resource_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + 10, + diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) + .set(resource::dsl::collection_id.eq(collection_id)) + ) + .attach_and_get_result_async(pool.pool()) + .await; + + assert!(matches!(attach, Err(AttachError::ResourceNotFound))); + // The collection should remain unchanged. + assert_eq!(collection, get_collection(collection_id, &pool).await); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_attach_once() { + let logctx = dev::test_setup_log("test_attach_once"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + + setup_db(&pool).await; + + let collection_id = uuid::Uuid::new_v4(); + let resource_id = uuid::Uuid::new_v4(); + + // Create the collection and resource. 
+ let collection = insert_collection(collection_id, "collection", &pool).await; + let _resource = insert_resource(resource_id, "resource", &pool).await; + + // Attach the resource to the collection. + let attach = Collection::attach_resource( + collection_id, + resource_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + 10, + diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) + .set(resource::dsl::collection_id.eq(collection_id)) + ) + .attach_and_get_result_async(pool.pool()) + .await; + + // "attach_and_get_result_async" should return the "attached" resource. + let returned_resource = attach.expect("Attach should have worked"); + assert_eq!( + returned_resource.collection_id.expect("Expected a collection ID"), + collection_id + ); + // The returned resource value should be the latest value in the DB. + assert_eq!(returned_resource, get_resource(resource_id, &pool).await); + // The generation number should have incremented in the collection. + assert_eq!(collection.rcgen + 1, get_collection(collection_id, &pool).await.rcgen); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_attach_multiple_times() { + let logctx = dev::test_setup_log("test_attach_multiple_times"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + + setup_db(&pool).await; + + const RESOURCE_COUNT: usize = 5; + + let collection_id = uuid::Uuid::new_v4(); + + // Create the collection. + let collection = insert_collection(collection_id, "collection", &pool).await; + + // Create each resource, attaching them to the collection. + for i in 0..RESOURCE_COUNT { + let resource_id = uuid::Uuid::new_v4(); + insert_resource(resource_id, &format!("resource{}", i), &pool).await; + + // Attach the resource to the collection. 
+ let attach = Collection::attach_resource( + collection_id, + resource_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + RESOURCE_COUNT, + diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) + .set(resource::dsl::collection_id.eq(collection_id)) + ) + .attach_and_get_result_async(pool.pool()) + .await; + + // "attach_and_get_result_async" should return the "attached" resource. + let returned_resource = attach.expect("Attach should have worked"); + assert_eq!( + returned_resource.collection_id.expect("Expected a collection ID"), + collection_id + ); + // The returned resource value should be the latest value in the DB. + assert_eq!(returned_resource, get_resource(resource_id, &pool).await); + + // The generation number should have incremented in the collection. + assert_eq!( + collection.rcgen + 1 + i64::try_from(i).unwrap(), + get_collection(collection_id, &pool).await.rcgen + ); + } + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_attach_beyond_capacity_fails() { + let logctx = dev::test_setup_log("test_attach_beyond_capacity_fails"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + + setup_db(&pool).await; + + let collection_id = uuid::Uuid::new_v4(); + + // Attach a resource to a collection, as usual. 
+        let collection = insert_collection(collection_id, "collection", &pool).await;
+        let resource_id1 = uuid::Uuid::new_v4();
+        let _resource = insert_resource(resource_id1, "resource1", &pool).await;
+        let attach = Collection::attach_resource(
+            collection_id,
+            resource_id1,
+            collection::table.into_boxed(),
+            resource::table.into_boxed(),
+            1,
+            diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id1)))
+                .set(resource::dsl::collection_id.eq(collection_id))
+        )
+        .attach_and_get_result_async(pool.pool())
+        .await;
+        assert_eq!(attach.expect("Attach should have worked").id(), resource_id1);
+
+        // Let's try attaching a second resource, now that we're at capacity.
+        let resource_id2 = uuid::Uuid::new_v4();
+        let _resource = insert_resource(resource_id2, "resource2", &pool).await;
+        let attach = Collection::attach_resource(
+            collection_id,
+            resource_id2,
+            collection::table.into_boxed(),
+            resource::table.into_boxed(),
+            1,
+            diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id2)))
+                .set(resource::dsl::collection_id.eq(collection_id))
+        )
+        .attach_and_get_result_async(pool.pool())
+        .await;
+
+        let err = attach.expect_err("Should have failed to attach");
+        match err {
+            AttachError::NoUpdate { attached_count, resource, collection } => {
+                assert_eq!(attached_count, 1);
+                assert_eq!(resource, get_resource(resource_id2, &pool).await);
+                assert_eq!(collection, get_collection(collection_id, &pool).await);
+            },
+            _ => panic!("Unexpected error: {:?}", err),
+        };
+
+        // The generation number should only have bumped once. 
+ assert_eq!(collection.rcgen + 1, get_collection(collection_id, &pool).await.rcgen); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_attach_while_already_attached() { + let logctx = dev::test_setup_log("test_attach_while_already_attached"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + + setup_db(&pool).await; + + let collection_id = uuid::Uuid::new_v4(); + + // Attach a resource to a collection, as usual. + let collection = insert_collection(collection_id, "collection", &pool).await; + let resource_id = uuid::Uuid::new_v4(); + let _resource = insert_resource(resource_id, "resource", &pool).await; + let attach = Collection::attach_resource( + collection_id, + resource_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + 10, + diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) + .set(resource::dsl::collection_id.eq(collection_id)) + ) + .attach_and_get_result_async(pool.pool()) + .await; + assert_eq!(attach.expect("Attach should have worked").id(), resource_id); + + // Try attaching when well below the capacity. + let attach = Collection::attach_resource( + collection_id, + resource_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + 10, + diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) + .set(resource::dsl::collection_id.eq(collection_id)) + ) + .attach_and_get_result_async(pool.pool()) + .await; + let err = attach.expect_err("Should have failed to attach"); + + // A caller should be able to inspect this result, see that the count of + // attached devices is below capacity, and that resource.collection_id + // is already set. This should provide enough context to identify "the + // resource is already attached". 
+ match err { + AttachError::NoUpdate { attached_count, resource, collection } => { + assert_eq!(attached_count, 1); + assert_eq!( + *resource.collection_id.as_ref().expect("Should already be attached"), + collection_id + ); + assert_eq!(resource, get_resource(resource_id, &pool).await); + assert_eq!(collection, get_collection(collection_id, &pool).await); + }, + _ => panic!("Unexpected error: {:?}", err), + }; + + // Let's try attaching the same resource again - while at capacity. + let attach = Collection::attach_resource( + collection_id, + resource_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + 1, + diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) + .set(resource::dsl::collection_id.eq(collection_id)) + ) + .attach_and_get_result_async(pool.pool()) + .await; + let err = attach.expect_err("Should have failed to attach"); + // Even when at capacity, the same information should be propagated back + // to the caller. + match err { + AttachError::NoUpdate { attached_count, resource, collection } => { + assert_eq!(attached_count, 1); + assert_eq!( + *resource.collection_id.as_ref().expect("Should already be attached"), + collection_id + ); + assert_eq!(resource, get_resource(resource_id, &pool).await); + assert_eq!(collection, get_collection(collection_id, &pool).await); + }, + _ => panic!("Unexpected error: {:?}", err), + }; + + // The generation number should only have bumped once, from the original + // resource insertion. 
+        assert_eq!(collection.rcgen + 1, get_collection(collection_id, &pool).await.rcgen);
+
+        db.cleanup().await.unwrap();
+        logctx.cleanup_successful();
+    }
+
+    #[tokio::test]
+    async fn test_attach_with_filters() {
+        let logctx = dev::test_setup_log("test_attach_with_filters");
+        let mut db = test_setup_database(&logctx.log).await;
+        let cfg = db::Config { url: db.pg_config().clone() };
+        let pool = db::Pool::new(&cfg);
+
+        setup_db(&pool).await;
+
+        let collection_id = uuid::Uuid::new_v4();
+        let resource_id = uuid::Uuid::new_v4();
+
+        // Create the collection and resource.
+        let collection = insert_collection(collection_id, "collection", &pool).await;
+        let _resource = insert_resource(resource_id, "resource", &pool).await;
+
+        // Attach the resource to the collection.
+        //
+        // Note that we are also filtering for specific conditions on the
+        // collection and resource - admittedly, just the name, but this could
+        // also be used to check the state of a disk, instance, etc.
+        let attach = Collection::attach_resource(
+            collection_id,
+            resource_id,
+            collection::table.filter(collection::name.eq("collection")).into_boxed(),
+            resource::table.filter(resource::name.eq("resource")).into_boxed(),
+            10,
+            // When actually performing the update, update the collection ID
+            // as well as an auxiliary field - the description.
+            //
+            // This provides an example of how one could attach an ID and update
+            // the state of a resource simultaneously. 
+ diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) + .set(( + resource::dsl::collection_id.eq(collection_id), + resource::dsl::description.eq("new description".to_string()) + )) + ) + .attach_and_get_result_async(pool.pool()) + .await; + + let returned_resource = attach.expect("Attach should have worked"); + assert_eq!( + returned_resource.collection_id.expect("Expected a collection ID"), + collection_id + ); + assert_eq!(returned_resource, get_resource(resource_id, &pool).await); + assert_eq!(returned_resource.description(), "new description"); + assert_eq!(collection.rcgen + 1, get_collection(collection_id, &pool).await.rcgen); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_attach_deleted_resource_fails() { + let logctx = dev::test_setup_log("test_attach_deleted_resource_fails"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + + setup_db(&pool).await; + + let collection_id = uuid::Uuid::new_v4(); + let resource_id = uuid::Uuid::new_v4(); + + // Create the collection and resource. + let _collection = insert_collection(collection_id, "collection", &pool).await; + let _resource = insert_resource(resource_id, "resource", &pool).await; + + // Immediately soft-delete the resource. + diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) + .set(resource::dsl::time_deleted.eq(Utc::now())) + .execute_async(pool.pool()) + .await + .unwrap(); + + // Attach the resource to the collection. Observe a failure which is + // indistinguishable from the resource not existing. 
+ let attach = Collection::attach_resource( + collection_id, + resource_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + 10, + diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) + .set(resource::dsl::collection_id.eq(collection_id)) + ) + .attach_and_get_result_async(pool.pool()) + .await; + assert!(matches!(attach, Err(AttachError::ResourceNotFound))); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_attach_no_update_filter() { + let logctx = dev::test_setup_log("test_attach_no_update_filter"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + + setup_db(&pool).await; + + let collection_id = uuid::Uuid::new_v4(); + + // Create the collection and some resources. + let _collection = insert_collection(collection_id, "collection", &pool).await; + let resource_id1 = uuid::Uuid::new_v4(); + let resource_id2 = uuid::Uuid::new_v4(); + let _resource1 = insert_resource(resource_id1, "resource1", &pool).await; + let _resource2 = insert_resource(resource_id2, "resource2", &pool).await; + + // Attach the resource to the collection. + // + // NOTE: In the update statement, we aren't filtering by resource ID. 
+ let attach = Collection::attach_resource( + collection_id, + resource_id1, + collection::table.into_boxed(), + resource::table.into_boxed(), + 10, + diesel::update(resource::table).set(resource::dsl::collection_id.eq(collection_id)) + ) + .attach_and_get_result_async(pool.pool()) + .await; + + let returned_resource = attach.expect("Attach should have worked"); + assert_eq!(returned_resource.id(), resource_id1); + + assert_eq!(get_resource(resource_id1, &pool).await.collection_id.unwrap(), collection_id); + assert!(get_resource(resource_id2, &pool).await.collection_id.is_none()); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + // TODO: test no filter in update? + // TODO: test no filter in update? + // TODO: Try to break things + // TODO: Sync API + /* #[tokio::test] async fn test_collection_present() { From f599e3f80ffc192d86e4d369840c94afceec08bb Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 19 May 2022 15:07:30 -0400 Subject: [PATCH 07/29] touch-ups --- nexus/src/db/collection_attach.rs | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/nexus/src/db/collection_attach.rs b/nexus/src/db/collection_attach.rs index 405cab47805..14db8b8a027 100644 --- a/nexus/src/db/collection_attach.rs +++ b/nexus/src/db/collection_attach.rs @@ -44,7 +44,7 @@ type ResourceIdColumn = /// Trick to check that columns come from the same table pub trait TypesAreSame {} -impl TypesAreSame for (T, T) {} +impl TypesAreSame for (T, T, T) {} /// Trait to be implemented by structs representing an attachable collection. /// @@ -104,6 +104,7 @@ pub trait DatastoreAttachTarget : Selectable { /// The Rust type of the collection and resource ids (typically Uuid). type Id: Copy + Debug + PartialEq + Send + 'static; + /// The primary key column of the collection. type CollectionIdColumn: Column; /// The column in the CollectionTable that acts as a generation number. 
@@ -111,14 +112,12 @@ pub trait DatastoreAttachTarget : Selectable { type CollectionGenerationColumn: Column + Default + Expression; /// The time deleted column in the CollectionTable - // We enforce that this column comes from the same table as - // CollectionGenerationColumn when defining attach_resource() below. type CollectionTimeDeletedColumn: Column + Default; + /// The primary key column of the resource type ResourceIdColumn: Column; - /// The column in the ResourceType that acts as a foreign key into - /// the CollectionTable + /// The column in the resource acting as a foreign key into the Collection type ResourceCollectionIdColumn: Column + Default; /// The time deleted column in the ResourceTable @@ -147,12 +146,13 @@ pub trait DatastoreAttachTarget : Selectable { update: UpdateStatement, U, V>, ) -> AttachToCollectionStatement where - // TODO: More of this? ( + ::Table, ::Table, ::Table, ): TypesAreSame, ( + ::Table, ::Table, ::Table, ): TypesAreSame, @@ -1520,8 +1520,8 @@ mod test { } #[tokio::test] - async fn test_attach_no_update_filter() { - let logctx = dev::test_setup_log("test_attach_no_update_filter"); + async fn test_attach_without_update_filter() { + let logctx = dev::test_setup_log("test_attach_without_update_filter"); let mut db = test_setup_database(&logctx.log).await; let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&cfg); @@ -1561,7 +1561,6 @@ mod test { logctx.cleanup_successful(); } - // TODO: test no filter in update? // TODO: test no filter in update? 
// TODO: Try to break things // TODO: Sync API From ead8ea40f8f96a43c611f586fd0b2583b34f007c Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 19 May 2022 15:40:23 -0400 Subject: [PATCH 08/29] Enforced update WHERE --- nexus/src/db/collection_attach.rs | 112 +++++++++++++++++++----------- 1 file changed, 70 insertions(+), 42 deletions(-) diff --git a/nexus/src/db/collection_attach.rs b/nexus/src/db/collection_attach.rs index 14db8b8a027..a3a7a03811c 100644 --- a/nexus/src/db/collection_attach.rs +++ b/nexus/src/db/collection_attach.rs @@ -131,7 +131,7 @@ pub trait DatastoreAttachTarget : Selectable { /// /// CAUTION: The API does not currently enforce that `key` matches the value /// of the collection id within the attached row. - fn attach_resource( + fn attach_resource( collection_id: Self::Id, resource_id: Self::Id, @@ -144,7 +144,7 @@ pub trait DatastoreAttachTarget : Selectable { // NoReturningClause. This enforces that the given input statement does // not have a RETURNING clause. update: UpdateStatement, U, V>, - ) -> AttachToCollectionStatement + ) -> AttachToCollectionStatement where ( ::Table, @@ -203,17 +203,17 @@ pub trait DatastoreAttachTarget : Selectable { >, BoxedQuery>: query_methods::FilterDsl< - Eq< - ResourcePrimaryKey, - Self::Id, - >, - Output = BoxedQuery>, + Eq< + ResourcePrimaryKey, + Self::Id, + >, + Output = BoxedQuery>, > + query_methods::FilterDsl< - Eq< - Self::ResourceCollectionIdColumn, - Self::Id, - >, - Output = BoxedQuery>, + Eq< + Self::ResourceCollectionIdColumn, + Self::Id, + >, + Output = BoxedQuery>, > + query_methods::FilterDsl< IsNull, Output = BoxedQuery>, @@ -222,6 +222,27 @@ pub trait DatastoreAttachTarget : Selectable { Output = BoxedQuery>, >, + // See: "update_resource_statement". 
+ // + // Allows calling "update.into_boxed()" + UpdateStatement, U, V>: + query_methods::BoxedDsl< + 'static, + Pg, + Output = BoxedUpdateStatement<'static, Pg, ResourceTable, V>, + >, + // Allows calling + // ".filter(resource_table().primary_key().eq(resource_id)" on the + // boxed update statement. + BoxedUpdateStatement<'static, Pg, ResourceTable, V>: + query_methods::FilterDsl< + Eq< + ResourcePrimaryKey, + Self::Id, + >, + Output = BoxedUpdateStatement<'static, Pg, ResourceTable, V2>, + >, + // Allows using "id" in expressions (e.g. ".eq(...)") with... Self::Id: diesel::expression::AsExpression< // ... The Collection table's PK @@ -292,6 +313,8 @@ pub trait DatastoreAttachTarget : Selectable { // - Check against the primary key of the target objects // - Ensure the objects are not deleted // - (for the resource) Ensure it is not already attached + // - (for the update) Ensure that only the resource with "resource_id" + // is modified. let collection_query = Box::new( collection_query .filter(collection_table().primary_key().eq(collection_id)) @@ -304,6 +327,9 @@ pub trait DatastoreAttachTarget : Selectable { .filter(Self::ResourceCollectionIdColumn::default().is_null()) ); + let update_resource_statement = update.into_boxed() + .filter(resource_table().primary_key().eq(resource_id)); + let collection_from_clause = collection_table().from_clause(); let collection_returning_clause = Self::as_returning(); let resource_returning_clause = ResourceType::as_returning(); @@ -314,7 +340,7 @@ pub trait DatastoreAttachTarget : Selectable { collection_query, resource_query, max_attached_resources, - update_resource_statement: update, + update_resource_statement, collection_from_clause, collection_returning_clause, resource_returning_clause, @@ -325,7 +351,7 @@ pub trait DatastoreAttachTarget : Selectable { /// The CTE described in the module docs #[must_use = "Queries must be executed"] -pub struct AttachToCollectionStatement +pub struct AttachToCollectionStatement 
where ResourceType: Selectable, C: DatastoreAttachTarget, @@ -345,15 +371,15 @@ where max_attached_resources: usize, // Update statement for the resource. - update_resource_statement: UpdateStatement, U, V>, + update_resource_statement: BoxedUpdateStatement<'static, Pg, ResourceTable, V>, collection_from_clause: as QuerySource>::FromClause, collection_returning_clause: AsSelect, resource_returning_clause: AsSelect, query_type: PhantomData, } -impl QueryId - for AttachToCollectionStatement +impl QueryId + for AttachToCollectionStatement where ResourceType: Selectable, C: DatastoreAttachTarget, @@ -404,14 +430,13 @@ pub enum SyncAttachError { pub type RawOutput = (i64, Option, Option, Option, Option); -impl AttachToCollectionStatement +impl AttachToCollectionStatement where ResourceType: 'static + Debug + Send + Selectable, C: 'static + Debug + DatastoreAttachTarget + Send, ResourceTable: 'static + Table + Send + Copy + Debug, - U: 'static + Send, V: 'static + Send, - AttachToCollectionStatement: Send, + AttachToCollectionStatement: Send, { /// Issues the CTE asynchronously and parses the result. /// @@ -524,8 +549,8 @@ where type SelectableSqlType = <>::SelectExpression as Expression>::SqlType; -impl Query - for AttachToCollectionStatement +impl Query + for AttachToCollectionStatement where ResourceType: Selectable, C: DatastoreAttachTarget, @@ -544,8 +569,8 @@ where ); } -impl RunQueryDsl - for AttachToCollectionStatement +impl RunQueryDsl + for AttachToCollectionStatement where ResourceType: Selectable, C: DatastoreAttachTarget, @@ -664,8 +689,8 @@ type BoxedDslOutput = diesel::internal::table_macro::BoxedSelectStatement< /// for the "dummy" table, preventing the division-by-zero error from occuring. /// The MATERIALIZED keyword forces the queries that are not referenced /// to be materialized instead. 
-impl QueryFragment - for AttachToCollectionStatement +impl QueryFragment + for AttachToCollectionStatement where ResourceType: Selectable, C: DatastoreAttachTarget, @@ -674,7 +699,7 @@ where as QuerySource>::FromClause: QueryFragment, // Necessary to "walk_ast" over "self.update_resource_statement". - UpdateStatement, U, V>: QueryFragment, + BoxedUpdateStatement<'static, Pg, ResourceTable, V>: QueryFragment, // Necessary to "walk_ast" over "self.resource_returning_clause". AsSelect: QueryFragment, // Necessary to "walk_ast" over "self.collection_returning_clause". @@ -731,7 +756,9 @@ where // TODO: Check or force the update_resource_statement to have // C::ResourceCollectionIdColumn set self.update_resource_statement.walk_ast(out.reborrow())?; - // TODO: Is this safe? There must be a WHERE clause for this to work. + + // NOTE: It is safe to start with "AND" - we forced the update statement + // to have a WHERE clause on the primary key of the resource. out.push_sql(" AND (SELECT * FROM do_update)"); out.push_sql(" RETURNING "); self.resource_returning_clause.walk_ast(out.reborrow())?; @@ -942,7 +969,7 @@ mod test { collection::table.into_boxed(), resource::table.into_boxed(), 12345, - diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) + diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)) ); let query = diesel::debug_query::(&attach).to_string(); @@ -1085,7 +1112,7 @@ mod test { collection::table.into_boxed(), resource::table.into_boxed(), 10, - diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) + diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)) ) .attach_and_get_result_async(pool.pool()) @@ -1101,7 +1128,7 @@ mod test { collection::table.into_boxed(), resource::table.into_boxed(), 10, - diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) + diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)) ); @@ 
-1156,7 +1183,7 @@ mod test { collection::table.into_boxed(), resource::table.into_boxed(), 10, - diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) + diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)) ) .attach_and_get_result_async(pool.pool()) @@ -1193,7 +1220,7 @@ mod test { collection::table.into_boxed(), resource::table.into_boxed(), 10, - diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) + diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)) ) .attach_and_get_result_async(pool.pool()) @@ -1242,7 +1269,7 @@ mod test { collection::table.into_boxed(), resource::table.into_boxed(), RESOURCE_COUNT, - diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) + diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)) ) .attach_and_get_result_async(pool.pool()) @@ -1289,7 +1316,7 @@ mod test { collection::table.into_boxed(), resource::table.into_boxed(), 1, - diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id1))) + diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)) ) .attach_and_get_result_async(pool.pool()) @@ -1305,7 +1332,7 @@ mod test { collection::table.into_boxed(), resource::table.into_boxed(), 1, - diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id1))) + diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)) ) .attach_and_get_result_async(pool.pool()) @@ -1349,7 +1376,7 @@ mod test { collection::table.into_boxed(), resource::table.into_boxed(), 10, - diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) + diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)) ) .attach_and_get_result_async(pool.pool()) @@ -1363,7 +1390,7 @@ mod test { collection::table.into_boxed(), resource::table.into_boxed(), 10, - 
diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) + diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)) ) .attach_and_get_result_async(pool.pool()) @@ -1394,7 +1421,7 @@ mod test { collection::table.into_boxed(), resource::table.into_boxed(), 1, - diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) + diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)) ) .attach_and_get_result_async(pool.pool()) @@ -1455,7 +1482,7 @@ mod test { // // This provides an example of how one could attach an ID and update // the state of a resource simultaneously. - diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) + diesel::update(resource::table.filter(resource::name.eq("resource"))) .set(( resource::dsl::collection_id.eq(collection_id), resource::dsl::description.eq("new description".to_string()) @@ -1508,8 +1535,7 @@ mod test { collection::table.into_boxed(), resource::table.into_boxed(), 10, - diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) - .set(resource::dsl::collection_id.eq(collection_id)) + diesel::update(resource::table).set(resource::dsl::collection_id.eq(collection_id)) ) .attach_and_get_result_async(pool.pool()) .await; @@ -1561,6 +1587,8 @@ mod test { logctx.cleanup_successful(); } + // TODO: What if the filter is different in the resource vs update calls? + // TODO: test no filter in update? 
// TODO: Try to break things // TODO: Sync API From 6e8214b32c51b3612c1e7e8ba98ffb2efe2a2236 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 19 May 2022 16:27:09 -0400 Subject: [PATCH 09/29] polishing docs --- nexus/src/db/collection_attach.rs | 135 +++++++++++++++++------------- 1 file changed, 78 insertions(+), 57 deletions(-) diff --git a/nexus/src/db/collection_attach.rs b/nexus/src/db/collection_attach.rs index a3a7a03811c..480b81fe966 100644 --- a/nexus/src/db/collection_attach.rs +++ b/nexus/src/db/collection_attach.rs @@ -6,6 +6,10 @@ //! //! This atomically: //! - Checks if the collection exists and is not soft deleted +//! - Checks if the resource exists and is not soft deleted +//! - Validates conditions on both the collection and resource +//! - Ensures the number of attached resources does not exceed +//! a provided threshold //! - Updates the collection's resource generation number //! - Updates the resource row @@ -23,7 +27,6 @@ use diesel::query_dsl::methods as query_methods; use diesel::query_source::Table; use diesel::sql_types::{BigInt, Nullable, SingleValue}; use std::fmt::Debug; -use std::marker::PhantomData; /// The table representing the collection. The resource references /// this table. @@ -35,6 +38,8 @@ type CollectionTable = < = <>::ResourceCollectionIdColumn as Column>::Table; +type ResourceTableWhereClause = as IntoUpdateTarget>::WhereClause; + type CollectionGenerationColumn = >::CollectionGenerationColumn; type CollectionIdColumn = @@ -123,15 +128,30 @@ pub trait DatastoreAttachTarget : Selectable { /// The time deleted column in the ResourceTable type ResourceTimeDeletedColumn: Column + Default; - /// Create a statement for attaching a resource to the given collection. + /// Creates a statement for attaching a resource to the given collection. + /// + /// This statement allows callers to atomically check the state of a + /// collection and a resource while attaching a resource to a collection. 
/// - /// The U, V types are the same type as the 3rd and 4th generic arguments to - /// UpdateStatement, and should generally be inferred rather than explicitly - /// specified. + /// - `collection_id`: Primary key of the collection being inserted into. + /// - `resource_id`: Primary key of the resource being attached. + /// - `collection_query`: An optional query for collection state. The + /// CTE will automatically filter this query to `collection_id`, and + /// validate that the "time deleted" column is NULL. + /// - `resource_query`: An optional query for the resource state. The + /// CTE will automatically filter this query to `resource_id`, + /// validate that the "time deleted" column is NULL, and validate that the + /// "collection_id" column is NULL. + /// - `max_attached_resources`: The maximum number of non-deleted + /// resources which are permitted to have their "collection_id" column + /// set to the value of `collection_id`. If attaching `resource_id` would + /// cross this threshold, the update is aborted. + /// - `update`: An update statement, identifying how the resource object + /// should be modified to be attached. /// - /// CAUTION: The API does not currently enforce that `key` matches the value - /// of the collection id within the attached row. - fn attach_resource( + /// The V, V2 types refer to the "update targets" of the UpdateStatement, + /// and should generally be inferred rather than explicitly specified. + fn attach_resource( collection_id: Self::Id, resource_id: Self::Id, @@ -140,24 +160,35 @@ pub trait DatastoreAttachTarget : Selectable { max_attached_resources: usize, - // Note that UpdateStatement's fourth argument defaults to Ret = + // We are intentionally picky about this update statement: + // - The second argument - the WHERE clause - must match the default + // for the table. 
This encourages the "resource_query" filter to be + // used instead, and makes it possible for the CTE to modify the + // filter here (ensuring "resource_id" is selected). + // - Additionally, UpdateStatement's fourth argument defaults to Ret = // NoReturningClause. This enforces that the given input statement does - // not have a RETURNING clause. - update: UpdateStatement, U, V>, + // not have a RETURNING clause, and also lets the CTE control this + // value. + update: UpdateStatement< + ResourceTable, + ResourceTableWhereClause, + V>, ) -> AttachToCollectionStatement where + // Ensure the "collection" columns all belong to the same table. ( ::Table, ::Table, ::Table, ): TypesAreSame, + // Ensure the "resource" columns all belong to the same table. ( ::Table, ::Table, ::Table, ): TypesAreSame, Self: Sized, - // Enables the "table()" method. + // Enables the "table()" method on the Collection. CollectionTable: HasTable
> + 'static + Send @@ -168,7 +199,7 @@ pub trait DatastoreAttachTarget : Selectable { Pg, Output = BoxedDslOutput>, >, - // Enables the "table()" method. + // Enables the "table()" method on the Resource. ResourceTable: HasTable
> + 'static + Send @@ -189,34 +220,41 @@ pub trait DatastoreAttachTarget : Selectable { as AsQuery>::SqlType: Send, // Allows sending "resource_exists_query" between threads. as AsQuery>::SqlType: Send, - // Allows calling ".filter()" on the boxed table. + // Allows calling ".filter()" on the boxed collection table. BoxedQuery>: + // Filter by primary key query_methods::FilterDsl< - Eq< - CollectionPrimaryKey, - Self::Id, - >, - Output = BoxedQuery>, - > + query_methods::FilterDsl< - IsNull, - Output = BoxedQuery>, + Eq< + CollectionPrimaryKey, + Self::Id, >, + Output = BoxedQuery>, + // Filter by time deleted = NULL + > + query_methods::FilterDsl< + IsNull, + Output = BoxedQuery>, + >, + // Allows calling ".filter()" on the boxed resource table. BoxedQuery>: + // Filter by primary key query_methods::FilterDsl< Eq< ResourcePrimaryKey, Self::Id, >, Output = BoxedQuery>, + // Filter by collection ID (when counting attached resources) > + query_methods::FilterDsl< Eq< Self::ResourceCollectionIdColumn, Self::Id, >, Output = BoxedQuery>, + // Filter by collection ID = NULL > + query_methods::FilterDsl< IsNull, Output = BoxedQuery>, + // Filter by time deleted = NULL > + query_methods::FilterDsl< IsNull, Output = BoxedQuery>, @@ -224,8 +262,11 @@ pub trait DatastoreAttachTarget : Selectable { // See: "update_resource_statement". // + // Allows referencing the default "WHERE" clause of the update + // statement. + ResourceTable: IntoUpdateTarget, // Allows calling "update.into_boxed()" - UpdateStatement, U, V>: + UpdateStatement, ResourceTableWhereClause, V>: query_methods::BoxedDsl< 'static, Pg, @@ -258,21 +299,16 @@ pub trait DatastoreAttachTarget : Selectable { SingleValue, as Expression>::SqlType: SingleValue, + ::SqlType: + SingleValue, - // Allows calling "is_null()" on the time deleted column. + // Allows calling "is_null()" on the following columns. 
Self::CollectionTimeDeletedColumn: ExpressionMethods, Self::ResourceTimeDeletedColumn: ExpressionMethods, Self::ResourceCollectionIdColumn: ExpressionMethods, - Self::CollectionGenerationColumn: ExpressionMethods, - // Necessary for output type (`AttachToCollectionStatement`). + // Necessary to actually select the resource in the output type. ResourceType: Selectable, - - // XXX ? - ::SqlType: - SingleValue, - ::SqlType: - SingleValue, { let collection_table = || { as HasTable>::table() @@ -297,7 +333,7 @@ pub trait DatastoreAttachTarget : Selectable { ); // Additionally, construct a new query to count the number of - // already attached instances. + // already attached resources. let resource_count_query = Box::new( resource_table() .into_boxed() @@ -306,7 +342,7 @@ pub trait DatastoreAttachTarget : Selectable { .count() ); - // However, for the queries which decide whether or not we'll update, + // For the queries which decide whether or not we'll perform the update, // extend the user-provided arguments. // // We force these queries to: @@ -344,7 +380,6 @@ pub trait DatastoreAttachTarget : Selectable { collection_from_clause, collection_returning_clause, resource_returning_clause, - query_type: PhantomData, } } } @@ -372,10 +407,12 @@ where // Update statement for the resource. update_resource_statement: BoxedUpdateStatement<'static, Pg, ResourceTable, V>, + // Describes the target of the collection table UPDATE. collection_from_clause: as QuerySource>::FromClause, + // Describes what should be returned after UPDATE-ing the collection. collection_returning_clause: AsSelect, + // Describes what should be returned after UPDATE-ing the resource. resource_returning_clause: AsSelect, - query_type: PhantomData, } impl QueryId @@ -393,7 +430,7 @@ pub type AsyncAttachToCollectionResult = Result = Result; +pub type SyncAttachToCollectionResult = Result>; */ /// Errors returned by [`AttachToCollectionStatement`]. 
@@ -428,6 +465,8 @@ pub enum SyncAttachError { } */ +/// Describes the type returned from the actual CTE, which is parsed +/// and interpreted before propagating it to users of the Rust API. pub type RawOutput = (i64, Option, Option, Option, Option); impl AttachToCollectionStatement @@ -439,11 +478,6 @@ where AttachToCollectionStatement: Send, { /// Issues the CTE asynchronously and parses the result. - /// - /// The three outcomes are: - /// - Ok(new row) - /// - Error(collection not found) - /// - Error(other diesel error) pub async fn attach_and_get_result_async( self, pool: &bb8::Pool>, @@ -455,10 +489,7 @@ where self.get_result_async::>(pool) .await // If the database returns an error, propagate it right away. - .map_err(|e| { - eprintln!("ERROR from DB - not parsing result"); - AttachError::DatabaseError(e) - }) + .map_err(AttachError::DatabaseError) // Otherwise, parse the output to determine if the CTE succeeded. .and_then(Self::parse_result) } @@ -466,11 +497,6 @@ where /* /// Issues the CTE synchronously and parses the result. - /// - /// The three outcomes are: - /// - Ok(new row) - /// - Error(collection not found) - /// - Error(other diesel error) pub fn attach_and_get_result( self, conn: &mut DbConnection, @@ -489,8 +515,6 @@ where fn parse_result( result: RawOutput, ) -> Result> { - eprintln!("Parsing DB result: {:?}", result); - let ( attached_count, collection_before_update, @@ -753,10 +777,7 @@ where out.push_sql("), "); out.push_sql("updated_resource AS ("); - // TODO: Check or force the update_resource_statement to have - // C::ResourceCollectionIdColumn set self.update_resource_statement.walk_ast(out.reborrow())?; - // NOTE: It is safe to start with "AND" - we forced the update statement // to have a WHERE clause on the primary key of the resource. out.push_sql(" AND (SELECT * FROM do_update)"); @@ -1482,7 +1503,7 @@ mod test { // // This provides an example of how one could attach an ID and update // the state of a resource simultaneously. 
- diesel::update(resource::table.filter(resource::name.eq("resource"))) + diesel::update(resource::table) .set(( resource::dsl::collection_id.eq(collection_id), resource::dsl::description.eq("new description".to_string()) From ba1acc56fc31d4e42848e59676360f724faf59df Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 19 May 2022 16:42:13 -0400 Subject: [PATCH 10/29] more cleanup --- nexus/src/app/instance.rs | 1 - nexus/src/app/sagas/disk_attach.rs | 1 - nexus/src/db/collection_attach.rs | 566 ++++++++++++++--------------- 3 files changed, 267 insertions(+), 301 deletions(-) diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index 6c094013f2b..4dab9af3f4f 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -700,7 +700,6 @@ impl super::Nexus { // TODO this will probably involve volume construction requests as // well! InstanceState::Running | InstanceState::Starting => { - // TODO: set state as "attaching". // TODO: also can we check rcgens diff --git a/nexus/src/app/sagas/disk_attach.rs b/nexus/src/app/sagas/disk_attach.rs index 402493c8693..53502a23008 100644 --- a/nexus/src/app/sagas/disk_attach.rs +++ b/nexus/src/app/sagas/disk_attach.rs @@ -114,7 +114,6 @@ async fn sda_update_sled_agent( async fn sda_update_sled_agent_undo( _sagactx: ActionContext, ) -> Result<(), anyhow::Error> { - // TODO: Undo the "disk_put". todo!(); } diff --git a/nexus/src/db/collection_attach.rs b/nexus/src/db/collection_attach.rs index 480b81fe966..de26c3e1b78 100644 --- a/nexus/src/db/collection_attach.rs +++ b/nexus/src/db/collection_attach.rs @@ -14,9 +14,7 @@ //! 
- Updates the resource row use super::pool::DbConnection; -use async_bb8_diesel::{ - AsyncRunQueryDsl, ConnectionManager, PoolError, -}; +use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionManager, PoolError}; use diesel::associations::HasTable; use diesel::expression::Expression; use diesel::helper_types::*; @@ -38,8 +36,9 @@ type CollectionTable = < = <>::ResourceCollectionIdColumn as Column>::Table; -type ResourceTableWhereClause = as IntoUpdateTarget>::WhereClause; - +/// The default WHERE clause of the resource table. +type ResourceTableWhereClause = + as IntoUpdateTarget>::WhereClause; type CollectionGenerationColumn = >::CollectionGenerationColumn; type CollectionIdColumn = @@ -105,7 +104,7 @@ impl TypesAreSame for (T, T, T) {} /// type ResourceTimeDeletedColumn = disk::dsl::time_deleted; /// } /// ``` -pub trait DatastoreAttachTarget : Selectable { +pub trait DatastoreAttachTarget: Selectable { /// The Rust type of the collection and resource ids (typically Uuid). type Id: Copy + Debug + PartialEq + Send + 'static; @@ -114,7 +113,9 @@ pub trait DatastoreAttachTarget : Selectable { /// The column in the CollectionTable that acts as a generation number. /// This is the "resource-generation-number" in RFD 192. - type CollectionGenerationColumn: Column + Default + Expression; + type CollectionGenerationColumn: Column + + Default + + Expression; /// The time deleted column in the CollectionTable type CollectionTimeDeletedColumn: Column + Default; @@ -149,9 +150,9 @@ pub trait DatastoreAttachTarget : Selectable { /// - `update`: An update statement, identifying how the resource object /// should be modified to be attached. /// - /// The V, V2 types refer to the "update targets" of the UpdateStatement, + /// The V type refers to the "update target" of the UpdateStatement, /// and should generally be inferred rather than explicitly specified. 
- fn attach_resource( + fn attach_resource( collection_id: Self::Id, resource_id: Self::Id, @@ -172,8 +173,9 @@ pub trait DatastoreAttachTarget : Selectable { update: UpdateStatement< ResourceTable, ResourceTableWhereClause, - V>, - ) -> AttachToCollectionStatement + V, + >, + ) -> AttachToCollectionStatement where // Ensure the "collection" columns all belong to the same table. ( @@ -222,39 +224,27 @@ pub trait DatastoreAttachTarget : Selectable { as AsQuery>::SqlType: Send, // Allows calling ".filter()" on the boxed collection table. BoxedQuery>: - // Filter by primary key query_methods::FilterDsl< - Eq< - CollectionPrimaryKey, - Self::Id, + Eq, Self::Id>, + Output = BoxedQuery>, + // Filter by time deleted = NULL + > + query_methods::FilterDsl< + IsNull, + Output = BoxedQuery>, >, - Output = BoxedQuery>, - // Filter by time deleted = NULL - > + query_methods::FilterDsl< - IsNull, - Output = BoxedQuery>, - >, // Allows calling ".filter()" on the boxed resource table. - BoxedQuery>: - // Filter by primary key - query_methods::FilterDsl< - Eq< - ResourcePrimaryKey, - Self::Id, - >, + BoxedQuery>: query_methods::FilterDsl< + Eq, Self::Id>, Output = BoxedQuery>, - // Filter by collection ID (when counting attached resources) + // Filter by collection ID (when counting attached resources) > + query_methods::FilterDsl< - Eq< - Self::ResourceCollectionIdColumn, - Self::Id, - >, + Eq, Output = BoxedQuery>, - // Filter by collection ID = NULL + // Filter by collection ID = NULL > + query_methods::FilterDsl< IsNull, Output = BoxedQuery>, - // Filter by time deleted = NULL + // Filter by time deleted = NULL > + query_methods::FilterDsl< IsNull, Output = BoxedQuery>, @@ -266,41 +256,50 @@ pub trait DatastoreAttachTarget : Selectable { // statement. 
ResourceTable: IntoUpdateTarget, // Allows calling "update.into_boxed()" - UpdateStatement, ResourceTableWhereClause, V>: - query_methods::BoxedDsl< + UpdateStatement< + ResourceTable, + ResourceTableWhereClause, + V, + >: query_methods::BoxedDsl< + 'static, + Pg, + Output = BoxedUpdateStatement< 'static, Pg, - Output = BoxedUpdateStatement<'static, Pg, ResourceTable, V>, - >, + ResourceTable, + V, + >, + >, // Allows calling // ".filter(resource_table().primary_key().eq(resource_id)" on the // boxed update statement. BoxedUpdateStatement<'static, Pg, ResourceTable, V>: - query_methods::FilterDsl< - Eq< - ResourcePrimaryKey, - Self::Id, + query_methods::FilterDsl< + Eq, Self::Id>, + Output = BoxedUpdateStatement< + 'static, + Pg, + ResourceTable, + V, >, - Output = BoxedUpdateStatement<'static, Pg, ResourceTable, V2>, >, // Allows using "id" in expressions (e.g. ".eq(...)") with... Self::Id: diesel::expression::AsExpression< - // ... The Collection table's PK - SerializedCollectionPrimaryKey, - > + diesel::expression::AsExpression< - // ... The Resource table's PK - SerializedResourcePrimaryKey, - > + diesel::expression::AsExpression< - // ... The Resource table's FK to the Collection table - SerializedResourceForeignKey - >, + // ... The Collection table's PK + SerializedCollectionPrimaryKey, + > + diesel::expression::AsExpression< + // ... The Resource table's PK + SerializedResourcePrimaryKey, + > + diesel::expression::AsExpression< + // ... The Resource table's FK to the Collection table + SerializedResourceForeignKey, + >, as Expression>::SqlType: SingleValue, as Expression>::SqlType: SingleValue, - ::SqlType: - SingleValue, + ::SqlType: SingleValue, // Allows calling "is_null()" on the following columns. Self::CollectionTimeDeletedColumn: ExpressionMethods, @@ -310,12 +309,10 @@ pub trait DatastoreAttachTarget : Selectable { // Necessary to actually select the resource in the output type. 
ResourceType: Selectable, { - let collection_table = || { - as HasTable>::table() - }; - let resource_table = || { - as HasTable>::table() - }; + let collection_table = + || as HasTable>::table(); + let resource_table = + || as HasTable>::table(); // Create new queries to determine if the collection and resources // already exist. @@ -323,13 +320,13 @@ pub trait DatastoreAttachTarget : Selectable { collection_table() .into_boxed() .filter(collection_table().primary_key().eq(collection_id)) - .filter(Self::CollectionTimeDeletedColumn::default().is_null()) + .filter(Self::CollectionTimeDeletedColumn::default().is_null()), ); let resource_exists_query = Box::new( resource_table() .into_boxed() .filter(resource_table().primary_key().eq(resource_id)) - .filter(Self::ResourceTimeDeletedColumn::default().is_null()) + .filter(Self::ResourceTimeDeletedColumn::default().is_null()), ); // Additionally, construct a new query to count the number of @@ -337,9 +334,12 @@ pub trait DatastoreAttachTarget : Selectable { let resource_count_query = Box::new( resource_table() .into_boxed() - .filter(Self::ResourceCollectionIdColumn::default().eq(collection_id)) + .filter( + Self::ResourceCollectionIdColumn::default() + .eq(collection_id), + ) .filter(Self::ResourceTimeDeletedColumn::default().is_null()) - .count() + .count(), ); // For the queries which decide whether or not we'll perform the update, @@ -354,20 +354,20 @@ pub trait DatastoreAttachTarget : Selectable { let collection_query = Box::new( collection_query .filter(collection_table().primary_key().eq(collection_id)) - .filter(Self::CollectionTimeDeletedColumn::default().is_null()) + .filter(Self::CollectionTimeDeletedColumn::default().is_null()), ); let resource_query = Box::new( resource_query .filter(resource_table().primary_key().eq(resource_id)) .filter(Self::ResourceTimeDeletedColumn::default().is_null()) - .filter(Self::ResourceCollectionIdColumn::default().is_null()) + 
.filter(Self::ResourceCollectionIdColumn::default().is_null()), ); - let update_resource_statement = update.into_boxed() + let update_resource_statement = update + .into_boxed() .filter(resource_table().primary_key().eq(resource_id)); let collection_from_clause = collection_table().from_clause(); - let collection_returning_clause = Self::as_returning(); let resource_returning_clause = ResourceType::as_returning(); AttachToCollectionStatement { collection_exists_query, @@ -378,7 +378,6 @@ pub trait DatastoreAttachTarget : Selectable { max_attached_resources, update_resource_statement, collection_from_clause, - collection_returning_clause, resource_returning_clause, } } @@ -406,11 +405,11 @@ where max_attached_resources: usize, // Update statement for the resource. - update_resource_statement: BoxedUpdateStatement<'static, Pg, ResourceTable, V>, + update_resource_statement: + BoxedUpdateStatement<'static, Pg, ResourceTable, V>, // Describes the target of the collection table UPDATE. - collection_from_clause: as QuerySource>::FromClause, - // Describes what should be returned after UPDATE-ing the collection. - collection_returning_clause: AsSelect, + collection_from_clause: + as QuerySource>::FromClause, // Describes what should be returned after UPDATE-ing the resource. resource_returning_clause: AsSelect, } @@ -426,7 +425,8 @@ where } /// Result of [`AttachToCollectionStatement`] when executed asynchronously -pub type AsyncAttachToCollectionResult = Result>; +pub type AsyncAttachToCollectionResult = + Result>; /* /// Result of [`AttachToCollectionStatement`] when executed synchronously @@ -445,11 +445,7 @@ pub enum AttachError { /// The unchanged resource and collection are returned as a part of this /// error; it is the responsibility of the caller to determine which /// condition was not met. 
- NoUpdate { - attached_count: i64, - resource: ResourceType, - collection: C, - }, + NoUpdate { attached_count: i64, resource: ResourceType, collection: C }, /// Other database error DatabaseError(PoolError), } @@ -467,7 +463,8 @@ pub enum SyncAttachError { /// Describes the type returned from the actual CTE, which is parsed /// and interpreted before propagating it to users of the Rust API. -pub type RawOutput = (i64, Option, Option, Option, Option); +pub type RawOutput = + (i64, Option, Option, Option); impl AttachToCollectionStatement where @@ -484,7 +481,11 @@ where ) -> AsyncAttachToCollectionResult where // We require this bound to ensure that "Self" is runnable as query. - Self: query_methods::LoadQuery<'static, DbConnection, RawOutput>, + Self: query_methods::LoadQuery< + 'static, + DbConnection, + RawOutput, + >, { self.get_result_async::>(pool) .await @@ -519,26 +520,22 @@ where attached_count, collection_before_update, resource_before_update, - collection_after_update, - resource_after_update + resource_after_update, ) = result; - let collection_before_update = - collection_before_update.ok_or_else(|| AttachError::CollectionNotFound)?; + let collection_before_update = collection_before_update + .ok_or_else(|| AttachError::CollectionNotFound)?; - let resource_before_update = - resource_before_update.ok_or_else(|| AttachError::ResourceNotFound)?; + let resource_before_update = resource_before_update + .ok_or_else(|| AttachError::ResourceNotFound)?; - match (collection_after_update, resource_after_update) { - (Some(_), Some(resource)) => Ok(resource), - (None, None) => { - Err(AttachError::NoUpdate { - attached_count, - resource: resource_before_update, - collection: collection_before_update - }) - } - _ => panic!("Partial update applied - This is a CTE bug"), + match resource_after_update { + Some(resource) => Ok(resource), + None => Err(AttachError::NoUpdate { + attached_count, + resource: resource_before_update, + collection: collection_before_update, + 
}), } } @@ -586,8 +583,6 @@ where Nullable>, // If the resource exists, the value before update. Nullable>, - // If the collection was updated, the new value. - Nullable>, // If the resource was updated, the new value. Nullable>, ); @@ -606,9 +601,8 @@ type CollectionPrimaryKey = as Table>::PrimaryKey; type ResourcePrimaryKey = as Table>::PrimaryKey; -type ResourceForeignKey = >::ResourceCollectionIdColumn; +type ResourceForeignKey = + >::ResourceCollectionIdColumn; // Representation of Primary Key in SQL. type SerializedCollectionPrimaryKey = @@ -628,7 +622,21 @@ type BoxedDslOutput = diesel::internal::table_macro::BoxedSelectStatement< Pg, >; -/// This implementation uses the following CTE: +/// This implementation uses a CTE which attempts to do the following: +/// +/// 1. (collection_by_id, resource_by_id): Identify if the collection and +/// resource objects exist at all. +/// 2. (resource_count): Identify if the number of resources already attached to +/// the collection exceeds a threshold. +/// 3. (collection_info, resource_info): Checks for arbitrary user-provided +/// constraints on the collection and resource objects. +/// 4. (do_update): IFF all previous checks succeeded, make a decision to perfom +/// an update. +/// 5. (updated_collection): Increase the generation number on the collection. +/// 6. (updated_resource): Apply user-provided updates on the resource - +/// presumably, setting the collection ID value. 
+/// +/// This is implemented as follows: /// /// ```text /// // WITH @@ -672,10 +680,10 @@ type BoxedDslOutput = diesel::internal::table_macro::BoxedSelectStatement< /// // TRUE, FALSE), /// // ), /// // /* Update the generation number of the collection row */ -/// // updated_collection AS ( +/// // updated_collection AS MATERIALIZED ( /// // UPDATE C SET = + 1 /// // WHERE IN (SELECT FROM collection_info) AND (SELECT * FROM do_update) -/// // RETURNING * +/// // RETURNING 1 /// // ), /// // /* Update the resource */ /// // updated_resource AS ( @@ -687,32 +695,8 @@ type BoxedDslOutput = diesel::internal::table_macro::BoxedSelectStatement< /// // (SELECT * FROM resource_count) /// // LEFT JOIN (SELECT * FROM collection_by_id) ON TRUE /// // LEFT JOIN (SELECT * FROM resource_by_id) ON TRUE -/// // LEFT JOIN (SELECT * FROM updated_collection) ON TRUE /// // LEFT JOIN (SELECT * FROM resource) ON TRUE; /// ``` -/// -/// This CTE is similar in desired behavior to the one specified in -/// [RFD 192](https://rfd.shared.oxide.computer/rfd/0192#_dueling_administrators), -/// but tuned to the case of modifying an associated resource. -/// -/// The general idea is that the first clause of the CTE (the "dummy" table) -/// will generate a division-by-zero error and rollback the transaction if the -/// target collection is not found in its table. It simultaneously locks the -/// row for update, to allow us to subsequently use the "updated_collection_row" query to -/// increase the resource generation count for the collection. In the same -/// transaction, it performs the provided update statement, which should -/// update the resource, referencing the collection ID to the collection -/// collection we just checked for. -/// -/// NOTE: It is important that the WHERE clauses on the SELECT and UPDATE -/// against the collection table must match, or else we will not get the desired -/// behavior described in RFD 192. 
-/// NOTE: It is important that the WITH clauses have MATERIALIZED, since under -/// some conditions, clauses may be inlined (and potentially eliminated by -/// consequence of being unused). At the time of writing this, this happens -/// for the "dummy" table, preventing the division-by-zero error from occuring. -/// The MATERIALIZED keyword forces the queries that are not referenced -/// to be materialized instead. impl QueryFragment for AttachToCollectionStatement where @@ -723,11 +707,10 @@ where as QuerySource>::FromClause: QueryFragment, // Necessary to "walk_ast" over "self.update_resource_statement". - BoxedUpdateStatement<'static, Pg, ResourceTable, V>: QueryFragment, + BoxedUpdateStatement<'static, Pg, ResourceTable, V>: + QueryFragment, // Necessary to "walk_ast" over "self.resource_returning_clause". AsSelect: QueryFragment, - // Necessary to "walk_ast" over "self.collection_returning_clause". - AsSelect: QueryFragment, { fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, Pg>) -> QueryResult<()> { out.unsafe_to_cache_prepared(); @@ -760,20 +743,23 @@ where self.max_attached_resources) ); - out.push_sql("updated_collection AS (UPDATE "); + out.push_sql("updated_collection AS MATERIALIZED (UPDATE "); self.collection_from_clause.walk_ast(out.reborrow())?; out.push_sql(" SET "); - out.push_identifier(CollectionGenerationColumn::::NAME)?; + out.push_identifier( + CollectionGenerationColumn::::NAME, + )?; out.push_sql(" = "); - out.push_identifier(CollectionGenerationColumn::::NAME)?; + out.push_identifier( + CollectionGenerationColumn::::NAME, + )?; out.push_sql(" + 1 WHERE "); out.push_identifier(CollectionPrimaryKey::::NAME)?; out.push_sql(" IN (SELECT "); out.push_identifier(CollectionPrimaryKey::::NAME)?; - out.push_sql(" FROM collection_info) AND (SELECT * FROM do_update) RETURNING "); - // TODO: You don't actually need to return anything here. We only care - // about the inserted resource... 
- self.collection_returning_clause.walk_ast(out.reborrow())?; + out.push_sql( + " FROM collection_info) AND (SELECT * FROM do_update) RETURNING 1", + ); out.push_sql("), "); out.push_sql("updated_resource AS ("); @@ -804,12 +790,13 @@ where // By returning a static number of columns, each component of the // "RawOutput" tuple can be parsed, regardless of nullability, without // preventing later portions of the result from being parsed. - out.push_sql("SELECT * FROM \ + out.push_sql( + "SELECT * FROM \ (SELECT * FROM resource_count) \ LEFT JOIN (SELECT * FROM collection_by_id) ON TRUE \ LEFT JOIN (SELECT * FROM resource_by_id) ON TRUE \ - LEFT JOIN (SELECT * FROM updated_collection) ON TRUE \ - LEFT JOIN (SELECT * FROM updated_resource) ON TRUE;"); + LEFT JOIN (SELECT * FROM updated_resource) ON TRUE;", + ); Ok(()) } @@ -892,7 +879,9 @@ mod test { } /// Describes a resource within the database. - #[derive(Clone, Queryable, Insertable, Debug, Resource, Selectable, PartialEq)] + #[derive( + Clone, Queryable, Insertable, Debug, Resource, Selectable, PartialEq, + )] #[diesel(table_name = resource)] struct Resource { #[diesel(embed)] @@ -900,7 +889,9 @@ mod test { pub collection_id: Option, } - #[derive(Clone, Queryable, Insertable, Debug, Resource, Selectable, PartialEq)] + #[derive( + Clone, Queryable, Insertable, Debug, Resource, Selectable, PartialEq, + )] #[diesel(table_name = collection)] struct Collection { #[diesel(embed)] @@ -920,7 +911,11 @@ mod test { type ResourceTimeDeletedColumn = resource::dsl::time_deleted; } - async fn insert_collection(id: Uuid, name: &str, pool: &db::Pool) -> Collection { + async fn insert_collection( + id: Uuid, + name: &str, + pool: &db::Pool, + ) -> Collection { let create_params = IdentityMetadataCreateParams { name: Name::try_from(name.to_string()).unwrap(), description: "description".to_string(), @@ -948,7 +943,11 @@ mod test { .unwrap() } - async fn insert_resource(id: Uuid, name: &str, pool: &db::Pool) -> Resource { + async fn 
insert_resource( + id: Uuid, + name: &str, + pool: &db::Pool, + ) -> Resource { let create_params = IdentityMetadataCreateParams { name: Name::try_from(name.to_string()).unwrap(), description: "description".to_string(), @@ -991,7 +990,7 @@ mod test { resource::table.into_boxed(), 12345, diesel::update(resource::table) - .set(resource::dsl::collection_id.eq(collection_id)) + .set(resource::dsl::collection_id.eq(collection_id)), ); let query = diesel::debug_query::(&attach).to_string(); @@ -1073,7 +1072,7 @@ mod test { TRUE,\ FALSE)\ ), \ - updated_collection AS (\ + updated_collection AS MATERIALIZED (\ UPDATE \ \"test_schema\".\"collection\" \ SET \ @@ -1081,14 +1080,7 @@ mod test { WHERE \ \"id\" IN (SELECT \"id\" FROM collection_info) AND \ (SELECT * FROM do_update) \ - RETURNING \ - \"test_schema\".\"collection\".\"id\", \ - \"test_schema\".\"collection\".\"name\", \ - \"test_schema\".\"collection\".\"description\", \ - \"test_schema\".\"collection\".\"time_created\", \ - \"test_schema\".\"collection\".\"time_modified\", \ - \"test_schema\".\"collection\".\"time_deleted\", \ - \"test_schema\".\"collection\".\"rcgen\"\ + RETURNING 1\ ), \ updated_resource AS (\ UPDATE \ @@ -1111,14 +1103,14 @@ mod test { (SELECT * FROM resource_count) \ LEFT JOIN (SELECT * FROM collection_by_id) ON TRUE \ LEFT JOIN (SELECT * FROM resource_by_id) ON TRUE \ - LEFT JOIN (SELECT * FROM updated_collection) ON TRUE \ LEFT JOIN (SELECT * FROM updated_resource) ON TRUE; -- binds: [cccccccc-cccc-cccc-cccc-cccccccccccc, aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa, cccccccc-cccc-cccc-cccc-cccccccccccc, cccccccc-cccc-cccc-cccc-cccccccccccc, aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa, cccccccc-cccc-cccc-cccc-cccccccccccc, aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa]"; assert_eq!(query, expected_query); } #[tokio::test] async fn test_attach_missing_collection_fails() { - let logctx = dev::test_setup_log("test_attach_missing_collection_fails"); + let logctx = + 
dev::test_setup_log("test_attach_missing_collection_fails"); let mut db = test_setup_database(&logctx.log).await; let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&cfg); @@ -1134,7 +1126,7 @@ mod test { resource::table.into_boxed(), 10, diesel::update(resource::table) - .set(resource::dsl::collection_id.eq(collection_id)) + .set(resource::dsl::collection_id.eq(collection_id)), ) .attach_and_get_result_async(pool.pool()) .await; @@ -1195,7 +1187,8 @@ mod test { let resource_id = uuid::Uuid::new_v4(); // Create the collection - let collection = insert_collection(collection_id, "collection", &pool).await; + let collection = + insert_collection(collection_id, "collection", &pool).await; // Attempt to attach - even though the resource does not exist. let attach = Collection::attach_resource( @@ -1205,7 +1198,7 @@ mod test { resource::table.into_boxed(), 10, diesel::update(resource::table) - .set(resource::dsl::collection_id.eq(collection_id)) + .set(resource::dsl::collection_id.eq(collection_id)), ) .attach_and_get_result_async(pool.pool()) .await; @@ -1231,7 +1224,8 @@ mod test { let resource_id = uuid::Uuid::new_v4(); // Create the collection and resource. - let collection = insert_collection(collection_id, "collection", &pool).await; + let collection = + insert_collection(collection_id, "collection", &pool).await; let _resource = insert_resource(resource_id, "resource", &pool).await; // Attach the resource to the collection. @@ -1242,7 +1236,7 @@ mod test { resource::table.into_boxed(), 10, diesel::update(resource::table) - .set(resource::dsl::collection_id.eq(collection_id)) + .set(resource::dsl::collection_id.eq(collection_id)), ) .attach_and_get_result_async(pool.pool()) .await; @@ -1256,7 +1250,10 @@ mod test { // The returned resource value should be the latest value in the DB. assert_eq!(returned_resource, get_resource(resource_id, &pool).await); // The generation number should have incremented in the collection. 
- assert_eq!(collection.rcgen + 1, get_collection(collection_id, &pool).await.rcgen); + assert_eq!( + collection.rcgen + 1, + get_collection(collection_id, &pool).await.rcgen + ); db.cleanup().await.unwrap(); logctx.cleanup_successful(); @@ -1276,12 +1273,14 @@ mod test { let collection_id = uuid::Uuid::new_v4(); // Create the collection. - let collection = insert_collection(collection_id, "collection", &pool).await; + let collection = + insert_collection(collection_id, "collection", &pool).await; // Create each resource, attaching them to the collection. for i in 0..RESOURCE_COUNT { let resource_id = uuid::Uuid::new_v4(); - insert_resource(resource_id, &format!("resource{}", i), &pool).await; + insert_resource(resource_id, &format!("resource{}", i), &pool) + .await; // Attach the resource to the collection. let attach = Collection::attach_resource( @@ -1291,7 +1290,7 @@ mod test { resource::table.into_boxed(), RESOURCE_COUNT, diesel::update(resource::table) - .set(resource::dsl::collection_id.eq(collection_id)) + .set(resource::dsl::collection_id.eq(collection_id)), ) .attach_and_get_result_async(pool.pool()) .await; @@ -1299,11 +1298,16 @@ mod test { // "attach_and_get_result_async" should return the "attached" resource. let returned_resource = attach.expect("Attach should have worked"); assert_eq!( - returned_resource.collection_id.expect("Expected a collection ID"), + returned_resource + .collection_id + .expect("Expected a collection ID"), collection_id ); // The returned resource value should be the latest value in the DB. - assert_eq!(returned_resource, get_resource(resource_id, &pool).await); + assert_eq!( + returned_resource, + get_resource(resource_id, &pool).await + ); // The generation number should have incremented in the collection. assert_eq!( @@ -1328,7 +1332,8 @@ mod test { let collection_id = uuid::Uuid::new_v4(); // Attach a resource to a collection, as usual. 
- let collection = insert_collection(collection_id, "collection", &pool).await; + let collection = + insert_collection(collection_id, "collection", &pool).await; let resource_id1 = uuid::Uuid::new_v4(); let _resource = insert_resource(resource_id1, "resource1", &pool).await; let attach = Collection::attach_resource( @@ -1338,11 +1343,14 @@ mod test { resource::table.into_boxed(), 1, diesel::update(resource::table) - .set(resource::dsl::collection_id.eq(collection_id)) + .set(resource::dsl::collection_id.eq(collection_id)), ) .attach_and_get_result_async(pool.pool()) .await; - assert_eq!(attach.expect("Attach should have worked").id(), resource_id1); + assert_eq!( + attach.expect("Attach should have worked").id(), + resource_id1 + ); // Let's try attaching a second resource, now that we're at capacity. let resource_id2 = uuid::Uuid::new_v4(); @@ -1354,7 +1362,7 @@ mod test { resource::table.into_boxed(), 1, diesel::update(resource::table) - .set(resource::dsl::collection_id.eq(collection_id)) + .set(resource::dsl::collection_id.eq(collection_id)), ) .attach_and_get_result_async(pool.pool()) .await; @@ -1364,13 +1372,19 @@ mod test { AttachError::NoUpdate { attached_count, resource, collection } => { assert_eq!(attached_count, 1); assert_eq!(resource, get_resource(resource_id2, &pool).await); - assert_eq!(collection, get_collection(collection_id, &pool).await); - }, + assert_eq!( + collection, + get_collection(collection_id, &pool).await + ); + } _ => panic!("Unexpected error: {:?}", err), }; // The generation number should only have bumped once. - assert_eq!(collection.rcgen + 1, get_collection(collection_id, &pool).await.rcgen); + assert_eq!( + collection.rcgen + 1, + get_collection(collection_id, &pool).await.rcgen + ); db.cleanup().await.unwrap(); logctx.cleanup_successful(); @@ -1388,7 +1402,8 @@ mod test { let collection_id = uuid::Uuid::new_v4(); // Attach a resource to a collection, as usual. 
- let collection = insert_collection(collection_id, "collection", &pool).await; + let collection = + insert_collection(collection_id, "collection", &pool).await; let resource_id = uuid::Uuid::new_v4(); let _resource = insert_resource(resource_id, "resource", &pool).await; let attach = Collection::attach_resource( @@ -1398,11 +1413,14 @@ mod test { resource::table.into_boxed(), 10, diesel::update(resource::table) - .set(resource::dsl::collection_id.eq(collection_id)) + .set(resource::dsl::collection_id.eq(collection_id)), ) .attach_and_get_result_async(pool.pool()) .await; - assert_eq!(attach.expect("Attach should have worked").id(), resource_id); + assert_eq!( + attach.expect("Attach should have worked").id(), + resource_id + ); // Try attaching when well below the capacity. let attach = Collection::attach_resource( @@ -1412,7 +1430,7 @@ mod test { resource::table.into_boxed(), 10, diesel::update(resource::table) - .set(resource::dsl::collection_id.eq(collection_id)) + .set(resource::dsl::collection_id.eq(collection_id)), ) .attach_and_get_result_async(pool.pool()) .await; @@ -1426,12 +1444,18 @@ mod test { AttachError::NoUpdate { attached_count, resource, collection } => { assert_eq!(attached_count, 1); assert_eq!( - *resource.collection_id.as_ref().expect("Should already be attached"), + *resource + .collection_id + .as_ref() + .expect("Should already be attached"), collection_id ); assert_eq!(resource, get_resource(resource_id, &pool).await); - assert_eq!(collection, get_collection(collection_id, &pool).await); - }, + assert_eq!( + collection, + get_collection(collection_id, &pool).await + ); + } _ => panic!("Unexpected error: {:?}", err), }; @@ -1443,7 +1467,7 @@ mod test { resource::table.into_boxed(), 1, diesel::update(resource::table) - .set(resource::dsl::collection_id.eq(collection_id)) + .set(resource::dsl::collection_id.eq(collection_id)), ) .attach_and_get_result_async(pool.pool()) .await; @@ -1454,18 +1478,27 @@ mod test { AttachError::NoUpdate { 
attached_count, resource, collection } => { assert_eq!(attached_count, 1); assert_eq!( - *resource.collection_id.as_ref().expect("Should already be attached"), + *resource + .collection_id + .as_ref() + .expect("Should already be attached"), collection_id ); assert_eq!(resource, get_resource(resource_id, &pool).await); - assert_eq!(collection, get_collection(collection_id, &pool).await); - }, + assert_eq!( + collection, + get_collection(collection_id, &pool).await + ); + } _ => panic!("Unexpected error: {:?}", err), }; // The generation number should only have bumped once, from the original // resource insertion. - assert_eq!(collection.rcgen + 1, get_collection(collection_id, &pool).await.rcgen); + assert_eq!( + collection.rcgen + 1, + get_collection(collection_id, &pool).await.rcgen + ); db.cleanup().await.unwrap(); logctx.cleanup_successful(); @@ -1484,7 +1517,8 @@ mod test { let resource_id = uuid::Uuid::new_v4(); // Create the collection and resource. - let collection = insert_collection(collection_id, "collection", &pool).await; + let collection = + insert_collection(collection_id, "collection", &pool).await; let _resource = insert_resource(resource_id, "resource", &pool).await; // Attach the resource to the collection. @@ -1495,7 +1529,9 @@ mod test { let attach = Collection::attach_resource( collection_id, resource_id, - collection::table.filter(collection::name.eq("collection")).into_boxed(), + collection::table + .filter(collection::name.eq("collection")) + .into_boxed(), resource::table.filter(resource::name.eq("resource")).into_boxed(), 10, // When actually performing the update, update the collection ID @@ -1503,11 +1539,10 @@ mod test { // // This provides an example of how one could attach an ID and update // the state of a resource simultaneously. 
- diesel::update(resource::table) - .set(( - resource::dsl::collection_id.eq(collection_id), - resource::dsl::description.eq("new description".to_string()) - )) + diesel::update(resource::table).set(( + resource::dsl::collection_id.eq(collection_id), + resource::dsl::description.eq("new description".to_string()), + )), ) .attach_and_get_result_async(pool.pool()) .await; @@ -1519,7 +1554,10 @@ mod test { ); assert_eq!(returned_resource, get_resource(resource_id, &pool).await); assert_eq!(returned_resource.description(), "new description"); - assert_eq!(collection.rcgen + 1, get_collection(collection_id, &pool).await.rcgen); + assert_eq!( + collection.rcgen + 1, + get_collection(collection_id, &pool).await.rcgen + ); db.cleanup().await.unwrap(); logctx.cleanup_successful(); @@ -1538,15 +1576,18 @@ mod test { let resource_id = uuid::Uuid::new_v4(); // Create the collection and resource. - let _collection = insert_collection(collection_id, "collection", &pool).await; + let _collection = + insert_collection(collection_id, "collection", &pool).await; let _resource = insert_resource(resource_id, "resource", &pool).await; // Immediately soft-delete the resource. - diesel::update(resource::table.filter(resource::dsl::id.eq(resource_id))) - .set(resource::dsl::time_deleted.eq(Utc::now())) - .execute_async(pool.pool()) - .await - .unwrap(); + diesel::update( + resource::table.filter(resource::dsl::id.eq(resource_id)), + ) + .set(resource::dsl::time_deleted.eq(Utc::now())) + .execute_async(pool.pool()) + .await + .unwrap(); // Attach the resource to the collection. Observe a failure which is // indistinguishable from the resource not existing. 
@@ -1556,7 +1597,8 @@ mod test { collection::table.into_boxed(), resource::table.into_boxed(), 10, - diesel::update(resource::table).set(resource::dsl::collection_id.eq(collection_id)) + diesel::update(resource::table) + .set(resource::dsl::collection_id.eq(collection_id)), ) .attach_and_get_result_async(pool.pool()) .await; @@ -1578,22 +1620,27 @@ mod test { let collection_id = uuid::Uuid::new_v4(); // Create the collection and some resources. - let _collection = insert_collection(collection_id, "collection", &pool).await; + let _collection = + insert_collection(collection_id, "collection", &pool).await; let resource_id1 = uuid::Uuid::new_v4(); let resource_id2 = uuid::Uuid::new_v4(); - let _resource1 = insert_resource(resource_id1, "resource1", &pool).await; - let _resource2 = insert_resource(resource_id2, "resource2", &pool).await; + let _resource1 = + insert_resource(resource_id1, "resource1", &pool).await; + let _resource2 = + insert_resource(resource_id2, "resource2", &pool).await; // Attach the resource to the collection. // - // NOTE: In the update statement, we aren't filtering by resource ID. + // NOTE: In the update statement, we aren't filtering by resource ID, + // even though we explicitly have two "live" resources". 
let attach = Collection::attach_resource( collection_id, resource_id1, collection::table.into_boxed(), resource::table.into_boxed(), 10, - diesel::update(resource::table).set(resource::dsl::collection_id.eq(collection_id)) + diesel::update(resource::table) + .set(resource::dsl::collection_id.eq(collection_id)), ) .attach_and_get_result_async(pool.pool()) .await; @@ -1601,100 +1648,21 @@ mod test { let returned_resource = attach.expect("Attach should have worked"); assert_eq!(returned_resource.id(), resource_id1); - assert_eq!(get_resource(resource_id1, &pool).await.collection_id.unwrap(), collection_id); - assert!(get_resource(resource_id2, &pool).await.collection_id.is_none()); - - db.cleanup().await.unwrap(); - logctx.cleanup_successful(); - } - - // TODO: What if the filter is different in the resource vs update calls? - - // TODO: test no filter in update? - // TODO: Try to break things - // TODO: Sync API - - /* - #[tokio::test] - async fn test_collection_present() { - let logctx = dev::test_setup_log("test_collection_present"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new(&cfg); - - setup_db(&pool).await; - - let collection_id = uuid::Uuid::new_v4(); - let resource_id = uuid::Uuid::new_v4(); - - // Insert the collection so it's present later - diesel::insert_into(collection::table) - .values(vec![( - collection::dsl::id.eq(collection_id), - collection::dsl::name.eq("test"), - collection::dsl::description.eq("desc"), - collection::dsl::time_created.eq(Utc::now()), - collection::dsl::time_modified.eq(Utc::now()), - collection::dsl::rcgen.eq(1), - )]) - .execute_async(pool.pool()) - .await - .unwrap(); - - // Insert the resource so it's present later - let insert_time = - DateTime::::from_utc(NaiveDateTime::from_timestamp(0, 0), Utc); - diesel::insert_into(resource::table) - .values(vec![( - resource::dsl::id.eq(resource_id), - resource::dsl::name.eq("test"), - 
resource::dsl::description.eq("desc"), - resource::dsl::time_created.eq(insert_time), - resource::dsl::time_modified.eq(insert_time), - resource::dsl::collection_id.eq(Option::::None), - )]) - .execute_async(pool.pool()) - .await - .unwrap(); - - // Attempt to attach the resource. - let update_time = - DateTime::::from_utc(NaiveDateTime::from_timestamp(1, 0), Utc); - let resource = Collection::attach_resource( - collection_id, - Generation::new(), - diesel::update( - resource::table - .filter(resource::dsl::id.eq(resource_id)) - .filter(resource::dsl::time_deleted.is_null()) - ).set(( - resource::dsl::collection_id.eq(collection_id), - resource::dsl::time_modified.eq(update_time), - )) - ) - .attach_and_get_result_async(pool.pool()) - .await - .unwrap(); - assert_eq!(resource.id(), resource_id); - assert_eq!(resource.name().as_str(), "test"); - assert_eq!(resource.description(), "desc"); - assert_eq!(resource.time_created(), insert_time); - assert_eq!(resource.time_modified(), update_time); - assert_eq!(resource.collection_id.unwrap(), collection_id); - - let collection_rcgen = collection::table - .find(collection_id) - .select(collection::dsl::rcgen) - .first_async::(pool.pool()) + // Note that only "resource1" should be attached. + // "resource2" should have automatically been filtered away from the + // update statement, regardless of user input. 
+ assert_eq!( + get_resource(resource_id1, &pool).await.collection_id.unwrap(), + collection_id + ); + assert!(get_resource(resource_id2, &pool) .await - .unwrap(); - - // Make sure rcgen got incremented - assert_eq!(collection_rcgen, 2); + .collection_id + .is_none()); db.cleanup().await.unwrap(); logctx.cleanup_successful(); } - */ + // TODO: Sync API } From 6da1643fac9f0cfd660435ee1bb714b3cdab625a Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 19 May 2022 18:18:15 -0400 Subject: [PATCH 11/29] Sync test added --- nexus/src/db/collection_attach.rs | 172 +++++++++++++----------------- 1 file changed, 76 insertions(+), 96 deletions(-) diff --git a/nexus/src/db/collection_attach.rs b/nexus/src/db/collection_attach.rs index de26c3e1b78..c741d79abe0 100644 --- a/nexus/src/db/collection_attach.rs +++ b/nexus/src/db/collection_attach.rs @@ -426,16 +426,15 @@ where /// Result of [`AttachToCollectionStatement`] when executed asynchronously pub type AsyncAttachToCollectionResult = - Result>; + Result>; -/* /// Result of [`AttachToCollectionStatement`] when executed synchronously -pub type SyncAttachToCollectionResult = Result>; -*/ +pub type SyncAttachToCollectionResult = + Result>; /// Errors returned by [`AttachToCollectionStatement`]. #[derive(Debug)] -pub enum AttachError { +pub enum AttachError { /// The collection that the query was inserting into does not exist CollectionNotFound, /// The resource being attached does not exist @@ -447,20 +446,9 @@ pub enum AttachError { /// condition was not met. NoUpdate { attached_count: i64, resource: ResourceType, collection: C }, /// Other database error - DatabaseError(PoolError), + DatabaseError(E), } -/* -/// Errors returned by [`AttachToCollectionStatement`]. 
-#[derive(Debug)] -pub enum SyncAttachError { - /// The collection that the query was inserting into does not exist - CollectionNotFound, - /// Other database error - DatabaseError(diesel::result::Error), -} -*/ - /// Describes the type returned from the actual CTE, which is parsed /// and interpreted before propagating it to users of the Rust API. pub type RawOutput = @@ -495,27 +483,27 @@ where .and_then(Self::parse_result) } - /* - /// Issues the CTE synchronously and parses the result. pub fn attach_and_get_result( self, conn: &mut DbConnection, - ) -> SyncAttachToCollectionResult + ) -> SyncAttachToCollectionResult where // We require this bound to ensure that "Self" is runnable as query. - Self: query_methods::LoadQuery<'static, DbConnection, ResourceType>, + Self: query_methods::LoadQuery< + 'static, + DbConnection, + RawOutput, + >, { - self.get_result::(conn) - .map_err(Self::translate_sync_error) - .map(parse_result) + self.get_result::>(conn) + .map_err(AttachError::DatabaseError) + .and_then(Self::parse_result) } - */ - - fn parse_result( + fn parse_result( result: RawOutput, - ) -> Result> { + ) -> Result> { let ( attached_count, collection_before_update, @@ -538,33 +526,6 @@ where }), } } - - /* - /// Translate from diesel errors into AttachError, handling the - /// intentional division-by-zero error in the CTE. - fn translate_async_error(err: PoolError) -> AttachError { - match err { - PoolError::Connection(ConnectionError::Query(err)) - if Self::error_is_division_by_zero(&err) => - { - AttachError::CollectionNotFound - } - other => AttachError::DatabaseError(other), - } - } - */ - - /* - /// Translate from diesel errors into SyncAttachError, handling the - /// intentional division-by-zero error in the CTE. 
- fn translate_sync_error(err: diesel::result::Error) -> SyncAttachError { - if Self::error_is_division_by_zero(&err) { - SyncAttachError::CollectionNotFound - } else { - SyncAttachError::DatabaseError(err) - } - } - */ } type SelectableSqlType = @@ -807,12 +768,11 @@ mod test { use super::{AttachError, DatastoreAttachTarget}; use crate::db::{ self, error::TransactionError, identity::Resource as IdentityResource, - model::Generation, }; use async_bb8_diesel::{ AsyncConnection, AsyncRunQueryDsl, AsyncSimpleConnection, }; - use chrono::{DateTime, NaiveDateTime, Utc}; + use chrono::Utc; use db_macros::Resource; use diesel::expression_methods::ExpressionMethods; use diesel::pg::Pg; @@ -1133,43 +1093,6 @@ mod test { assert!(matches!(attach, Err(AttachError::CollectionNotFound))); - /* - - let attach_query = Collection::attach_resource( - collection_id, - resource_id, - collection::table.into_boxed(), - resource::table.into_boxed(), - 10, - diesel::update(resource::table) - .set(resource::dsl::collection_id.eq(collection_id)) - ); - - #[derive(Debug)] - enum CollectionError { - NotFound, - } - type TxnError = TransactionError; - - let result = pool - .pool() - .transaction(move |conn| { - attach_query.attach_and_get_result(conn).map_err(|e| match e { - SyncAttachError::CollectionNotFound => { - TxnError::CustomError(CollectionError::NotFound) - } - SyncAttachError::DatabaseError(e) => TxnError::from(e), - }) - }) - .await; - - assert!(matches!( - result, - Err(TxnError::CustomError(CollectionError::NotFound)) - )); - - */ - db.cleanup().await.unwrap(); logctx.cleanup_successful(); } @@ -1259,6 +1182,65 @@ mod test { logctx.cleanup_successful(); } + #[tokio::test] + async fn test_attach_once_synchronous() { + let logctx = dev::test_setup_log("test_attach_once_synchronous"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + + setup_db(&pool).await; + + let collection_id = 
uuid::Uuid::new_v4(); + let resource_id = uuid::Uuid::new_v4(); + + // Create the collection and resource. + let collection = + insert_collection(collection_id, "collection", &pool).await; + let _resource = insert_resource(resource_id, "resource", &pool).await; + + // Attach the resource to the collection. + let attach_query = Collection::attach_resource( + collection_id, + resource_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + 10, + diesel::update(resource::table) + .set(resource::dsl::collection_id.eq(collection_id)), + ); + + type TxnError = TransactionError< + AttachError, + >; + let result = pool + .pool() + .transaction(move |conn| { + attach_query.attach_and_get_result(conn).map_err(|e| match e { + AttachError::DatabaseError(e) => TxnError::from(e), + e => TxnError::CustomError(e), + }) + }) + .await; + + // "attach_and_get_result" should return the "attached" resource. + let returned_resource = result.expect("Attach should have worked"); + assert_eq!( + returned_resource.collection_id.expect("Expected a collection ID"), + collection_id + ); + // The returned resource value should be the latest value in the DB. + assert_eq!(returned_resource, get_resource(resource_id, &pool).await); + // The generation number should have incremented in the collection. 
+ assert_eq!( + collection.rcgen + 1, + get_collection(collection_id, &pool).await.rcgen + ); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + #[tokio::test] async fn test_attach_multiple_times() { let logctx = dev::test_setup_log("test_attach_multiple_times"); @@ -1663,6 +1645,4 @@ mod test { db.cleanup().await.unwrap(); logctx.cleanup_successful(); } - - // TODO: Sync API } From d7db1ed271dd8aa5953ee3f8ca2157378456124f Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 20 May 2022 17:13:40 -0400 Subject: [PATCH 12/29] wip detach --- common/src/sql/dbinit.sql | 3 + nexus/src/app/instance.rs | 232 +---- nexus/src/app/sagas/disk_attach.rs | 129 --- nexus/src/app/sagas/mod.rs | 6 - nexus/src/db/collection_attach.rs | 65 +- nexus/src/db/collection_detach.rs | 1283 ++++++++++++++++++++++++++++ nexus/src/db/datastore.rs | 295 +++++++ nexus/src/db/mod.rs | 1 + nexus/src/db/model/instance.rs | 21 +- nexus/src/db/schema.rs | 1 + 10 files changed, 1685 insertions(+), 351 deletions(-) delete mode 100644 nexus/src/app/sagas/disk_attach.rs create mode 100644 nexus/src/db/collection_detach.rs diff --git a/common/src/sql/dbinit.sql b/common/src/sql/dbinit.sql index 3e1d553f999..41373079da6 100644 --- a/common/src/sql/dbinit.sql +++ b/common/src/sql/dbinit.sql @@ -340,6 +340,9 @@ CREATE TABLE omicron.public.instance ( /* user data for instance initialization systems (e.g. cloud-init) */ user_data BYTES NOT NULL, + /* child resource generation number, per RFD 192 */ + rcgen INT NOT NULL, + /* * TODO Would it make sense for the runtime state to live in a separate * table? diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index 4dab9af3f4f..b8e9c592591 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -579,154 +579,46 @@ impl super::Nexus { /// Attach a disk to an instance. 
pub async fn instance_attach_disk( - &self, + self: &Arc, opctx: &OpContext, organization_name: &Name, project_name: &Name, instance_name: &Name, disk_name: &Name, ) -> UpdateResult { - let (.., authz_project, authz_disk, db_disk) = + let (.., authz_project, authz_disk, _) = LookupPath::new(opctx, &self.db_datastore) .organization_name(organization_name) .project_name(project_name) .disk_name(disk_name) .fetch() .await?; - let (.., authz_instance, db_instance) = + let (.., authz_instance, _) = LookupPath::new(opctx, &self.db_datastore) .project_id(authz_project.id()) .instance_name(instance_name) .fetch() .await?; - let instance_id = &authz_instance.id(); - // Enforce attached disks limit - let attached_disks = self - .instance_list_disks( - opctx, - organization_name, - project_name, - instance_name, - &DataPageParams { - marker: None, - direction: dropshot::PaginationOrder::Ascending, - limit: std::num::NonZeroU32::new(MAX_DISKS_PER_INSTANCE) - .unwrap(), - }, + // TODO(https://github.com/oxidecomputer/omicron/issues/811): + // Disk attach is only implemented for instances that are not + // currently running. This operation therefore can operate exclusively + // on database state. + // + // To implement hot-plug support, we should do the following in a saga: + // - We should update the state to "Attaching", rather than + // "Attached". + // - We should then issue a request to the associated sled agent. + // - Once that completes, we should update the disk state to "Attached". 
+ let (_instance, disk) = self.db_datastore + .disk_attach( + &opctx, + &authz_instance, + &authz_disk, + MAX_DISKS_PER_INSTANCE, ) .await?; - - if attached_disks.len() == MAX_DISKS_PER_INSTANCE as usize { - return Err(Error::invalid_request(&format!( - "cannot attach more than {} disks to instance!", - MAX_DISKS_PER_INSTANCE - ))); - } - - fn disk_attachment_error( - disk: &db::model::Disk, - ) -> CreateResult { - let disk_status = match disk.runtime().state().into() { - DiskState::Destroyed => "disk is destroyed", - DiskState::Faulted => "disk is faulted", - DiskState::Creating => "disk is detached", - DiskState::Detached => "disk is detached", - - // It would be nice to provide a more specific message here, but - // the appropriate identifier to provide the user would be the - // other instance's name. Getting that would require another - // database hit, which doesn't seem worth it for this. - DiskState::Attaching(_) => { - "disk is attached to another instance" - } - DiskState::Attached(_) => { - "disk is attached to another instance" - } - DiskState::Detaching(_) => { - "disk is attached to another instance" - } - }; - let message = format!( - "cannot attach disk \"{}\": {}", - disk.name().as_str(), - disk_status - ); - Err(Error::InvalidRequest { message }) - } - - match &db_disk.state().into() { - // If we're already attaching or attached to the requested instance, - // there's nothing else to do. - // TODO-security should it be an error if you're not authorized to - // do this and we did not actually have to do anything? - DiskState::Attached(id) if id == instance_id => return Ok(db_disk), - - // If the disk is currently attaching or attached to another - // instance, fail this request. Users must explicitly detach first - // if that's what they want. If it's detaching, they have to wait - // for it to become detached. - // TODO-debug: the error message here could be better. 
We'd have to - // look up the other instance by id (and gracefully handle it not - // existing). - DiskState::Attached(id) => { - assert_ne!(id, instance_id); - return disk_attachment_error(&db_disk); - } - DiskState::Detaching(_) => { - return disk_attachment_error(&db_disk); - } - DiskState::Attaching(id) if id != instance_id => { - return disk_attachment_error(&db_disk); - } - DiskState::Destroyed => { - return disk_attachment_error(&db_disk); - } - DiskState::Faulted => { - return disk_attachment_error(&db_disk); - } - - DiskState::Creating => (), - DiskState::Detached => (), - DiskState::Attaching(id) => { - assert_eq!(id, instance_id); - } - } - - match &db_instance.runtime_state.state.state() { - // If there's a propolis zone for this instance, ask the Sled Agent - // to hot-plug the disk. - // - // TODO this will probably involve volume construction requests as - // well! - InstanceState::Running | InstanceState::Starting => { - // TODO: set state as "attaching". - // TODO: also can we check rcgens - - self.disk_set_runtime( - opctx, - &authz_disk, - &db_disk, - self.instance_sled(&db_instance).await?, - sled_agent_client::types::DiskStateRequested::Attached( - *instance_id, - ), - ) - .await?; - } - - _ => { - // If there is not a propolis zone, then disk attach only occurs - // in the DB. - let new_runtime = db_disk.runtime().attach(*instance_id); - - self.db_datastore - .disk_update_runtime(opctx, &authz_disk, &new_runtime) - .await?; - } - } - - self.db_datastore.disk_refetch(opctx, &authz_disk).await + Ok(disk) } /// Detach a disk from an instance. 
@@ -745,76 +637,32 @@ impl super::Nexus { .disk_name(disk_name) .fetch() .await?; - let (.., authz_instance, db_instance) = + let (.., authz_instance, _db_instance) = LookupPath::new(opctx, &self.db_datastore) .project_id(authz_project.id()) .instance_name(instance_name) .fetch() .await?; - let instance_id = &authz_instance.id(); - - match &db_disk.state().into() { - // This operation is a noop if the disk is not attached or already - // detaching from the same instance. - // TODO-security should it be an error if you're not authorized to - // do this and we did not actually have to do anything? - DiskState::Creating => return Ok(db_disk), - DiskState::Detached => return Ok(db_disk), - DiskState::Destroyed => return Ok(db_disk), - DiskState::Faulted => return Ok(db_disk), - DiskState::Detaching(id) if id == instance_id => { - return Ok(db_disk) - } - // This operation is not allowed if the disk is attached to some - // other instance. - DiskState::Attaching(id) if id != instance_id => { - return Err(Error::InvalidRequest { - message: String::from("disk is attached elsewhere"), - }); - } - DiskState::Attached(id) if id != instance_id => { - return Err(Error::InvalidRequest { - message: String::from("disk is attached elsewhere"), - }); - } - DiskState::Detaching(_) => { - return Err(Error::InvalidRequest { - message: String::from("disk is attached elsewhere"), - }); - } - - // These are the cases where we have to do something. - DiskState::Attaching(_) => (), - DiskState::Attached(_) => (), - } - - // If there's a propolis zone for this instance, ask the Sled - // Agent to hot-remove the disk. 
- match &db_instance.runtime_state.state.state() { - InstanceState::Running | InstanceState::Starting => { - self.disk_set_runtime( - opctx, - &authz_disk, - &db_disk, - self.instance_sled(&db_instance).await?, - sled_agent_client::types::DiskStateRequested::Detached, - ) - .await?; - } - - _ => { - // If there is not a propolis zone, then disk detach only occurs - // in the DB. - let new_runtime = db_disk.runtime().detach(); - - self.db_datastore - .disk_update_runtime(opctx, &authz_disk, &new_runtime) - .await?; - } - } - - self.db_datastore.disk_refetch(opctx, &authz_disk).await + // TODO(https://github.com/oxidecomputer/omicron/issues/811): + // Disk detach is only implemented for instances that are not + // currently running. This operation therefore can operate exclusively + // on database state. + // + // To implement hot-unplug support, we should do the following in a saga: + // - We should update the state to "Detaching", rather than + // "Detached". + // - We should then issue a request to the associated sled agent. + // - Once that completes, we should update the disk state to "Detached". + let disk = self.db_datastore + .disk_detach( + &opctx, + &authz_instance, + &authz_disk, + &db_disk, + ) + .await?; + Ok(disk) } /// Create a network interface attached to the provided instance. diff --git a/nexus/src/app/sagas/disk_attach.rs b/nexus/src/app/sagas/disk_attach.rs deleted file mode 100644 index 53502a23008..00000000000 --- a/nexus/src/app/sagas/disk_attach.rs +++ /dev/null @@ -1,129 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
- -use crate::external_api::params; -use crate::saga_interface::SagaContext; -use crate::{authn, db}; -use lazy_static::lazy_static; -use serde::Deserialize; -use serde::Serialize; -use std::sync::Arc; -use steno::new_action_noop_undo; -use steno::ActionContext; -use steno::ActionError; -use steno::ActionFunc; -use steno::SagaTemplate; -use steno::SagaTemplateBuilder; -use steno::SagaType; -use uuid::Uuid; - -pub const SAGA_NAME: &'static str = "disk-attach"; - -lazy_static! { - pub static ref SAGA_TEMPLATE: Arc> = - Arc::new(saga_disk_attach()); -} - -#[derive(Debug, Deserialize, Serialize)] -pub struct Params { - pub serialized_authn: authn::saga::Serialized, - pub instance_id: Uuid, - pub attach_params: params::InstanceDiskAttach, -} - -#[derive(Debug)] -pub struct SagaDiskAttach; -impl SagaType for SagaDiskAttach { - type SagaParamsType = Arc; - type ExecContextType = Arc; -} - -fn saga_disk_attach() -> SagaTemplate { - let mut template_builder = SagaTemplateBuilder::new(); - - template_builder.append( - "attaching_disk", - "SetDiskStateAttaching", - ActionFunc::new_action( - sda_set_disk_record_attaching, - sda_set_disk_record_attaching_undo, - ), - ); - - template_builder.append( - "sled_reported_runtime", - "UpdateSledAgent", - ActionFunc::new_action( - sda_update_sled_agent, - sda_update_sled_agent_undo, - ), - ); - - template_builder.append( - "disk_runtime", - "SetDiskStateAttached", - new_action_noop_undo(sda_set_disk_record_attached), - ); - - template_builder.build() -} - -async fn sda_set_disk_record_attaching( - sagactx: ActionContext, -) -> Result { - let _osagactx = sagactx.user_data(); - let _params = sagactx.saga_params(); - - // TODO: Issue CTE - // - // To actually perform the update: - // - // - Disk State must be: - // Attaching (w/Instance ID = ID | Attached (w/Instance ID = ID) | Detached - // - // - Instance state must be: - // Running | Starting | Rebooting | Migrating -> Issue attach to sled - // Stopping | Stopped | Repairing -> Update 
DB - // _ -> Error - // - // - # of attached disks must be less than capacity - - todo!(); -} - -async fn sda_set_disk_record_attaching_undo( - sagactx: ActionContext, -) -> Result<(), anyhow::Error> { - let _osagactx = sagactx.user_data(); - - // TODO: If we get here, we must have attached the disk. - // Ergo, set the state to "detached"? - todo!(); -} - -async fn sda_update_sled_agent( - sagactx: ActionContext, -) -> Result { - let _log = sagactx.user_data().log(); - - // TODO: call "disk_put" - todo!(); -} - -async fn sda_update_sled_agent_undo( - _sagactx: ActionContext, -) -> Result<(), anyhow::Error> { - // TODO: Undo the "disk_put". - todo!(); -} - -async fn sda_set_disk_record_attached( - sagactx: ActionContext, -) -> Result { - let _osagactx = sagactx.user_data(); - - // TODO: Move the disk state from "Attaching" -> "Attached" - - todo!(); -} diff --git a/nexus/src/app/sagas/mod.rs b/nexus/src/app/sagas/mod.rs index 1b97ecb7e69..ba3a88e7575 100644 --- a/nexus/src/app/sagas/mod.rs +++ b/nexus/src/app/sagas/mod.rs @@ -20,7 +20,6 @@ use steno::SagaTemplateGeneric; use steno::SagaType; use uuid::Uuid; -pub mod disk_attach; pub mod disk_create; pub mod disk_delete; pub mod instance_create; @@ -45,11 +44,6 @@ fn all_templates( Arc::clone(&instance_migrate::SAGA_TEMPLATE) as Arc>>, ), - ( - disk_attach::SAGA_NAME, - Arc::clone(&disk_attach::SAGA_TEMPLATE) - as Arc>>, - ), ( disk_create::SAGA_NAME, Arc::clone(&disk_create::SAGA_TEMPLATE) diff --git a/nexus/src/db/collection_attach.rs b/nexus/src/db/collection_attach.rs index c741d79abe0..da167bab7fb 100644 --- a/nexus/src/db/collection_attach.rs +++ b/nexus/src/db/collection_attach.rs @@ -368,6 +368,7 @@ pub trait DatastoreAttachTarget: Selectable { .filter(resource_table().primary_key().eq(resource_id)); let collection_from_clause = collection_table().from_clause(); + let collection_returning_clause = Self::as_returning(); let resource_returning_clause = ResourceType::as_returning(); AttachToCollectionStatement { 
collection_exists_query, @@ -378,6 +379,7 @@ pub trait DatastoreAttachTarget: Selectable { max_attached_resources, update_resource_statement, collection_from_clause, + collection_returning_clause, resource_returning_clause, } } @@ -410,6 +412,8 @@ where // Describes the target of the collection table UPDATE. collection_from_clause: as QuerySource>::FromClause, + // Describes what should be returned after UPDATE-ing the collection. + collection_returning_clause: AsSelect, // Describes what should be returned after UPDATE-ing the resource. resource_returning_clause: AsSelect, } @@ -426,11 +430,11 @@ where /// Result of [`AttachToCollectionStatement`] when executed asynchronously pub type AsyncAttachToCollectionResult = - Result>; + Result<(C, ResourceType), AttachError>; /// Result of [`AttachToCollectionStatement`] when executed synchronously pub type SyncAttachToCollectionResult = - Result>; + Result<(C, ResourceType), AttachError>; /// Errors returned by [`AttachToCollectionStatement`]. #[derive(Debug)] @@ -452,7 +456,7 @@ pub enum AttachError { /// Describes the type returned from the actual CTE, which is parsed /// and interpreted before propagating it to users of the Rust API. 
pub type RawOutput = - (i64, Option, Option, Option); + (i64, Option, Option, Option, Option); impl AttachToCollectionStatement where @@ -503,11 +507,12 @@ where fn parse_result( result: RawOutput, - ) -> Result> { + ) -> Result<(C, ResourceType), AttachError> { let ( attached_count, collection_before_update, resource_before_update, + collection_after_update, resource_after_update, ) = result; @@ -517,13 +522,14 @@ where let resource_before_update = resource_before_update .ok_or_else(|| AttachError::ResourceNotFound)?; - match resource_after_update { - Some(resource) => Ok(resource), - None => Err(AttachError::NoUpdate { + match (collection_after_update, resource_after_update) { + (Some(collection), Some(resource)) => Ok((collection, resource)), + (None, None) => Err(AttachError::NoUpdate { attached_count, resource: resource_before_update, collection: collection_before_update, }), + _ => panic!("Partial update applied - this is a CTE bug"), } } } @@ -544,6 +550,8 @@ where Nullable>, // If the resource exists, the value before update. Nullable>, + // If the collection was updated, the new value. + Nullable>, // If the resource was updated, the new value. 
Nullable>, ); @@ -644,7 +652,7 @@ type BoxedDslOutput = diesel::internal::table_macro::BoxedSelectStatement< /// // updated_collection AS MATERIALIZED ( /// // UPDATE C SET = + 1 /// // WHERE IN (SELECT FROM collection_info) AND (SELECT * FROM do_update) -/// // RETURNING 1 +/// // RETURNING * /// // ), /// // /* Update the resource */ /// // updated_resource AS ( @@ -656,6 +664,7 @@ type BoxedDslOutput = diesel::internal::table_macro::BoxedSelectStatement< /// // (SELECT * FROM resource_count) /// // LEFT JOIN (SELECT * FROM collection_by_id) ON TRUE /// // LEFT JOIN (SELECT * FROM resource_by_id) ON TRUE +/// // LEFT JOIN (SELECT * FROM updated_collection) ON TRUE /// // LEFT JOIN (SELECT * FROM resource) ON TRUE; /// ``` impl QueryFragment @@ -672,6 +681,8 @@ where QueryFragment, // Necessary to "walk_ast" over "self.resource_returning_clause". AsSelect: QueryFragment, + // Necessary to "walk_ast" over "self.collection_returning_clause". + AsSelect: QueryFragment, { fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, Pg>) -> QueryResult<()> { out.unsafe_to_cache_prepared(); @@ -718,9 +729,8 @@ where out.push_identifier(CollectionPrimaryKey::::NAME)?; out.push_sql(" IN (SELECT "); out.push_identifier(CollectionPrimaryKey::::NAME)?; - out.push_sql( - " FROM collection_info) AND (SELECT * FROM do_update) RETURNING 1", - ); + out.push_sql(" FROM collection_info) AND (SELECT * FROM do_update) RETURNING "); + self.collection_returning_clause.walk_ast(out.reborrow())?; out.push_sql("), "); out.push_sql("updated_resource AS ("); @@ -756,6 +766,7 @@ where (SELECT * FROM resource_count) \ LEFT JOIN (SELECT * FROM collection_by_id) ON TRUE \ LEFT JOIN (SELECT * FROM resource_by_id) ON TRUE \ + LEFT JOIN (SELECT * FROM updated_collection) ON TRUE \ LEFT JOIN (SELECT * FROM updated_resource) ON TRUE;", ); @@ -1040,7 +1051,14 @@ mod test { WHERE \ \"id\" IN (SELECT \"id\" FROM collection_info) AND \ (SELECT * FROM do_update) \ - RETURNING 1\ + RETURNING \ + 
\"test_schema\".\"collection\".\"id\", \ + \"test_schema\".\"collection\".\"name\", \ + \"test_schema\".\"collection\".\"description\", \ + \"test_schema\".\"collection\".\"time_created\", \ + \"test_schema\".\"collection\".\"time_modified\", \ + \"test_schema\".\"collection\".\"time_deleted\", \ + \"test_schema\".\"collection\".\"rcgen\"\ ), \ updated_resource AS (\ UPDATE \ @@ -1058,11 +1076,12 @@ mod test { \"test_schema\".\"resource\".\"time_modified\", \ \"test_schema\".\"resource\".\"time_deleted\", \ \"test_schema\".\"resource\".\"collection_id\"\ - ) \ + ) \ SELECT * FROM \ (SELECT * FROM resource_count) \ LEFT JOIN (SELECT * FROM collection_by_id) ON TRUE \ LEFT JOIN (SELECT * FROM resource_by_id) ON TRUE \ + LEFT JOIN (SELECT * FROM updated_collection) ON TRUE \ LEFT JOIN (SELECT * FROM updated_resource) ON TRUE; -- binds: [cccccccc-cccc-cccc-cccc-cccccccccccc, aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa, cccccccc-cccc-cccc-cccc-cccccccccccc, cccccccc-cccc-cccc-cccc-cccccccccccc, aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa, cccccccc-cccc-cccc-cccc-cccccccccccc, aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa]"; assert_eq!(query, expected_query); } @@ -1165,12 +1184,13 @@ mod test { .await; // "attach_and_get_result_async" should return the "attached" resource. - let returned_resource = attach.expect("Attach should have worked"); + let (returned_collection, returned_resource) = attach.expect("Attach should have worked"); assert_eq!( returned_resource.collection_id.expect("Expected a collection ID"), collection_id ); - // The returned resource value should be the latest value in the DB. + // The returned value should be the latest value in the DB. + assert_eq!(returned_collection, get_collection(collection_id, &pool).await); assert_eq!(returned_resource, get_resource(resource_id, &pool).await); // The generation number should have incremented in the collection. assert_eq!( @@ -1224,12 +1244,13 @@ mod test { .await; // "attach_and_get_result" should return the "attached" resource. 
- let returned_resource = result.expect("Attach should have worked"); + let (returned_collection, returned_resource) = result.expect("Attach should have worked"); assert_eq!( returned_resource.collection_id.expect("Expected a collection ID"), collection_id ); - // The returned resource value should be the latest value in the DB. + // The returned values should be the latest value in the DB. + assert_eq!(returned_collection, get_collection(collection_id, &pool).await); assert_eq!(returned_resource, get_resource(resource_id, &pool).await); // The generation number should have incremented in the collection. assert_eq!( @@ -1278,7 +1299,7 @@ mod test { .await; // "attach_and_get_result_async" should return the "attached" resource. - let returned_resource = attach.expect("Attach should have worked"); + let (_, returned_resource) = attach.expect("Attach should have worked"); assert_eq!( returned_resource .collection_id @@ -1330,7 +1351,7 @@ mod test { .attach_and_get_result_async(pool.pool()) .await; assert_eq!( - attach.expect("Attach should have worked").id(), + attach.expect("Attach should have worked").1.id(), resource_id1 ); @@ -1400,7 +1421,7 @@ mod test { .attach_and_get_result_async(pool.pool()) .await; assert_eq!( - attach.expect("Attach should have worked").id(), + attach.expect("Attach should have worked").1.id(), resource_id ); @@ -1529,7 +1550,7 @@ mod test { .attach_and_get_result_async(pool.pool()) .await; - let returned_resource = attach.expect("Attach should have worked"); + let (_, returned_resource) = attach.expect("Attach should have worked"); assert_eq!( returned_resource.collection_id.expect("Expected a collection ID"), collection_id @@ -1627,7 +1648,7 @@ mod test { .attach_and_get_result_async(pool.pool()) .await; - let returned_resource = attach.expect("Attach should have worked"); + let (_, returned_resource) = attach.expect("Attach should have worked"); assert_eq!(returned_resource.id(), resource_id1); // Note that only "resource1" should be 
attached. diff --git a/nexus/src/db/collection_detach.rs b/nexus/src/db/collection_detach.rs new file mode 100644 index 00000000000..9119ba12e12 --- /dev/null +++ b/nexus/src/db/collection_detach.rs @@ -0,0 +1,1283 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! CTE for detaching a resource from a collection. +//! +//! This atomically: +//! - Checks if the collection exists and is not soft deleted +//! - Checks if the resource exists and is not soft deleted +//! - Validates conditions on both the collection and resource +//! - Updates the resource row + +use super::pool::DbConnection; +use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionManager, PoolError}; +use crate::db::collection_attach::DatastoreAttachTarget; +use diesel::associations::HasTable; +use diesel::expression::Expression; +use diesel::helper_types::*; +use diesel::pg::Pg; +use diesel::prelude::*; +use diesel::query_builder::*; +use diesel::query_dsl::methods as query_methods; +use diesel::query_source::Table; +use diesel::sql_types::{Nullable, SingleValue}; +use std::fmt::Debug; + +/// The table representing the collection. The resource references +/// this table. +type CollectionTable = <>::CollectionIdColumn as Column>::Table; +/// The table representing the resource. This table contains an +/// ID acting as a foreign key into the collection table. +type ResourceTable = <>::ResourceCollectionIdColumn as Column>::Table; +/// The default WHERE clause of the resource table. 
+type ResourceTableWhereClause = + as IntoUpdateTarget>::WhereClause; +type CollectionIdColumn = + >::CollectionIdColumn; +type ResourceIdColumn = + >::ResourceIdColumn; + +/// Trick to check that columns come from the same table +pub trait TypesAreSame2 {} +impl TypesAreSame2 for (T, T) {} +pub trait TypesAreSame3 {} +impl TypesAreSame3 for (T, T, T) {} + +/// Trait to be implemented by structs representing a detachable collection. +/// +/// A blanket implementation is provided for traits that implement +/// [`DatastoreAttachTarget`]. +pub trait DatastoreDetachTarget: Selectable { + /// The Rust type of the collection and resource ids (typically Uuid). + type Id: Copy + Debug + PartialEq + Send + 'static; + + /// The primary key column of the collection. + type CollectionIdColumn: Column; + + /// The time deleted column in the CollectionTable + type CollectionTimeDeletedColumn: Column + Default; + + /// The primary key column of the resource + type ResourceIdColumn: Column; + + /// The column in the resource acting as a foreign key into the Collection + type ResourceCollectionIdColumn: Column + Default; + + /// The time deleted column in the ResourceTable + type ResourceTimeDeletedColumn: Column + Default; + + /// Creates a statement for detaching a resource from the given collection. + /// + /// This statement allows callers to atomically check the state of a + /// collection and a resource while detaching a resource. + /// + /// - `collection_id`: Primary key of the collection being removed from. + /// - `resource_id`: Primary key of the resource being detached. + /// - `collection_query`: An optional query for collection state. The + /// CTE will automatically filter this query to `collection_id`, and + /// validate that the "time deleted" column is NULL. + /// - `resource_query`: An optional query for the resource state. 
The + /// CTE will automatically filter this query to `resource_id`, + /// validate that the "time deleted" column is NULL, and validate that the + /// "collection_id" column points to `collection_id`. + /// - `update`: An update statement, identifying how the resource object + /// should be modified to be detached + /// + /// The V type refers to the "update target" of the UpdateStatement, + /// and should generally be inferred rather than explicitly specified. + fn detach_resource( + collection_id: Self::Id, + resource_id: Self::Id, + + collection_query: BoxedQuery>, + resource_query: BoxedQuery>, + + // We are intentionally picky about this update statement: + // - The second argument - the WHERE clause - must match the default + // for the table. This encourages the "resource_query" filter to be + // used instead, and makes it possible for the CTE to modify the + // filter here (ensuring "resource_id" is selected). + // - Additionally, UpdateStatement's fourth argument defaults to Ret = + // NoReturningClause. This enforces that the given input statement does + // not have a RETURNING clause, and also lets the CTE control this + // value. + update: UpdateStatement< + ResourceTable, + ResourceTableWhereClause, + V, + >, + ) -> DetachFromCollectionStatement + where + // Ensure the "collection" columns all belong to the same table. + ( + ::Table, + ::Table, + ): TypesAreSame2, + // Ensure the "resource" columns all belong to the same table. + ( + ::Table, + ::Table, + ::Table, + ): TypesAreSame3, + Self: Sized, + // Enables the "table()" method on the Collection. + CollectionTable: HasTable
> + + 'static + + Send + + Table + // Allows calling ".into_boxed()" on the table. + + query_methods::BoxedDsl< + 'static, + Pg, + Output = BoxedDslOutput>, + >, + // Enables the "table()" method on the Resource. + ResourceTable: HasTable
> + + 'static + + Send + + Table + // Allows calling ".into_boxed()" on the table. + + query_methods::BoxedDsl< + 'static, + Pg, + Output = BoxedDslOutput>, + >, + // Allows treating "collection_exists_query" as a boxed "dyn QueryFragment". + as QuerySource>::FromClause: + QueryFragment + Send, + // Allows treating "resource_exists_query" as a boxed "dyn QueryFragment". + as QuerySource>::FromClause: + QueryFragment + Send, + // Allows sending "collection_exists_query" between threads. + as AsQuery>::SqlType: Send, + // Allows sending "resource_exists_query" between threads. + as AsQuery>::SqlType: Send, + // Allows calling ".filter()" on the boxed collection table. + BoxedQuery>: + query_methods::FilterDsl< + Eq, Self::Id>, + Output = BoxedQuery>, + // Filter by time deleted = NULL + > + query_methods::FilterDsl< + IsNull, + Output = BoxedQuery>, + >, + // Allows calling ".filter()" on the boxed resource table. + BoxedQuery>: query_methods::FilterDsl< + Eq, Self::Id>, + Output = BoxedQuery>, + // Filter by collection ID = ID + > + query_methods::FilterDsl< + Eq, + Output = BoxedQuery>, + // Filter by time deleted = NULL + > + query_methods::FilterDsl< + IsNull, + Output = BoxedQuery>, + >, + + // See: "update_resource_statement". + // + // Allows referencing the default "WHERE" clause of the update + // statement. + ResourceTable: IntoUpdateTarget, + // Allows calling "update.into_boxed()" + UpdateStatement< + ResourceTable, + ResourceTableWhereClause, + V, + >: query_methods::BoxedDsl< + 'static, + Pg, + Output = BoxedUpdateStatement< + 'static, + Pg, + ResourceTable, + V, + >, + >, + // Allows calling + // ".filter(resource_table().primary_key().eq(resource_id)" on the + // boxed update statement. + BoxedUpdateStatement<'static, Pg, ResourceTable, V>: + query_methods::FilterDsl< + Eq, Self::Id>, + Output = BoxedUpdateStatement< + 'static, + Pg, + ResourceTable, + V, + >, + >, + + // Allows using "id" in expressions (e.g. ".eq(...)") with... 
+ Self::Id: diesel::expression::AsExpression< + // ... The Collection table's PK + SerializedCollectionPrimaryKey, + > + diesel::expression::AsExpression< + // ... The Resource table's PK + SerializedResourcePrimaryKey, + > + diesel::expression::AsExpression< + // ... The Resource table's FK to the Collection table + SerializedResourceForeignKey, + >, + as Expression>::SqlType: + SingleValue, + as Expression>::SqlType: + SingleValue, + ::SqlType: SingleValue, + + // Allows calling "is_null()" on the following columns. + Self::CollectionTimeDeletedColumn: ExpressionMethods, + Self::ResourceTimeDeletedColumn: ExpressionMethods, + Self::ResourceCollectionIdColumn: ExpressionMethods, + + // Necessary to actually select the resource in the output type. + ResourceType: Selectable, + { + let collection_table = + || as HasTable>::table(); + let resource_table = + || as HasTable>::table(); + + // Create new queries to determine if the collection and resources + // already exist. + let collection_exists_query = Box::new( + collection_table() + .into_boxed() + .filter(collection_table().primary_key().eq(collection_id)) + .filter(Self::CollectionTimeDeletedColumn::default().is_null()), + ); + let resource_exists_query = Box::new( + resource_table() + .into_boxed() + .filter(resource_table().primary_key().eq(resource_id)) + .filter(Self::ResourceTimeDeletedColumn::default().is_null()), + ); + + // For the queries which decide whether or not we'll perform the update, + // extend the user-provided arguments. + // + // We force these queries to: + // - Check against the primary key of the target objects + // - Ensure the objects are not deleted + // - (for the resource) Ensure it is attached + // - (for the update) Ensure that only the resource with "resource_id" + // is modified. 
+ let collection_query = Box::new( + collection_query + .filter(collection_table().primary_key().eq(collection_id)) + .filter(Self::CollectionTimeDeletedColumn::default().is_null()), + ); + let resource_query = Box::new( + resource_query + .filter(resource_table().primary_key().eq(resource_id)) + .filter(Self::ResourceTimeDeletedColumn::default().is_null()) + .filter(Self::ResourceCollectionIdColumn::default().eq(collection_id)), + ); + + let update_resource_statement = update + .into_boxed() + .filter(resource_table().primary_key().eq(resource_id)); + + let resource_returning_clause = ResourceType::as_returning(); + DetachFromCollectionStatement { + collection_exists_query, + resource_exists_query, + collection_query, + resource_query, + update_resource_statement, + resource_returning_clause, + } + } +} + +impl DatastoreDetachTarget for T +where T: DatastoreAttachTarget { + type Id = T::Id; + type CollectionIdColumn = T::CollectionIdColumn; + type CollectionTimeDeletedColumn = T::CollectionTimeDeletedColumn; + type ResourceIdColumn = T::ResourceIdColumn; + type ResourceCollectionIdColumn = T::ResourceCollectionIdColumn; + type ResourceTimeDeletedColumn = T::ResourceTimeDeletedColumn; +} + +/// The CTE described in the module docs +#[must_use = "Queries must be executed"] +pub struct DetachFromCollectionStatement +where + ResourceType: Selectable, + C: DatastoreDetachTarget, +{ + // Query which answers: "Does the collection exist?" + collection_exists_query: Box + Send>, + // Query which answers: "Does the resource exist?" + resource_exists_query: Box + Send>, + // A (mostly) user-provided query for validating the collection. + collection_query: Box + Send>, + // A (mostly) user-provided query for validating the resource. + resource_query: Box + Send>, + + // Update statement for the resource. + update_resource_statement: + BoxedUpdateStatement<'static, Pg, ResourceTable, V>, + // Describes what should be returned after UPDATE-ing the resource. 
+ resource_returning_clause: AsSelect, +} + +impl QueryId + for DetachFromCollectionStatement +where + ResourceType: Selectable, + C: DatastoreDetachTarget, +{ + type QueryId = (); + const HAS_STATIC_QUERY_ID: bool = false; +} + +/// Result of [`DetachFromCollectionStatement`] when executed asynchronously +pub type AsyncDetachFromCollectionResult = + Result>; + +/// Result of [`DetachFromCollectionStatement`] when executed synchronously +pub type SyncDetachFromCollectionResult = + Result>; + +/// Errors returned by [`DetachFromCollectionStatement`]. +#[derive(Debug)] +pub enum DetachError { + /// The collection that the query was removing from does not exist + CollectionNotFound, + /// The resource being detached does not exist + ResourceNotFound, + /// Although the resource and collection exist, the update did not occur + /// + /// The unchanged resource and collection are returned as a part of this + /// error; it is the responsibility of the caller to determine which + /// condition was not met. + NoUpdate { resource: ResourceType, collection: C }, + /// Other database error + DatabaseError(E), +} + +/// Describes the type returned from the actual CTE, which is parsed +/// and interpreted before propagating it to users of the Rust API. +pub type RawOutput = + (i64, Option, Option, Option); + +impl DetachFromCollectionStatement +where + ResourceType: 'static + Debug + Send + Selectable, + C: 'static + Debug + DatastoreDetachTarget + Send, + ResourceTable: 'static + Table + Send + Copy + Debug, + V: 'static + Send, + DetachFromCollectionStatement: Send, +{ + /// Issues the CTE asynchronously and parses the result. + pub async fn detach_and_get_result_async( + self, + pool: &bb8::Pool>, + ) -> AsyncDetachFromCollectionResult + where + // We require this bound to ensure that "Self" is runnable as query. 
+ Self: query_methods::LoadQuery< + 'static, + DbConnection, + RawOutput, + >, + { + self.get_result_async::>(pool) + .await + // If the database returns an error, propagate it right away. + .map_err(DetachError::DatabaseError) + // Otherwise, parse the output to determine if the CTE succeeded. + .and_then(Self::parse_result) + } + + /// Issues the CTE synchronously and parses the result. + pub fn detach_and_get_result( + self, + conn: &mut DbConnection, + ) -> SyncDetachFromCollectionResult + where + // We require this bound to ensure that "Self" is runnable as query. + Self: query_methods::LoadQuery< + 'static, + DbConnection, + RawOutput, + >, + { + self.get_result::>(conn) + .map_err(DetachError::DatabaseError) + .and_then(Self::parse_result) + } + + fn parse_result( + result: RawOutput, + ) -> Result> { + let ( + _, + collection_before_update, + resource_before_update, + resource_after_update, + ) = result; + + let collection_before_update = collection_before_update + .ok_or_else(|| DetachError::CollectionNotFound)?; + + let resource_before_update = resource_before_update + .ok_or_else(|| DetachError::ResourceNotFound)?; + + match resource_after_update { + Some(resource) => Ok(resource), + None => Err(DetachError::NoUpdate { + resource: resource_before_update, + collection: collection_before_update, + }), + } + } +} + +type SelectableSqlType = + <>::SelectExpression as Expression>::SqlType; + +impl Query + for DetachFromCollectionStatement +where + ResourceType: Selectable, + C: DatastoreDetachTarget, +{ + type SqlType = ( + // Ignored "SELECT 1" value + diesel::sql_types::BigInt, + // If the collection exists, the value before update. + Nullable>, + // If the resource exists, the value before update. + Nullable>, + // If the resource was updated, the new value. + Nullable>, + ); +} + +impl RunQueryDsl + for DetachFromCollectionStatement +where + ResourceType: Selectable, + C: DatastoreDetachTarget, +{ +} + +// Representation of Primary Key in Rust. 
+type CollectionPrimaryKey = + as Table>::PrimaryKey; +type ResourcePrimaryKey = + as Table>::PrimaryKey; +type ResourceForeignKey = + >::ResourceCollectionIdColumn; + +// Representation of Primary Key in SQL. +type SerializedCollectionPrimaryKey = + as diesel::Expression>::SqlType; +type SerializedResourcePrimaryKey = + as diesel::Expression>::SqlType; +type SerializedResourceForeignKey = + as diesel::Expression>::SqlType; + +type TableSqlType = ::SqlType; + +type BoxedQuery = diesel::helper_types::IntoBoxed<'static, T, Pg>; +type BoxedDslOutput = diesel::internal::table_macro::BoxedSelectStatement< + 'static, + TableSqlType, + diesel::internal::table_macro::FromClause, + Pg, +>; + +/// This implementation uses a CTE which attempts to do the following: +/// +/// 1. (collection_by_id, resource_by_id): Identify if the collection and +/// resource objects exist at all. +/// 2. (collection_info, resource_info): Checks for arbitrary user-provided +/// constraints on the collection and resource objects. +/// 3. (do_update): IFF all previous checks succeeded, make a decision to perfom +/// an update. +/// 4. (updated_resource): Apply user-provided updates on the resource - +/// presumably, setting the collection ID value. +/// +/// This is implemented as follows: +/// +/// ```text +/// // WITH +/// // /* Look up the collection - Check for existence only! */ +/// // collection_by_id AS ( +/// // SELECT * FROM C +/// // WHERE = AND IS NULL +/// // FOR UPDATE +/// // ), +/// // /* Look up the resource - Check for existence only! 
*/ +/// // resource_by_id AS ( +/// // SELECT * FROM R +/// // WHERE = AND IS NULL +/// // FOR UPDATE +/// // ), +/// // /* Look up the collection - Check for additional constraints */ +/// // collection_info AS ( +/// // SELECT * FROM C +/// // WHERE = AND IS NULL AND +/// // +/// // FOR UPDATE +/// // ), +/// // /* Look up the resource - Check for additional constraints */ +/// // resource_info AS ( +/// // SELECT * FROM R +/// // WHERE = AND IS NULL AND +/// // = AND +/// // FOR UPDATE +/// // ), +/// // /* Make a decision on whether or not to apply ANY updates */ +/// // do_update AS ( +/// // SELECT IF( +/// // EXISTS(SELECT id FROM collection_info) AND +/// // EXISTS(SELECT id FROM resource_info), +/// // TRUE, FALSE), +/// // ), +/// // /* Update the resource */ +/// // updated_resource AS ( +/// // UPDATE R SET +/// // WHERE IN (SELECT FROM resource_info) AND (SELECT * FROM do_update) +/// // RETURNING * +/// // ) +/// // SELECT * FROM +/// // (SELECT 1) +/// // LEFT JOIN (SELECT * FROM collection_by_id) ON TRUE +/// // LEFT JOIN (SELECT * FROM resource_by_id) ON TRUE +/// // LEFT JOIN (SELECT * FROM resource) ON TRUE; +/// ``` +impl QueryFragment + for DetachFromCollectionStatement +where + ResourceType: Selectable, + C: DatastoreDetachTarget, + CollectionPrimaryKey: diesel::Column, + // Necessary to "walk_ast" over "self.update_resource_statement". + BoxedUpdateStatement<'static, Pg, ResourceTable, V>: + QueryFragment, + // Necessary to "walk_ast" over "self.resource_returning_clause". + AsSelect: QueryFragment, + // Necessary to "walk_ast" over "self.collection_returning_clause". 
+ AsSelect: QueryFragment, +{ + fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, Pg>) -> QueryResult<()> { + out.unsafe_to_cache_prepared(); + out.push_sql("WITH collection_by_id AS ("); + self.collection_exists_query.walk_ast(out.reborrow())?; + out.push_sql(" FOR UPDATE), "); + + out.push_sql("resource_by_id AS ("); + self.resource_exists_query.walk_ast(out.reborrow())?; + out.push_sql(" FOR UPDATE), "); + + out.push_sql("collection_info AS ("); + self.collection_query.walk_ast(out.reborrow())?; + out.push_sql(" FOR UPDATE), "); + + out.push_sql("resource_info AS ("); + self.resource_query.walk_ast(out.reborrow())?; + out.push_sql(" FOR UPDATE), "); + + out.push_sql("do_update AS (SELECT IF(EXISTS(SELECT "); + out.push_identifier(CollectionIdColumn::::NAME)?; + out.push_sql(" FROM collection_info) AND EXISTS(SELECT "); + out.push_identifier(ResourceIdColumn::::NAME)?; + out.push_sql(" FROM resource_info), TRUE,FALSE)), "); + + out.push_sql("updated_resource AS ("); + self.update_resource_statement.walk_ast(out.reborrow())?; + // NOTE: It is safe to start with "AND" - we forced the update statement + // to have a WHERE clause on the primary key of the resource. + out.push_sql(" AND (SELECT * FROM do_update)"); + out.push_sql(" RETURNING "); + self.resource_returning_clause.walk_ast(out.reborrow())?; + out.push_sql(") "); + + // Why do all these LEFT JOINs here? In short, to ensure that we are + // always returning a constant number of columns. + // + // Diesel parses output "one column at a time", mapping to structs or + // tuples. For example, when deserializing an "Option<(A, B, C)>" object, + // Diesel checks nullability of the "A", "B", and "C" columns. + // If any of those columns unexpectedly return NULL, the entire object is + // treated as "None". + // + // In summary: + // - Without the LEFT JOINs, we'd occassionally be returning "zero + // rows", which would make the output entirely unparseable. 
+ // - If we used an operation like COALESCE (which attempts to map the + // result of an expression to either "NULL" or a single tuple column), + // Diesel struggles to map the result back to a structure. + // + // By returning a static number of columns, each component of the + // "RawOutput" tuple can be parsed, regardless of nullability, without + // preventing later portions of the result from being parsed. + out.push_sql( + "SELECT * FROM \ + (SELECT 1) \ + LEFT JOIN (SELECT * FROM collection_by_id) ON TRUE \ + LEFT JOIN (SELECT * FROM resource_by_id) ON TRUE \ + LEFT JOIN (SELECT * FROM updated_resource) ON TRUE;", + ); + + Ok(()) + } +} + +#[cfg(test)] +mod test { + use crate::db::collection_attach::DatastoreAttachTarget; + use super::{DetachError, DatastoreDetachTarget}; + use crate::db::{ + self, error::TransactionError, identity::Resource as IdentityResource, + }; + use async_bb8_diesel::{ + AsyncConnection, AsyncRunQueryDsl, AsyncSimpleConnection, + }; + use chrono::Utc; + use db_macros::Resource; + use diesel::expression_methods::ExpressionMethods; + use diesel::pg::Pg; + use diesel::QueryDsl; + use diesel::SelectableHelper; + use nexus_test_utils::db::test_setup_database; + use omicron_common::api::external::{IdentityMetadataCreateParams, Name}; + use omicron_test_utils::dev; + use uuid::Uuid; + + table! { + test_schema.collection (id) { + id -> Uuid, + name -> Text, + description -> Text, + time_created -> Timestamptz, + time_modified -> Timestamptz, + time_deleted -> Nullable, + rcgen -> Int8, + } + } + + table! 
{ + test_schema.resource (id) { + id -> Uuid, + name -> Text, + description -> Text, + time_created -> Timestamptz, + time_modified -> Timestamptz, + time_deleted -> Nullable, + collection_id -> Nullable, + } + } + + async fn setup_db(pool: &crate::db::Pool) { + let connection = pool.pool().get().await.unwrap(); + (*connection) + .batch_execute_async( + "CREATE SCHEMA IF NOT EXISTS test_schema; \ + CREATE TABLE IF NOT EXISTS test_schema.collection ( \ + id UUID PRIMARY KEY, \ + name STRING(63) NOT NULL, \ + description STRING(512) NOT NULL, \ + time_created TIMESTAMPTZ NOT NULL, \ + time_modified TIMESTAMPTZ NOT NULL, \ + time_deleted TIMESTAMPTZ, \ + rcgen INT NOT NULL); \ + CREATE TABLE IF NOT EXISTS test_schema.resource( \ + id UUID PRIMARY KEY, \ + name STRING(63) NOT NULL, \ + description STRING(512) NOT NULL, \ + time_created TIMESTAMPTZ NOT NULL, \ + time_modified TIMESTAMPTZ NOT NULL, \ + time_deleted TIMESTAMPTZ, \ + collection_id UUID); \ + CREATE INDEX IF NOT EXISTS collection_index ON test_schema.resource ( \ + collection_id \ + ) WHERE collection_id IS NOT NULL AND time_deleted IS NULL; \ + TRUNCATE test_schema.collection; \ + TRUNCATE test_schema.resource", + ) + .await + .unwrap(); + } + + /// Describes a resource within the database. 
+ #[derive( + Clone, Queryable, Insertable, Debug, Resource, Selectable, PartialEq, + )] + #[diesel(table_name = resource)] + struct Resource { + #[diesel(embed)] + pub identity: ResourceIdentity, + pub collection_id: Option, + } + + #[derive( + Clone, Queryable, Insertable, Debug, Resource, Selectable, PartialEq, + )] + #[diesel(table_name = collection)] + struct Collection { + #[diesel(embed)] + pub identity: CollectionIdentity, + pub rcgen: i64, + } + + impl DatastoreAttachTarget for Collection { + type Id = uuid::Uuid; + + type CollectionIdColumn = collection::dsl::id; + type CollectionGenerationColumn = collection::dsl::rcgen; + type CollectionTimeDeletedColumn = collection::dsl::time_deleted; + + type ResourceIdColumn = resource::dsl::id; + type ResourceCollectionIdColumn = resource::dsl::collection_id; + type ResourceTimeDeletedColumn = resource::dsl::time_deleted; + } + + async fn insert_collection( + id: Uuid, + name: &str, + pool: &db::Pool, + ) -> Collection { + let create_params = IdentityMetadataCreateParams { + name: Name::try_from(name.to_string()).unwrap(), + description: "description".to_string(), + }; + let c = Collection { + identity: CollectionIdentity::new(id, create_params), + rcgen: 1, + }; + + diesel::insert_into(collection::table) + .values(c) + .execute_async(pool.pool()) + .await + .unwrap(); + + get_collection(id, &pool).await + } + + async fn get_collection(id: Uuid, pool: &db::Pool) -> Collection { + collection::table + .find(id) + .select(Collection::as_select()) + .first_async(pool.pool()) + .await + .unwrap() + } + + async fn insert_resource( + id: Uuid, + name: &str, + pool: &db::Pool, + ) -> Resource { + let create_params = IdentityMetadataCreateParams { + name: Name::try_from(name.to_string()).unwrap(), + description: "description".to_string(), + }; + let r = Resource { + identity: ResourceIdentity::new(id, create_params), + collection_id: None, + }; + + diesel::insert_into(resource::table) + .values(r) + 
.execute_async(pool.pool()) + .await + .unwrap(); + + get_resource(id, &pool).await + } + + async fn attach_resource( + collection_id: Uuid, + resource_id: Uuid, + pool: &db::Pool, + ) { + Collection::attach_resource( + collection_id, + resource_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + 100, + diesel::update(resource::table) + .set(resource::dsl::collection_id.eq(collection_id)) + ) + .attach_and_get_result_async(pool.pool()) + .await + .unwrap(); + } + + async fn get_resource(id: Uuid, pool: &db::Pool) -> Resource { + resource::table + .find(id) + .select(Resource::as_select()) + .first_async(pool.pool()) + .await + .unwrap() + } + + #[test] + fn test_verify_query() { + let collection_id = + uuid::Uuid::parse_str("cccccccc-cccc-cccc-cccc-cccccccccccc") + .unwrap(); + let resource_id = + uuid::Uuid::parse_str("aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa") + .unwrap(); + let detach = Collection::detach_resource( + collection_id, + resource_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + diesel::update(resource::table) + .set(resource::dsl::collection_id.eq(Option::::None)), + ); + let query = diesel::debug_query::(&detach).to_string(); + + let expected_query = "WITH \ + collection_by_id AS (\ + SELECT \ + \"test_schema\".\"collection\".\"id\", \ + \"test_schema\".\"collection\".\"name\", \ + \"test_schema\".\"collection\".\"description\", \ + \"test_schema\".\"collection\".\"time_created\", \ + \"test_schema\".\"collection\".\"time_modified\", \ + \"test_schema\".\"collection\".\"time_deleted\", \ + \"test_schema\".\"collection\".\"rcgen\" \ + FROM \"test_schema\".\"collection\" \ + WHERE (\ + (\"test_schema\".\"collection\".\"id\" = $1) AND \ + (\"test_schema\".\"collection\".\"time_deleted\" IS NULL)\ + ) FOR UPDATE\ + ), \ + resource_by_id AS (\ + SELECT \ + \"test_schema\".\"resource\".\"id\", \ + \"test_schema\".\"resource\".\"name\", \ + \"test_schema\".\"resource\".\"description\", \ + 
\"test_schema\".\"resource\".\"time_created\", \ + \"test_schema\".\"resource\".\"time_modified\", \ + \"test_schema\".\"resource\".\"time_deleted\", \ + \"test_schema\".\"resource\".\"collection_id\" \ + FROM \"test_schema\".\"resource\" \ + WHERE (\ + (\"test_schema\".\"resource\".\"id\" = $2) AND \ + (\"test_schema\".\"resource\".\"time_deleted\" IS NULL)\ + ) FOR UPDATE\ + ), \ + collection_info AS (\ + SELECT \ + \"test_schema\".\"collection\".\"id\", \ + \"test_schema\".\"collection\".\"name\", \ + \"test_schema\".\"collection\".\"description\", \ + \"test_schema\".\"collection\".\"time_created\", \ + \"test_schema\".\"collection\".\"time_modified\", \ + \"test_schema\".\"collection\".\"time_deleted\", \ + \"test_schema\".\"collection\".\"rcgen\" \ + FROM \"test_schema\".\"collection\" \ + WHERE (\ + (\"test_schema\".\"collection\".\"id\" = $3) AND \ + (\"test_schema\".\"collection\".\"time_deleted\" IS NULL)\ + ) FOR UPDATE\ + ), \ + resource_info AS (\ + SELECT \ + \"test_schema\".\"resource\".\"id\", \ + \"test_schema\".\"resource\".\"name\", \ + \"test_schema\".\"resource\".\"description\", \ + \"test_schema\".\"resource\".\"time_created\", \ + \"test_schema\".\"resource\".\"time_modified\", \ + \"test_schema\".\"resource\".\"time_deleted\", \ + \"test_schema\".\"resource\".\"collection_id\" \ + FROM \"test_schema\".\"resource\" \ + WHERE ((\ + (\"test_schema\".\"resource\".\"id\" = $4) AND \ + (\"test_schema\".\"resource\".\"time_deleted\" IS NULL)) AND \ + (\"test_schema\".\"resource\".\"collection_id\" = $5)\ + ) FOR UPDATE\ + ), \ + do_update AS (\ + SELECT IF(\ + EXISTS(SELECT \"id\" FROM collection_info) AND \ + EXISTS(SELECT \"id\" FROM resource_info), \ + TRUE,\ + FALSE)\ + ), \ + updated_resource AS (\ + UPDATE \ + \"test_schema\".\"resource\" \ + SET \ + \"collection_id\" = $6 \ + WHERE \ + (\"test_schema\".\"resource\".\"id\" = $7) AND \ + (SELECT * FROM do_update) \ + RETURNING \ + \"test_schema\".\"resource\".\"id\", \ + 
\"test_schema\".\"resource\".\"name\", \ + \"test_schema\".\"resource\".\"description\", \ + \"test_schema\".\"resource\".\"time_created\", \ + \"test_schema\".\"resource\".\"time_modified\", \ + \"test_schema\".\"resource\".\"time_deleted\", \ + \"test_schema\".\"resource\".\"collection_id\"\ + ) \ + SELECT * FROM \ + (SELECT 1) \ + LEFT JOIN (SELECT * FROM collection_by_id) ON TRUE \ + LEFT JOIN (SELECT * FROM resource_by_id) ON TRUE \ + LEFT JOIN (SELECT * FROM updated_resource) ON TRUE; -- binds: [cccccccc-cccc-cccc-cccc-cccccccccccc, aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa, cccccccc-cccc-cccc-cccc-cccccccccccc, aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa, cccccccc-cccc-cccc-cccc-cccccccccccc, None, aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa]"; + assert_eq!(query, expected_query); + } + + #[tokio::test] + async fn test_detach_missing_collection_fails() { + let logctx = + dev::test_setup_log("test_detach_missing_collection_fails"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + + setup_db(&pool).await; + + let collection_id = uuid::Uuid::new_v4(); + let resource_id = uuid::Uuid::new_v4(); + let detach = Collection::detach_resource( + collection_id, + resource_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + diesel::update(resource::table) + .set(resource::dsl::collection_id.eq(Option::::None)), + ) + .detach_and_get_result_async(pool.pool()) + .await; + + assert!(matches!(detach, Err(DetachError::CollectionNotFound))); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_detach_missing_resource_fails() { + let logctx = dev::test_setup_log("test_detach_missing_resource_fails"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + + setup_db(&pool).await; + + let collection_id = uuid::Uuid::new_v4(); + let resource_id 
= uuid::Uuid::new_v4(); + + // Create the collection + let collection = + insert_collection(collection_id, "collection", &pool).await; + + // Attempt to attach - even though the resource does not exist. + let detach = Collection::detach_resource( + collection_id, + resource_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + diesel::update(resource::table) + .set(resource::dsl::collection_id.eq(Option::::None)), + ) + .detach_and_get_result_async(pool.pool()) + .await; + + assert!(matches!(detach, Err(DetachError::ResourceNotFound))); + // The collection should remain unchanged. + assert_eq!(collection, get_collection(collection_id, &pool).await); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_detach_once() { + let logctx = dev::test_setup_log("test_detach_once"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + + setup_db(&pool).await; + + let collection_id = uuid::Uuid::new_v4(); + let resource_id = uuid::Uuid::new_v4(); + + // Create the collection and resource. Attach them. + let _collection = + insert_collection(collection_id, "collection", &pool).await; + let _resource = insert_resource(resource_id, "resource", &pool).await; + attach_resource(collection_id, resource_id, &pool).await; + + // Detach the resource from the collection. + let detach = Collection::detach_resource( + collection_id, + resource_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + diesel::update(resource::table) + .set(resource::dsl::collection_id.eq(Option::::None)), + ) + .detach_and_get_result_async(pool.pool()) + .await; + + // "detach_and_get_result_async" should return the "detached" resource. + let returned_resource = detach.expect("Detach should have worked"); + assert!( + returned_resource.collection_id.is_none(), + ); + // The returned value should be the latest value in the DB. 
+ assert_eq!(returned_resource, get_resource(resource_id, &pool).await); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_detach_once_synchronous() { + let logctx = dev::test_setup_log("test_detach_once_synchronous"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + + setup_db(&pool).await; + + let collection_id = uuid::Uuid::new_v4(); + let resource_id = uuid::Uuid::new_v4(); + + // Create the collection and resource. + let _collection = + insert_collection(collection_id, "collection", &pool).await; + let _resource = insert_resource(resource_id, "resource", &pool).await; + attach_resource(collection_id, resource_id, &pool).await; + + // Detach the resource to the collection. + let detach_query = Collection::detach_resource( + collection_id, + resource_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + diesel::update(resource::table) + .set(resource::dsl::collection_id.eq(Option::::None)), + ); + + type TxnError = TransactionError< + DetachError, + >; + let result = pool + .pool() + .transaction(move |conn| { + detach_query.detach_and_get_result(conn).map_err(|e| match e { + DetachError::DatabaseError(e) => TxnError::from(e), + e => TxnError::CustomError(e), + }) + }) + .await; + + // "detach_and_get_result" should return the "detached" resource. + let returned_resource = result.expect("Detach should have worked"); + assert!( + returned_resource.collection_id.is_none() + ); + // The returned values should be the latest value in the DB. 
+ assert_eq!(returned_resource, get_resource(resource_id, &pool).await); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_detach_while_already_detached() { + let logctx = dev::test_setup_log("test_detach_while_already_detached"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + + setup_db(&pool).await; + + let collection_id = uuid::Uuid::new_v4(); + + let collection = + insert_collection(collection_id, "collection", &pool).await; + let resource_id = uuid::Uuid::new_v4(); + let _resource = insert_resource(resource_id, "resource", &pool).await; + attach_resource(collection_id, resource_id, &pool).await; + + // Detach a resource from a collection, as usual. + let detach = Collection::detach_resource( + collection_id, + resource_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + diesel::update(resource::table) + .set(resource::dsl::collection_id.eq(Option::::None)), + ) + .detach_and_get_result_async(pool.pool()) + .await; + assert_eq!( + detach.expect("Detach should have worked").id(), + resource_id + ); + + // Try detaching once more + let detach = Collection::detach_resource( + collection_id, + resource_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + diesel::update(resource::table) + .set(resource::dsl::collection_id.eq(Option::::None)), + ) + .detach_and_get_result_async(pool.pool()) + .await; + let err = detach.expect_err("Should have failed to detach"); + + // A caller should be able to inspect this result, the resource is + // already detached. 
+ match err { + DetachError::NoUpdate { resource, collection } => { + assert!( + resource + .collection_id + .as_ref() + .is_none() + ); + assert_eq!(resource, get_resource(resource_id, &pool).await); + assert_eq!( + collection, + get_collection(collection_id, &pool).await + ); + } + _ => panic!("Unexpected error: {:?}", err), + }; + + // The generation number should only have bumped once, from the original + // resource insertion. + assert_eq!( + collection.rcgen + 1, + get_collection(collection_id, &pool).await.rcgen + ); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_detach_deleted_resource_fails() { + let logctx = dev::test_setup_log("test_detach_deleted_resource_fails"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + + setup_db(&pool).await; + + let collection_id = uuid::Uuid::new_v4(); + let resource_id = uuid::Uuid::new_v4(); + + // Create the collection and resource. + let _collection = + insert_collection(collection_id, "collection", &pool).await; + let _resource = insert_resource(resource_id, "resource", &pool).await; + + // Immediately soft-delete the resource. + diesel::update( + resource::table.filter(resource::dsl::id.eq(resource_id)), + ) + .set(resource::dsl::time_deleted.eq(Utc::now())) + .execute_async(pool.pool()) + .await + .unwrap(); + + // Detach the resource to the collection. Observe a failure which is + // indistinguishable from the resource not existing. 
+ let attach = Collection::detach_resource( + collection_id, + resource_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + diesel::update(resource::table) + .set(resource::dsl::collection_id.eq(collection_id)), + ) + .detach_and_get_result_async(pool.pool()) + .await; + assert!(matches!(attach, Err(DetachError::ResourceNotFound))); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_detach_without_update_filter() { + let logctx = dev::test_setup_log("test_detach_without_update_filter"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + + setup_db(&pool).await; + + let collection_id = uuid::Uuid::new_v4(); + + // Create the collection and some resources. + let _collection = + insert_collection(collection_id, "collection", &pool).await; + let resource_id1 = uuid::Uuid::new_v4(); + let resource_id2 = uuid::Uuid::new_v4(); + let _resource1 = + insert_resource(resource_id1, "resource1", &pool).await; + attach_resource(collection_id, resource_id1, &pool).await; + let _resource2 = + insert_resource(resource_id2, "resource2", &pool).await; + attach_resource(collection_id, resource_id2, &pool).await; + + // Detach the resource from the collection. + // + // NOTE: In the update statement, we aren't filtering by resource ID, + // even though we explicitly have two "live" resources". + let detach = Collection::detach_resource( + collection_id, + resource_id1, + collection::table.into_boxed(), + resource::table.into_boxed(), + diesel::update(resource::table) + .set(resource::dsl::collection_id.eq(Option::::None)), + ) + .detach_and_get_result_async(pool.pool()) + .await; + + let returned_resource = detach.expect("Detach should have worked"); + assert_eq!(returned_resource.id(), resource_id1); + + // Note that only "resource1" should be detached. 
+ // "resource2" should have automatically been filtered away from the + // update statement, regardless of user input. + assert!( + get_resource(resource_id1, &pool) + .await + .collection_id + .is_none() + ); + assert!( + get_resource(resource_id2, &pool) + .await + .collection_id + .is_some() + ); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } +} diff --git a/nexus/src/db/datastore.rs b/nexus/src/db/datastore.rs index 24985e5042d..62644502e01 100644 --- a/nexus/src/db/datastore.rs +++ b/nexus/src/db/datastore.rs @@ -28,6 +28,8 @@ use super::Pool; use crate::authn; use crate::authz::{self, ApiResource}; use crate::context::OpContext; +use crate::db::collection_attach::{AttachError, DatastoreAttachTarget}; +use crate::db::collection_detach::{DetachError, DatastoreDetachTarget}; use crate::db::fixed_data::role_assignment::BUILTIN_ROLE_ASSIGNMENTS; use crate::db::fixed_data::role_builtin::BUILTIN_ROLES; use crate::db::fixed_data::silo::{DEFAULT_SILO, SILO_ID}; @@ -1111,6 +1113,299 @@ impl DataStore { .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) } + /// Attaches a disk to an instance, if both objects: + /// - Exist + /// - Are in valid states + /// - Are under the maximum "attach count" threshold + pub async fn disk_attach( + &self, + opctx: &OpContext, + authz_instance: &authz::Instance, + authz_disk: &authz::Disk, + max_disks: u32, + ) -> Result<(Instance, Disk), Error> { + use db::schema::{disk, instance}; + + opctx.authorize(authz::Action::Modify, authz_instance).await?; + opctx.authorize(authz::Action::Modify, authz_disk).await?; + + let ok_to_attach_disk_states = vec![ + api::external::DiskState::Creating, + api::external::DiskState::Detached, + ]; + let ok_to_attach_disk_state_labels: Vec<_> = + ok_to_attach_disk_states.iter().map(|s| s.label()).collect(); + + // TODO(https://github.com/oxidecomputer/omicron/issues/811): + // This list of instance attach states is more restrictive than it + // plausibly could be. 
+ // + // We currently only permit attaching disks to stopped instances. + let ok_to_attach_instance_states = vec![ + db::model::InstanceState(api::external::InstanceState::Creating), + db::model::InstanceState(api::external::InstanceState::Stopped), + ]; + + let attached_label = api::external::DiskState::Attached( + authz_instance.id(), + ).label(); + + // TODO "u32" seems reasonable for the max disks value (input / output) + let (instance, disk) = Instance::attach_resource( + authz_instance.id(), + authz_disk.id(), + instance::table + .into_boxed() + .filter(instance::dsl::state.eq_any(ok_to_attach_instance_states)), + disk::table + .into_boxed() + .filter(disk::dsl::disk_state.eq_any(ok_to_attach_disk_state_labels)), + // TODO: Remove unwrap? + usize::try_from(max_disks).unwrap(), + diesel::update(disk::dsl::disk) + .set(( + disk::dsl::disk_state.eq(attached_label), + disk::dsl::attach_instance_id.eq(authz_instance.id()) + )) + ) + .attach_and_get_result_async(self.pool_authorized(opctx).await?) + .await + .or_else(|e| { + match e { + AttachError::CollectionNotFound => { + Err(Error::not_found_by_id( + ResourceType::Instance, + &authz_instance.id(), + )) + }, + AttachError::ResourceNotFound => { + Err(Error::not_found_by_id( + ResourceType::Disk, + &authz_disk.id(), + )) + }, + AttachError::NoUpdate { attached_count, resource, collection } => { + let disk_state = resource.state().into(); + match disk_state { + // Idempotent errors: We did not perform an update, + // because we're already in the process of attaching. + api::external::DiskState::Attached(id) if id == authz_instance.id() => { + return Ok((collection, resource)); + } + api::external::DiskState::Attaching(id) if id == authz_instance.id() => { + return Ok((collection, resource)); + } + // Ok-to-attach disk states: Inspect the state to infer + // why we did not attach. 
+ api::external::DiskState::Creating | + api::external::DiskState::Detached => { + match collection.runtime_state.state.state() { + // Ok-to-be-attached instance states: + api::external::InstanceState::Creating | + api::external::InstanceState::Stopped => { + // The disk is ready to be attached, and the + // instance is ready to be attached. Perhaps + // we are at attachment capacity? + if attached_count == i64::from(max_disks) { + return Err(Error::invalid_request(&format!( + "cannot attach more than {} disks to instance", + max_disks + ))); + } + + // We can't attach, but the error hasn't + // helped us infer why. + return Err(Error::internal_error( + "cannot attach disk" + )); + } + // Not okay-to-be-attached instance states: + _ => { + Err(Error::invalid_request(&format!( + "cannot attach disk to instance in {} state", + collection.runtime_state.state.state(), + ))) + } + } + }, + // Not-okay-to-attach disk states: The disk is attached elsewhere. + api::external::DiskState::Attached(_) | + api::external::DiskState::Attaching(_) | + api::external::DiskState::Detaching(_) => { + Err(Error::invalid_request(&format!( + "cannot attach disk \"{}\": disk is attached to another instance", + resource.name().as_str(), + ))) + } + _ => { + Err(Error::invalid_request(&format!( + "cannot attach disk \"{}\": invalid state {}", + resource.name().as_str(), + disk_state, + ))) + } + } + }, + AttachError::DatabaseError(e) => { + Err(public_error_from_diesel_pool(e, ErrorHandler::Server)) + }, + } + })?; + + Ok((instance, disk)) + } + + pub async fn disk_detach( + &self, + opctx: &OpContext, + authz_instance: &authz::Instance, + authz_disk: &authz::Disk, + disk: &Disk, + ) -> Result { + use db::schema::{disk, instance}; + + opctx.authorize(authz::Action::Modify, authz_instance).await?; + opctx.authorize(authz::Action::Modify, authz_disk).await?; + + let new_runtime = disk.runtime().detach(); + + let disk_id = authz_disk.id(); + use db::schema::disk::dsl; + let updated = 
diesel::update(dsl::disk) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(disk_id)) + .filter(dsl::state_generation.lt(new_runtime.gen)) + .filter(dsl::attach_instance_id.eq(authz_instance.id())) + .set(new_runtime.clone()) + .check_if_exists::(disk_id) + .execute_and_check(self.pool()) + .await + .map(|r| match r.status { + UpdateStatus::Updated => true, + UpdateStatus::NotUpdatedButExists => false, + }) + .map_err(|e| { + public_error_from_diesel_pool( + e, + ErrorHandler::NotFoundByResource(authz_disk), + ) + })?; + + let ok_to_detach_disk_states = vec![ + api::external::DiskState::Attached(authz_instance.id()), + ]; + let ok_to_detach_disk_state_labels: Vec<_> = + ok_to_detach_disk_states.iter().map(|s| s.label()).collect(); + + // TODO(https://github.com/oxidecomputer/omicron/issues/811): + // This list of instance detach states is more restrictive than it + // plausibly could be. + // + // We currently only permit detaching disks from stopped instances. + let ok_to_detach_instance_states = vec![ + db::model::InstanceState(api::external::InstanceState::Creating), + db::model::InstanceState(api::external::InstanceState::Stopped), + ]; + + let detached_label = api::external::DiskState::Detached.label(); + + let disk = Instance::detach_resource( + authz_instance.id(), + authz_disk.id(), + instance::table + .into_boxed() + .filter(instance::dsl::state.eq_any(ok_to_detach_instance_states)), + disk::table + .into_boxed() + .filter(disk::dsl::disk_state.eq_any(ok_to_detach_disk_state_labels)), + diesel::update(disk::dsl::disk) + .set(( + disk::dsl::disk_state.eq(detached_label), + disk::dsl::attach_instance_id.eq(Option::::None) + )) + ) + .detach_and_get_result_async(self.pool_authorized(opctx).await?) 
+ .await + .or_else(|e| { + match e { + DetachError::CollectionNotFound => { + Err(Error::not_found_by_id( + ResourceType::Instance, + &authz_instance.id(), + )) + }, + DetachError::ResourceNotFound => { + Err(Error::not_found_by_id( + ResourceType::Disk, + &authz_disk.id(), + )) + }, + DetachError::NoUpdate { resource, collection } => { + let disk_state = resource.state().into(); + match disk_state { + // Idempotent errors: We did not perform an update, + // because we're already in the process of detaching. + api::external::DiskState::Detached => { + return Ok(resource); + } + api::external::DiskState::Detaching(id) if id == authz_instance.id() => { + return Ok(resource); + } + // Ok-to-detach disk states: Inspect the state to infer + // why we did not detach. + api::external::DiskState::Attached(id) if id == authz_instance.id() => { + match collection.runtime_state.state.state() { + // Ok-to-be-detached instance states: + api::external::InstanceState::Creating | + api::external::InstanceState::Stopped => { + // We can't detach, but the error hasn't + // helped us infer why. + return Err(Error::internal_error( + "cannot detach disk" + )); + } + // Not okay-to-be-detached instance states: + _ => { + Err(Error::invalid_request(&format!( + "cannot detach disk from instance in {} state", + collection.runtime_state.state.state(), + ))) + } + } + }, + api::external::DiskState::Attaching(id) if id == authz_instance.id() => { + Err(Error::invalid_request(&format!( + "cannot detach disk \"{}\": disk is currently being attached", + resource.name().as_str(), + ))) + }, + // Not-okay-to-detach disk states: The disk is attached elsewhere. 
+ api::external::DiskState::Attached(_) | + api::external::DiskState::Attaching(_) | + api::external::DiskState::Detaching(_) => { + Err(Error::invalid_request(&format!( + "cannot detach disk \"{}\": disk is attached to another instance", + resource.name().as_str(), + ))) + } + _ => { + Err(Error::invalid_request(&format!( + "cannot detach disk \"{}\": invalid state {}", + resource.name().as_str(), + disk_state, + ))) + } + } + }, + DetachError::DatabaseError(e) => { + Err(public_error_from_diesel_pool(e, ErrorHandler::Server)) + }, + } + })?; + + Ok(disk) + } + pub async fn disk_update_runtime( &self, opctx: &OpContext, diff --git a/nexus/src/db/mod.rs b/nexus/src/db/mod.rs index 8da5a6aef51..f180006ba15 100644 --- a/nexus/src/db/mod.rs +++ b/nexus/src/db/mod.rs @@ -7,6 +7,7 @@ // This is not intended to be public, but this is necessary to use it from // doctests pub mod collection_attach; +pub mod collection_detach; pub mod collection_insert; mod config; diff --git a/nexus/src/db/model/instance.rs b/nexus/src/db/model/instance.rs index eed074052f2..556ce16fe82 100644 --- a/nexus/src/db/model/instance.rs +++ b/nexus/src/db/model/instance.rs @@ -2,9 +2,10 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -use super::{ByteCount, Generation, InstanceCpuCount, InstanceState}; +use super::{ByteCount, Disk, Generation, InstanceCpuCount, InstanceState}; +use crate::db::collection_attach::DatastoreAttachTarget; use crate::db::identity::Resource; -use crate::db::schema::instance; +use crate::db::schema::{disk, instance}; use crate::external_api::params; use chrono::{DateTime, Utc}; use db_macros::Resource; @@ -27,6 +28,9 @@ pub struct Instance { /// user data for instance initialization systems (e.g. 
cloud-init) pub user_data: Vec, + /// Child Resource generation number + pub rcgen: Generation, + /// runtime state of the Instance #[diesel(embed)] pub runtime_state: InstanceRuntimeState, @@ -45,6 +49,7 @@ impl Instance { identity, project_id, user_data: params.user_data.clone(), + rcgen: Generation::new(), runtime_state: runtime, } } @@ -68,6 +73,18 @@ impl Into for Instance { } } +impl DatastoreAttachTarget for Instance { + type Id = Uuid; + + type CollectionIdColumn = instance::dsl::id; + type CollectionGenerationColumn = instance::dsl::rcgen; + type CollectionTimeDeletedColumn = instance::dsl::time_deleted; + + type ResourceIdColumn = disk::dsl::id; + type ResourceCollectionIdColumn = disk::dsl::attach_instance_id; + type ResourceTimeDeletedColumn = disk::dsl::time_deleted; +} + /// Runtime state of the Instance, including the actual running state and minimal /// metadata /// diff --git a/nexus/src/db/schema.rs b/nexus/src/db/schema.rs index 66d25ea7a8f..2db9a3bc9d7 100644 --- a/nexus/src/db/schema.rs +++ b/nexus/src/db/schema.rs @@ -89,6 +89,7 @@ table! 
{ time_deleted -> Nullable, project_id -> Uuid, user_data -> Binary, + rcgen -> Int8, state -> crate::db::model::InstanceStateEnum, time_state_updated -> Timestamptz, state_generation -> Int8, From e3e9807797a9718d6750e560ab73a57bf3e477f3 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 23 May 2022 09:29:55 -0400 Subject: [PATCH 13/29] use 'disk_detach' --- nexus/src/app/instance.rs | 16 ++++------ nexus/src/db/collection_attach.rs | 29 +++++++++++++----- nexus/src/db/collection_detach.rs | 50 +++++++++++++------------------ nexus/src/db/datastore.rs | 37 ++++------------------- 4 files changed, 53 insertions(+), 79 deletions(-) diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index b8e9c592591..b5793084173 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -19,7 +19,6 @@ use omicron_common::api::external; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::DeleteResult; -use omicron_common::api::external::DiskState; use omicron_common::api::external::Error; use omicron_common::api::external::InstanceState; use omicron_common::api::external::ListResultVec; @@ -610,7 +609,8 @@ impl super::Nexus { // "Attached". // - We should then issue a request to the associated sled agent. // - Once that completes, we should update the disk state to "Attached". - let (_instance, disk) = self.db_datastore + let (_instance, disk) = self + .db_datastore .disk_attach( &opctx, &authz_instance, @@ -630,7 +630,7 @@ impl super::Nexus { instance_name: &Name, disk_name: &Name, ) -> UpdateResult { - let (.., authz_project, authz_disk, db_disk) = + let (.., authz_project, authz_disk, _db_disk) = LookupPath::new(opctx, &self.db_datastore) .organization_name(organization_name) .project_name(project_name) @@ -654,13 +654,9 @@ impl super::Nexus { // "Detached". // - We should then issue a request to the associated sled agent. 
// - Once that completes, we should update the disk state to "Detached". - let disk = self.db_datastore - .disk_detach( - &opctx, - &authz_instance, - &authz_disk, - &db_disk, - ) + let disk = self + .db_datastore + .disk_detach(&opctx, &authz_instance, &authz_disk) .await?; Ok(disk) } diff --git a/nexus/src/db/collection_attach.rs b/nexus/src/db/collection_attach.rs index da167bab7fb..5a18367d339 100644 --- a/nexus/src/db/collection_attach.rs +++ b/nexus/src/db/collection_attach.rs @@ -433,8 +433,10 @@ pub type AsyncAttachToCollectionResult = Result<(C, ResourceType), AttachError>; /// Result of [`AttachToCollectionStatement`] when executed synchronously -pub type SyncAttachToCollectionResult = - Result<(C, ResourceType), AttachError>; +pub type SyncAttachToCollectionResult = Result< + (C, ResourceType), + AttachError, +>; /// Errors returned by [`AttachToCollectionStatement`]. #[derive(Debug)] @@ -729,7 +731,9 @@ where out.push_identifier(CollectionPrimaryKey::::NAME)?; out.push_sql(" IN (SELECT "); out.push_identifier(CollectionPrimaryKey::::NAME)?; - out.push_sql(" FROM collection_info) AND (SELECT * FROM do_update) RETURNING "); + out.push_sql( + " FROM collection_info) AND (SELECT * FROM do_update) RETURNING ", + ); self.collection_returning_clause.walk_ast(out.reborrow())?; out.push_sql("), "); @@ -1184,13 +1188,17 @@ mod test { .await; // "attach_and_get_result_async" should return the "attached" resource. - let (returned_collection, returned_resource) = attach.expect("Attach should have worked"); + let (returned_collection, returned_resource) = + attach.expect("Attach should have worked"); assert_eq!( returned_resource.collection_id.expect("Expected a collection ID"), collection_id ); // The returned value should be the latest value in the DB. 
- assert_eq!(returned_collection, get_collection(collection_id, &pool).await); + assert_eq!( + returned_collection, + get_collection(collection_id, &pool).await + ); assert_eq!(returned_resource, get_resource(resource_id, &pool).await); // The generation number should have incremented in the collection. assert_eq!( @@ -1244,13 +1252,17 @@ mod test { .await; // "attach_and_get_result" should return the "attached" resource. - let (returned_collection, returned_resource) = result.expect("Attach should have worked"); + let (returned_collection, returned_resource) = + result.expect("Attach should have worked"); assert_eq!( returned_resource.collection_id.expect("Expected a collection ID"), collection_id ); // The returned values should be the latest value in the DB. - assert_eq!(returned_collection, get_collection(collection_id, &pool).await); + assert_eq!( + returned_collection, + get_collection(collection_id, &pool).await + ); assert_eq!(returned_resource, get_resource(resource_id, &pool).await); // The generation number should have incremented in the collection. assert_eq!( @@ -1299,7 +1311,8 @@ mod test { .await; // "attach_and_get_result_async" should return the "attached" resource. - let (_, returned_resource) = attach.expect("Attach should have worked"); + let (_, returned_resource) = + attach.expect("Attach should have worked"); assert_eq!( returned_resource .collection_id diff --git a/nexus/src/db/collection_detach.rs b/nexus/src/db/collection_detach.rs index 9119ba12e12..90e5a1597ba 100644 --- a/nexus/src/db/collection_detach.rs +++ b/nexus/src/db/collection_detach.rs @@ -11,8 +11,8 @@ //! 
- Updates the resource row use super::pool::DbConnection; -use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionManager, PoolError}; use crate::db::collection_attach::DatastoreAttachTarget; +use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionManager, PoolError}; use diesel::associations::HasTable; use diesel::expression::Expression; use diesel::helper_types::*; @@ -278,7 +278,10 @@ pub trait DatastoreDetachTarget: Selectable { resource_query .filter(resource_table().primary_key().eq(resource_id)) .filter(Self::ResourceTimeDeletedColumn::default().is_null()) - .filter(Self::ResourceCollectionIdColumn::default().eq(collection_id)), + .filter( + Self::ResourceCollectionIdColumn::default() + .eq(collection_id), + ), ); let update_resource_statement = update @@ -298,7 +301,9 @@ pub trait DatastoreDetachTarget: Selectable { } impl DatastoreDetachTarget for T -where T: DatastoreAttachTarget { +where + T: DatastoreAttachTarget, +{ type Id = T::Id; type CollectionIdColumn = T::CollectionIdColumn; type CollectionTimeDeletedColumn = T::CollectionTimeDeletedColumn; @@ -638,8 +643,8 @@ where #[cfg(test)] mod test { + use super::{DatastoreDetachTarget, DetachError}; use crate::db::collection_attach::DatastoreAttachTarget; - use super::{DetachError, DatastoreDetachTarget}; use crate::db::{ self, error::TransactionError, identity::Resource as IdentityResource, }; @@ -812,7 +817,7 @@ mod test { resource::table.into_boxed(), 100, diesel::update(resource::table) - .set(resource::dsl::collection_id.eq(collection_id)) + .set(resource::dsl::collection_id.eq(collection_id)), ) .attach_and_get_result_async(pool.pool()) .await @@ -1037,9 +1042,7 @@ mod test { // "detach_and_get_result_async" should return the "detached" resource. let returned_resource = detach.expect("Detach should have worked"); - assert!( - returned_resource.collection_id.is_none(), - ); + assert!(returned_resource.collection_id.is_none(),); // The returned value should be the latest value in the DB. 
assert_eq!(returned_resource, get_resource(resource_id, &pool).await); @@ -1090,9 +1093,7 @@ mod test { // "detach_and_get_result" should return the "detached" resource. let returned_resource = result.expect("Detach should have worked"); - assert!( - returned_resource.collection_id.is_none() - ); + assert!(returned_resource.collection_id.is_none()); // The returned values should be the latest value in the DB. assert_eq!(returned_resource, get_resource(resource_id, &pool).await); @@ -1150,12 +1151,7 @@ mod test { // already detached. match err { DetachError::NoUpdate { resource, collection } => { - assert!( - resource - .collection_id - .as_ref() - .is_none() - ); + assert!(resource.collection_id.as_ref().is_none()); assert_eq!(resource, get_resource(resource_id, &pool).await); assert_eq!( collection, @@ -1264,18 +1260,14 @@ mod test { // Note that only "resource1" should be detached. // "resource2" should have automatically been filtered away from the // update statement, regardless of user input. 
- assert!( - get_resource(resource_id1, &pool) - .await - .collection_id - .is_none() - ); - assert!( - get_resource(resource_id2, &pool) - .await - .collection_id - .is_some() - ); + assert!(get_resource(resource_id1, &pool) + .await + .collection_id + .is_none()); + assert!(get_resource(resource_id2, &pool) + .await + .collection_id + .is_some()); db.cleanup().await.unwrap(); logctx.cleanup_successful(); diff --git a/nexus/src/db/datastore.rs b/nexus/src/db/datastore.rs index 62644502e01..95df564b267 100644 --- a/nexus/src/db/datastore.rs +++ b/nexus/src/db/datastore.rs @@ -29,7 +29,7 @@ use crate::authn; use crate::authz::{self, ApiResource}; use crate::context::OpContext; use crate::db::collection_attach::{AttachError, DatastoreAttachTarget}; -use crate::db::collection_detach::{DetachError, DatastoreDetachTarget}; +use crate::db::collection_detach::{DatastoreDetachTarget, DetachError}; use crate::db::fixed_data::role_assignment::BUILTIN_ROLE_ASSIGNMENTS; use crate::db::fixed_data::role_builtin::BUILTIN_ROLES; use crate::db::fixed_data::silo::{DEFAULT_SILO, SILO_ID}; @@ -1146,9 +1146,8 @@ impl DataStore { db::model::InstanceState(api::external::InstanceState::Stopped), ]; - let attached_label = api::external::DiskState::Attached( - authz_instance.id(), - ).label(); + let attached_label = + api::external::DiskState::Attached(authz_instance.id()).label(); // TODO "u32" seems reasonable for the max disks value (input / output) let (instance, disk) = Instance::attach_resource( @@ -1260,40 +1259,14 @@ impl DataStore { opctx: &OpContext, authz_instance: &authz::Instance, authz_disk: &authz::Disk, - disk: &Disk, ) -> Result { use db::schema::{disk, instance}; opctx.authorize(authz::Action::Modify, authz_instance).await?; opctx.authorize(authz::Action::Modify, authz_disk).await?; - let new_runtime = disk.runtime().detach(); - - let disk_id = authz_disk.id(); - use db::schema::disk::dsl; - let updated = diesel::update(dsl::disk) - .filter(dsl::time_deleted.is_null()) - 
.filter(dsl::id.eq(disk_id)) - .filter(dsl::state_generation.lt(new_runtime.gen)) - .filter(dsl::attach_instance_id.eq(authz_instance.id())) - .set(new_runtime.clone()) - .check_if_exists::(disk_id) - .execute_and_check(self.pool()) - .await - .map(|r| match r.status { - UpdateStatus::Updated => true, - UpdateStatus::NotUpdatedButExists => false, - }) - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByResource(authz_disk), - ) - })?; - - let ok_to_detach_disk_states = vec![ - api::external::DiskState::Attached(authz_instance.id()), - ]; + let ok_to_detach_disk_states = + vec![api::external::DiskState::Attached(authz_instance.id())]; let ok_to_detach_disk_state_labels: Vec<_> = ok_to_detach_disk_states.iter().map(|s| s.label()).collect(); From 71fa5d9363135d10adad91df72210eb8d870cdc9 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 23 May 2022 13:23:34 -0400 Subject: [PATCH 14/29] tests --- nexus/src/app/disk.rs | 5 + nexus/src/app/instance.rs | 22 +-- nexus/tests/integration_tests/disks.rs | 181 +++++++++---------------- 3 files changed, 82 insertions(+), 126 deletions(-) diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs index 52492443c59..5a0f0ee4117 100644 --- a/nexus/src/app/disk.rs +++ b/nexus/src/app/disk.rs @@ -163,6 +163,11 @@ impl super::Nexus { /// Modifies the runtime state of the Disk as requested. This generally /// means attaching or detaching the disk. + // TODO(https://github.com/oxidecomputer/omicron/issues/811): + // This will be unused until we implement hot-plug support. + // However, it has been left for reference until then, as it will + // likely be needed once that feature is implemented. 
+ #[allow(dead_code)] pub(crate) async fn disk_set_runtime( &self, opctx: &OpContext, diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index b5793084173..f3adb8cd415 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -605,10 +605,13 @@ impl super::Nexus { // on database state. // // To implement hot-plug support, we should do the following in a saga: - // - We should update the state to "Attaching", rather than - // "Attached". - // - We should then issue a request to the associated sled agent. - // - Once that completes, we should update the disk state to "Attached". + // - Update the state to "Attaching", rather than "Attached". + // - If the instance is running... + // - Issue a request to "disk attach" to the associated sled agent, + // using the "state generation" value from the moment we attached. + // - Update the DB if the request succeeded (hopefully to "Attached"). + // - If the instance is not running... + // - Update the disk state in the DB to "Attached". let (_instance, disk) = self .db_datastore .disk_attach( @@ -650,10 +653,13 @@ impl super::Nexus { // on database state. // // To implement hot-unplug support, we should do the following in a saga: - // - We should update the state to "Detaching", rather than - // "Detached". - // - We should then issue a request to the associated sled agent. - // - Once that completes, we should update the disk state to "Detached". + // - Update the state to "Detaching", rather than "Detached". + // - If the instance is running... + // - Issue a request to "disk detach" to the associated sled agent, + // using the "state generation" value from the moment we attached. + // - Update the DB if the request succeeded (hopefully to "Detached"). + // - If the instance is not running... + // - Update the disk state in the DB to "Detached". 
let disk = self .db_datastore .disk_detach(&opctx, &authz_instance, &authz_disk) diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index a68504681b6..9c8a8910db5 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -24,6 +24,7 @@ use omicron_common::api::external::ByteCount; use omicron_common::api::external::Disk; use omicron_common::api::external::DiskState; use omicron_common::api::external::IdentityMetadataCreateParams; +use omicron_common::api::external::Instance; use omicron_common::api::external::Name; use omicron_nexus::TestInterfaces as _; use omicron_nexus::{external_api::params, Nexus}; @@ -113,6 +114,30 @@ async fn test_disk_not_found_before_creation( ); } +async fn set_instance_state( + client: &ClientTestContext, + instance_url: &str, + state: &str, +) -> Instance { + let url = format!("{}/{}", instance_url, state); + NexusRequest::new( + RequestBuilder::new(client, Method::POST, &url) + .body(None as Option<&serde_json::Value>) + .expect_status(Some(StatusCode::ACCEPTED)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap() +} + +async fn instance_simulate(nexus: &Arc, id: &Uuid) { + let sa = nexus.instance_sled_by_id(id).await.unwrap(); + sa.instance_finish_transition(id.clone()).await; +} + #[nexus_test] async fn test_disk_create_attach_detach_delete( cptestctx: &ControlPlaneTestContext, @@ -157,6 +182,18 @@ async fn test_disk_create_attach_detach_delete( let instance = create_instance(&client, ORG_NAME, PROJECT_NAME, INSTANCE_NAME).await; + // TODO(https://github.com/oxidecomputer/omicron/issues/811): + // + // Instances must be stopped before disks can be attached - this + // is an artificial limitation without hotplug support. 
+ let instance1_url = format!( + "/organizations/{}/projects/{}/instances/{}", + ORG_NAME, PROJECT_NAME, INSTANCE_NAME + ); + let instance_next = + set_instance_state(&client, &instance1_url, "stop").await; + instance_simulate(nexus, &instance_next.identity.id).await; + // Verify that there are no disks attached to the instance, and specifically // that our disk is not attached to this instance. let url_instance_disks = @@ -179,14 +216,6 @@ async fn test_disk_create_attach_detach_delete( let instance_id = &instance.identity.id; assert_eq!(attached_disk.identity.name, disk.identity.name); assert_eq!(attached_disk.identity.id, disk.identity.id); - assert_eq!(attached_disk.state, DiskState::Attaching(instance_id.clone())); - - // Finish simulation of the attachment and verify the new state, both on the - // attachment and the disk itself. - disk_simulate(nexus, &disk.identity.id).await; - let attached_disk: Disk = disk_get(&client, &disk_url).await; - assert_eq!(attached_disk.identity.name, disk.identity.name); - assert_eq!(attached_disk.identity.id, disk.identity.id); assert_eq!(attached_disk.state, DiskState::Attached(instance_id.clone())); // Attach the disk to the same instance. This should complete immediately @@ -194,8 +223,6 @@ async fn test_disk_create_attach_detach_delete( let disk = disk_post(client, &url_instance_attach_disk, disk.identity.name).await; assert_eq!(disk.state, DiskState::Attached(instance_id.clone())); - let disk = disk_get(&client, &disk_url).await; - assert_eq!(disk.state, DiskState::Attached(instance_id.clone())); // Begin detaching the disk. let disk = disk_post( @@ -204,13 +231,6 @@ async fn test_disk_create_attach_detach_delete( disk.identity.name.clone(), ) .await; - assert_eq!(disk.state, DiskState::Detaching(instance_id.clone())); - let disk: Disk = disk_get(&client, &disk_url).await; - assert_eq!(disk.state, DiskState::Detaching(instance_id.clone())); - - // Finish the detachment. 
- disk_simulate(nexus, &disk.identity.id).await; - let disk = disk_get(&client, &disk_url).await; assert_eq!(disk.state, DiskState::Detached); // Since detach is idempotent, we can detach it again. @@ -314,6 +334,17 @@ async fn test_disk_move_between_instances(cptestctx: &ControlPlaneTestContext) { // Create an instance to attach the disk. let instance = create_instance(&client, ORG_NAME, PROJECT_NAME, INSTANCE_NAME).await; + // TODO(https://github.com/oxidecomputer/omicron/issues/811): + // + // Instances must be stopped before disks can be attached - this + // is an artificial limitation without hotplug support. + let instance_url = format!( + "/organizations/{}/projects/{}/instances/{}", + ORG_NAME, PROJECT_NAME, INSTANCE_NAME + ); + let instance_next = + set_instance_state(&client, &instance_url, "stop").await; + instance_simulate(nexus, &instance_next.identity.id).await; // Verify that there are no disks attached to the instance, and specifically // that our disk is not attached to this instance. @@ -337,14 +368,6 @@ async fn test_disk_move_between_instances(cptestctx: &ControlPlaneTestContext) { let instance_id = &instance.identity.id; assert_eq!(attached_disk.identity.name, disk.identity.name); assert_eq!(attached_disk.identity.id, disk.identity.id); - assert_eq!(attached_disk.state, DiskState::Attaching(instance_id.clone())); - - // Finish simulation of the attachment and verify the new state, both on the - // attachment and the disk itself. - disk_simulate(nexus, &disk.identity.id).await; - let attached_disk: Disk = disk_get(&client, &disk_url).await; - assert_eq!(attached_disk.identity.name, disk.identity.name); - assert_eq!(attached_disk.identity.id, disk.identity.id); assert_eq!(attached_disk.state, DiskState::Attached(instance_id.clone())); // Attach the disk to the same instance. 
This should complete immediately @@ -357,6 +380,14 @@ async fn test_disk_move_between_instances(cptestctx: &ControlPlaneTestContext) { // fail and the disk should remain attached to the first instance. let instance2 = create_instance(&client, ORG_NAME, PROJECT_NAME, "instance2").await; + let instance2_url = format!( + "/organizations/{}/projects/{}/instances/{}", + ORG_NAME, PROJECT_NAME, "instance2" + ); + let instance_next = + set_instance_state(&client, &instance2_url, "stop").await; + instance_simulate(nexus, &instance_next.identity.id).await; + let url_instance2_attach_disk = get_disk_attach_url(instance2.identity.name.as_str()); let url_instance2_detach_disk = @@ -389,64 +420,11 @@ async fn test_disk_move_between_instances(cptestctx: &ControlPlaneTestContext) { // Begin detaching the disk. let disk = disk_post(client, &url_instance_detach_disk, disk.identity.name).await; - assert_eq!(disk.state, DiskState::Detaching(instance_id.clone())); - let disk = disk_get(&client, &disk_url).await; - assert_eq!(disk.state, DiskState::Detaching(instance_id.clone())); - - // It's still illegal to attach this disk elsewhere. - let error: HttpErrorResponseBody = NexusRequest::new( - RequestBuilder::new(client, Method::POST, &url_instance2_attach_disk) - .body(Some(¶ms::DiskIdentifier { - name: disk.identity.name.clone(), - })) - .expect_status(Some(StatusCode::BAD_REQUEST)), - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); - assert_eq!( - error.message, - format!( - "cannot attach disk \"{}\": disk is attached to another instance", - DISK_NAME - ) - ); - - // It's even illegal to attach this disk back to the same instance. 
- let error: HttpErrorResponseBody = NexusRequest::new( - RequestBuilder::new(client, Method::POST, &url_instance_attach_disk) - .body(Some(¶ms::DiskIdentifier { - name: disk.identity.name.clone(), - })) - .expect_status(Some(StatusCode::BAD_REQUEST)), - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); - assert_eq!( - error.message, - format!( - "cannot attach disk \"{}\": disk is attached to another instance", - DISK_NAME - ) - ); + assert_eq!(disk.state, DiskState::Detached); - // However, there's no problem attempting to detach it again. + // There's no problem attempting to detach it again. let disk = disk_post(client, &url_instance_detach_disk, disk.identity.name).await; - assert_eq!(disk.state, DiskState::Detaching(instance_id.clone())); - let disk = disk_get(&client, &disk_url).await; - assert_eq!(disk.state, DiskState::Detaching(instance_id.clone())); - - // Finish the detachment. - disk_simulate(nexus, &disk.identity.id).await; - let disk = disk_get(&client, &disk_url).await; assert_eq!(disk.state, DiskState::Detached); // Since delete is idempotent, we can detach it again -- from either one. @@ -467,10 +445,7 @@ async fn test_disk_move_between_instances(cptestctx: &ControlPlaneTestContext) { let instance2_id = &instance2.identity.id; assert_eq!(attached_disk.identity.name, disk.identity.name); assert_eq!(attached_disk.identity.id, disk.identity.id); - assert_eq!(attached_disk.state, DiskState::Attaching(instance2_id.clone())); - - let disk = disk_get(&client, &disk_url).await; - assert_eq!(disk.state, DiskState::Attaching(instance2_id.clone())); + assert_eq!(attached_disk.state, DiskState::Attached(instance2_id.clone())); // At this point, it's not legal to attempt to attach it to a different // instance (the first one). 
@@ -502,11 +477,9 @@ async fn test_disk_move_between_instances(cptestctx: &ControlPlaneTestContext) { disk.identity.name.clone(), ) .await; - assert_eq!(disk.state, DiskState::Attaching(instance2_id.clone())); - let disk = disk_get(&client, &disk_url).await; - assert_eq!(disk.state, DiskState::Attaching(instance2_id.clone())); + assert_eq!(disk.state, DiskState::Attached(instance2_id.clone())); - // It's not allowed to delete a disk that's attaching. + // It's not allowed to delete a disk that's attached. let error = NexusRequest::expect_failure( client, StatusCode::BAD_REQUEST, @@ -519,37 +492,15 @@ async fn test_disk_move_between_instances(cptestctx: &ControlPlaneTestContext) { .expect("expected request to fail") .parsed_body::() .expect("cannot parse"); - assert_eq!(error.message, "disk cannot be deleted in state \"attaching\""); + assert_eq!(error.message, "disk cannot be deleted in state \"attached\""); - // Now, begin a detach while the disk is still being attached. + // Now, begin a detach. let disk = disk_post( client, &url_instance2_detach_disk, disk.identity.name.clone(), ) .await; - assert_eq!(disk.state, DiskState::Detaching(instance2_id.clone())); - let disk: Disk = disk_get(&client, &disk_url).await; - assert_eq!(disk.state, DiskState::Detaching(instance2_id.clone())); - - // It's not allowed to delete a disk that's detaching, either. - let error = NexusRequest::expect_failure( - client, - StatusCode::BAD_REQUEST, - Method::DELETE, - &disk_url, - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .expect("expected request to fail") - .parsed_body::() - .expect("cannot parse"); - assert_eq!(error.message, "disk cannot be deleted in state \"detaching\""); - - // Finish detachment. - disk_simulate(nexus, &disk.identity.id).await; - let disk = disk_get(&client, &disk_url).await; assert_eq!(disk.state, DiskState::Detached); // Now we can delete the disk. 
@@ -826,9 +777,3 @@ fn disks_eq(disk1: &Disk, disk2: &Disk) { assert_eq!(disk1.state, disk2.state); assert_eq!(disk1.device_path, disk2.device_path); } - -/// Simulate completion of an ongoing disk state transition. -async fn disk_simulate(nexus: &Arc, id: &Uuid) { - let sa = nexus.disk_sled_by_id(id).await.unwrap(); - sa.disk_finish_transition(id.clone()).await; -} From e96237d759f268f151a3c6711cf42457fb82ab88 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 23 May 2022 18:12:39 -0400 Subject: [PATCH 15/29] More readable bounds --- nexus/src/db/collection_detach.rs | 192 ++++++++++++++++-------------- 1 file changed, 105 insertions(+), 87 deletions(-) diff --git a/nexus/src/db/collection_detach.rs b/nexus/src/db/collection_detach.rs index 90e5a1597ba..1da5cb73a01 100644 --- a/nexus/src/db/collection_detach.rs +++ b/nexus/src/db/collection_detach.rs @@ -14,7 +14,7 @@ use super::pool::DbConnection; use crate::db::collection_attach::DatastoreAttachTarget; use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionManager, PoolError}; use diesel::associations::HasTable; -use diesel::expression::Expression; +use diesel::expression::{AsExpression, Expression}; use diesel::helper_types::*; use diesel::pg::Pg; use diesel::prelude::*; @@ -29,16 +29,28 @@ use std::fmt::Debug; type CollectionTable = <>::CollectionIdColumn as Column>::Table; + /// The table representing the resource. This table contains an /// ID acting as a foreign key into the collection table. type ResourceTable = <>::ResourceCollectionIdColumn as Column>::Table; + +/// The default WHERE clause of a table, when treated as an UPDATE target. +type TableDefaultWhereClause
=
::WhereClause; + +type FromClause = ::FromClause; +type QuerySqlType = ::SqlType; +type ExprSqlType = ::SqlType; + /// The default WHERE clause of the resource table. -type ResourceTableWhereClause = - as IntoUpdateTarget>::WhereClause; +type ResourceTableDefaultWhereClause = + TableDefaultWhereClause>; + +/// Helper to access column type. type CollectionIdColumn = >::CollectionIdColumn; +/// Helper to access column type. type ResourceIdColumn = >::ResourceIdColumn; @@ -48,6 +60,69 @@ impl TypesAreSame2 for (T, T) {} pub trait TypesAreSame3 {} impl TypesAreSame3 for (T, T, T) {} +/// Ensures that the type is a Diesel table, and that we can call ".table" and +/// ".into_boxed()" on it. +pub trait BoxableTable: HasTable
+ + 'static + + Send + + Table + + IntoUpdateTarget + + query_methods::BoxedDsl< + 'static, + Pg, + Output = BoxedDslOutput, + > {} +impl BoxableTable for T +where + T: HasTable
+ + 'static + + Send + + Table + + IntoUpdateTarget + + query_methods::BoxedDsl< + 'static, + Pg, + Output = BoxedDslOutput, + >, +{} + +/// Ensures that calling ".filter(predicate)" on this type is callable, and does +/// not change the underlying type. +pub trait FilterBy: query_methods::FilterDsl {} +impl FilterBy for T +where + T: query_methods::FilterDsl {} + +/// Allows calling ".into_boxed" on an update statement. +pub trait BoxableUpdateStatement: + query_methods::BoxedDsl< + 'static, + Pg, + Output = BoxedUpdateStatement< + 'static, + Pg, + Table, + V + >, + > +where + Table: QuerySource {} + +impl BoxableUpdateStatement for T +where + T: query_methods::BoxedDsl< + 'static, + Pg, + Output = BoxedUpdateStatement< + 'static, + Pg, + Table, + V, + >, + >, + Table: QuerySource, +{} + /// Trait to be implemented by structs representing a detachable collection. /// /// A blanket implementation is provided for traits that implement @@ -108,7 +183,7 @@ pub trait DatastoreDetachTarget: Selectable { // value. update: UpdateStatement< ResourceTable, - ResourceTableWhereClause, + ResourceTableDefaultWhereClause, V, >, ) -> DetachFromCollectionStatement @@ -125,112 +200,55 @@ pub trait DatastoreDetachTarget: Selectable { ::Table, ): TypesAreSame3, Self: Sized, - // Enables the "table()" method on the Collection. - CollectionTable: HasTable
> - + 'static - + Send - + Table - // Allows calling ".into_boxed()" on the table. - + query_methods::BoxedDsl< - 'static, - Pg, - Output = BoxedDslOutput>, - >, - // Enables the "table()" method on the Resource. - ResourceTable: HasTable
> - + 'static - + Send - + Table - // Allows calling ".into_boxed()" on the table. - + query_methods::BoxedDsl< - 'static, - Pg, - Output = BoxedDslOutput>, - >, + + // Treat the collection and resource as boxed tables. + CollectionTable: BoxableTable, + ResourceTable: BoxableTable, + // Allows treating "collection_exists_query" as a boxed "dyn QueryFragment". - as QuerySource>::FromClause: - QueryFragment + Send, + FromClause>: QueryFragment + Send, + QuerySqlType>: Send, // Allows treating "resource_exists_query" as a boxed "dyn QueryFragment". - as QuerySource>::FromClause: - QueryFragment + Send, - // Allows sending "collection_exists_query" between threads. - as AsQuery>::SqlType: Send, - // Allows sending "resource_exists_query" between threads. - as AsQuery>::SqlType: Send, + FromClause>: QueryFragment + Send, + QuerySqlType>: Send, + // Allows calling ".filter()" on the boxed collection table. BoxedQuery>: - query_methods::FilterDsl< - Eq, Self::Id>, - Output = BoxedQuery>, - // Filter by time deleted = NULL - > + query_methods::FilterDsl< - IsNull, - Output = BoxedQuery>, - >, + FilterBy, Self::Id>> + + FilterBy>, // Allows calling ".filter()" on the boxed resource table. - BoxedQuery>: query_methods::FilterDsl< - Eq, Self::Id>, - Output = BoxedQuery>, - // Filter by collection ID = ID - > + query_methods::FilterDsl< - Eq, - Output = BoxedQuery>, - // Filter by time deleted = NULL - > + query_methods::FilterDsl< - IsNull, - Output = BoxedQuery>, - >, + BoxedQuery>: + FilterBy, Self::Id>> + + FilterBy> + + FilterBy>, - // See: "update_resource_statement". - // - // Allows referencing the default "WHERE" clause of the update - // statement. 
- ResourceTable: IntoUpdateTarget, // Allows calling "update.into_boxed()" UpdateStatement< ResourceTable, - ResourceTableWhereClause, + ResourceTableDefaultWhereClause, V, - >: query_methods::BoxedDsl< - 'static, - Pg, - Output = BoxedUpdateStatement< - 'static, - Pg, - ResourceTable, - V, - >, - >, + >: BoxableUpdateStatement, V>, + // Allows calling // ".filter(resource_table().primary_key().eq(resource_id)" on the // boxed update statement. BoxedUpdateStatement<'static, Pg, ResourceTable, V>: - query_methods::FilterDsl< - Eq, Self::Id>, - Output = BoxedUpdateStatement< - 'static, - Pg, - ResourceTable, - V, - >, - >, + FilterBy, Self::Id>>, // Allows using "id" in expressions (e.g. ".eq(...)") with... - Self::Id: diesel::expression::AsExpression< + Self::Id: AsExpression< // ... The Collection table's PK SerializedCollectionPrimaryKey, - > + diesel::expression::AsExpression< + > + AsExpression< // ... The Resource table's PK SerializedResourcePrimaryKey, - > + diesel::expression::AsExpression< + > + AsExpression< // ... The Resource table's FK to the Collection table SerializedResourceForeignKey, >, - as Expression>::SqlType: - SingleValue, - as Expression>::SqlType: - SingleValue, - ::SqlType: SingleValue, + ExprSqlType>: SingleValue, + ExprSqlType>: SingleValue, + ExprSqlType: SingleValue, // Allows calling "is_null()" on the following columns. 
Self::CollectionTimeDeletedColumn: ExpressionMethods, From b7091795e201d312672d536460cf364c7025779a Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 24 May 2022 10:30:19 -0400 Subject: [PATCH 16/29] more trait bound cleanup --- nexus/src/db/collection_attach.rs | 130 +++++++-------------------- nexus/src/db/collection_detach.rs | 95 ++------------------ nexus/src/db/cte_utils.rs | 99 ++++++++++++++++++++ nexus/src/db/mod.rs | 2 +- nexus/src/db/model/instance_state.rs | 2 + 5 files changed, 140 insertions(+), 188 deletions(-) create mode 100644 nexus/src/db/cte_utils.rs diff --git a/nexus/src/db/collection_attach.rs b/nexus/src/db/collection_attach.rs index 5a18367d339..7dd4ba21412 100644 --- a/nexus/src/db/collection_attach.rs +++ b/nexus/src/db/collection_attach.rs @@ -13,10 +13,14 @@ //! - Updates the collection's resource generation number //! - Updates the resource row +use super::cte_utils::{ + BoxableUpdateStatement, BoxedQuery, BoxableTable, ExprSqlType, FilterBy, + QueryFromClause, QuerySqlType, TypesAreSame3, +}; use super::pool::DbConnection; use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionManager, PoolError}; use diesel::associations::HasTable; -use diesel::expression::Expression; +use diesel::expression::{AsExpression, Expression}; use diesel::helper_types::*; use diesel::pg::Pg; use diesel::prelude::*; @@ -46,10 +50,6 @@ type CollectionIdColumn = type ResourceIdColumn = >::ResourceIdColumn; -/// Trick to check that columns come from the same table -pub trait TypesAreSame {} -impl TypesAreSame for (T, T, T) {} - /// Trait to be implemented by structs representing an attachable collection. /// /// For example, since Instances have a one-to-many relationship with @@ -182,124 +182,66 @@ pub trait DatastoreAttachTarget: Selectable { ::Table, ::Table, ::Table, - ): TypesAreSame, + ): TypesAreSame3, // Ensure the "resource" columns all belong to the same table. 
( ::Table, ::Table, ::Table, - ): TypesAreSame, + ): TypesAreSame3, Self: Sized, - // Enables the "table()" method on the Collection. - CollectionTable: HasTable
> - + 'static - + Send - + Table - // Allows calling ".into_boxed()" on the table. - + query_methods::BoxedDsl< - 'static, - Pg, - Output = BoxedDslOutput>, - >, - // Enables the "table()" method on the Resource. - ResourceTable: HasTable
> - + 'static - + Send - + Table - // Allows calling ".into_boxed()" on the table. - + query_methods::BoxedDsl< - 'static, - Pg, - Output = BoxedDslOutput>, - >, + + // Treat the collection and resource as boxed tables. + CollectionTable: BoxableTable, + ResourceTable: BoxableTable, + // Allows treating "collection_exists_query" as a boxed "dyn QueryFragment". - as QuerySource>::FromClause: + QueryFromClause>: QueryFragment + Send, // Allows treating "resource_exists_query" as a boxed "dyn QueryFragment". - as QuerySource>::FromClause: + QueryFromClause>: QueryFragment + Send, // Allows sending "collection_exists_query" between threads. - as AsQuery>::SqlType: Send, + QuerySqlType>: Send, // Allows sending "resource_exists_query" between threads. - as AsQuery>::SqlType: Send, + QuerySqlType>: Send, // Allows calling ".filter()" on the boxed collection table. BoxedQuery>: - query_methods::FilterDsl< - Eq, Self::Id>, - Output = BoxedQuery>, - // Filter by time deleted = NULL - > + query_methods::FilterDsl< - IsNull, - Output = BoxedQuery>, - >, + FilterBy, Self::Id>> + + FilterBy>, // Allows calling ".filter()" on the boxed resource table. - BoxedQuery>: query_methods::FilterDsl< - Eq, Self::Id>, - Output = BoxedQuery>, - // Filter by collection ID (when counting attached resources) - > + query_methods::FilterDsl< - Eq, - Output = BoxedQuery>, - // Filter by collection ID = NULL - > + query_methods::FilterDsl< - IsNull, - Output = BoxedQuery>, - // Filter by time deleted = NULL - > + query_methods::FilterDsl< - IsNull, - Output = BoxedQuery>, - >, + BoxedQuery>: + FilterBy, Self::Id>> + + FilterBy> + + FilterBy> + + FilterBy>, - // See: "update_resource_statement". - // - // Allows referencing the default "WHERE" clause of the update - // statement. 
- ResourceTable: IntoUpdateTarget, // Allows calling "update.into_boxed()" UpdateStatement< ResourceTable, ResourceTableWhereClause, V, - >: query_methods::BoxedDsl< - 'static, - Pg, - Output = BoxedUpdateStatement< - 'static, - Pg, - ResourceTable, - V, - >, - >, + >: BoxableUpdateStatement, V>, // Allows calling // ".filter(resource_table().primary_key().eq(resource_id)" on the // boxed update statement. BoxedUpdateStatement<'static, Pg, ResourceTable, V>: - query_methods::FilterDsl< - Eq, Self::Id>, - Output = BoxedUpdateStatement< - 'static, - Pg, - ResourceTable, - V, - >, - >, + FilterBy, Self::Id>>, // Allows using "id" in expressions (e.g. ".eq(...)") with... - Self::Id: diesel::expression::AsExpression< + Self::Id: AsExpression< // ... The Collection table's PK SerializedCollectionPrimaryKey, - > + diesel::expression::AsExpression< + > + AsExpression< // ... The Resource table's PK SerializedResourcePrimaryKey, - > + diesel::expression::AsExpression< + > + AsExpression< // ... The Resource table's FK to the Collection table SerializedResourceForeignKey, >, - as Expression>::SqlType: - SingleValue, - as Expression>::SqlType: - SingleValue, - ::SqlType: SingleValue, + ExprSqlType>: SingleValue, + ExprSqlType>: SingleValue, + ExprSqlType: SingleValue, // Allows calling "is_null()" on the following columns. Self::CollectionTimeDeletedColumn: ExpressionMethods, @@ -583,16 +525,6 @@ type SerializedResourcePrimaryKey = type SerializedResourceForeignKey = as diesel::Expression>::SqlType; -type TableSqlType = ::SqlType; - -type BoxedQuery = diesel::helper_types::IntoBoxed<'static, T, Pg>; -type BoxedDslOutput = diesel::internal::table_macro::BoxedSelectStatement< - 'static, - TableSqlType, - diesel::internal::table_macro::FromClause, - Pg, ->; - /// This implementation uses a CTE which attempts to do the following: /// /// 1. 
(collection_by_id, resource_by_id): Identify if the collection and diff --git a/nexus/src/db/collection_detach.rs b/nexus/src/db/collection_detach.rs index 1da5cb73a01..dc16e7dfdcc 100644 --- a/nexus/src/db/collection_detach.rs +++ b/nexus/src/db/collection_detach.rs @@ -10,6 +10,11 @@ //! - Validates conditions on both the collection and resource //! - Updates the resource row +use super::cte_utils::{ + BoxableTable, BoxableUpdateStatement, BoxedQuery, FilterBy, + ExprSqlType, QuerySqlType, QueryFromClause, TypesAreSame2, + TypesAreSame3, TableDefaultWhereClause +}; use super::pool::DbConnection; use crate::db::collection_attach::DatastoreAttachTarget; use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionManager, PoolError}; @@ -36,13 +41,6 @@ type ResourceTable = <>::ResourceCollectionIdColumn as Column>::Table; -/// The default WHERE clause of a table, when treated as an UPDATE target. -type TableDefaultWhereClause
=
::WhereClause; - -type FromClause = ::FromClause; -type QuerySqlType = ::SqlType; -type ExprSqlType = ::SqlType; - /// The default WHERE clause of the resource table. type ResourceTableDefaultWhereClause = TableDefaultWhereClause>; @@ -54,75 +52,6 @@ type CollectionIdColumn = type ResourceIdColumn = >::ResourceIdColumn; -/// Trick to check that columns come from the same table -pub trait TypesAreSame2 {} -impl TypesAreSame2 for (T, T) {} -pub trait TypesAreSame3 {} -impl TypesAreSame3 for (T, T, T) {} - -/// Ensures that the type is a Diesel table, and that we can call ".table" and -/// ".into_boxed()" on it. -pub trait BoxableTable: HasTable
- + 'static - + Send - + Table - + IntoUpdateTarget - + query_methods::BoxedDsl< - 'static, - Pg, - Output = BoxedDslOutput, - > {} -impl BoxableTable for T -where - T: HasTable
- + 'static - + Send - + Table - + IntoUpdateTarget - + query_methods::BoxedDsl< - 'static, - Pg, - Output = BoxedDslOutput, - >, -{} - -/// Ensures that calling ".filter(predicate)" on this type is callable, and does -/// not change the underlying type. -pub trait FilterBy: query_methods::FilterDsl {} -impl FilterBy for T -where - T: query_methods::FilterDsl {} - -/// Allows calling ".into_boxed" on an update statement. -pub trait BoxableUpdateStatement: - query_methods::BoxedDsl< - 'static, - Pg, - Output = BoxedUpdateStatement< - 'static, - Pg, - Table, - V - >, - > -where - Table: QuerySource {} - -impl BoxableUpdateStatement for T -where - T: query_methods::BoxedDsl< - 'static, - Pg, - Output = BoxedUpdateStatement< - 'static, - Pg, - Table, - V, - >, - >, - Table: QuerySource, -{} - /// Trait to be implemented by structs representing a detachable collection. /// /// A blanket implementation is provided for traits that implement @@ -206,10 +135,10 @@ pub trait DatastoreDetachTarget: Selectable { ResourceTable: BoxableTable, // Allows treating "collection_exists_query" as a boxed "dyn QueryFragment". - FromClause>: QueryFragment + Send, + QueryFromClause>: QueryFragment + Send, QuerySqlType>: Send, // Allows treating "resource_exists_query" as a boxed "dyn QueryFragment". - FromClause>: QueryFragment + Send, + QueryFromClause>: QueryFragment + Send, QuerySqlType>: Send, // Allows calling ".filter()" on the boxed collection table. @@ -511,16 +440,6 @@ type SerializedResourcePrimaryKey = type SerializedResourceForeignKey = as diesel::Expression>::SqlType; -type TableSqlType = ::SqlType; - -type BoxedQuery = diesel::helper_types::IntoBoxed<'static, T, Pg>; -type BoxedDslOutput = diesel::internal::table_macro::BoxedSelectStatement< - 'static, - TableSqlType, - diesel::internal::table_macro::FromClause, - Pg, ->; - /// This implementation uses a CTE which attempts to do the following: /// /// 1. 
(collection_by_id, resource_by_id): Identify if the collection and diff --git a/nexus/src/db/cte_utils.rs b/nexus/src/db/cte_utils.rs new file mode 100644 index 00000000000..9ba77951527 --- /dev/null +++ b/nexus/src/db/cte_utils.rs @@ -0,0 +1,99 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Utilities for writing CTEs. + +use diesel::associations::HasTable; +use diesel::expression::Expression; +use diesel::pg::Pg; +use diesel::prelude::*; +use diesel::query_builder::*; +use diesel::query_dsl::methods as query_methods; +use diesel::query_source::Table; + +/// Trick to check that columns come from the same table +pub trait TypesAreSame2 {} +impl TypesAreSame2 for (T, T) {} +pub trait TypesAreSame3 {} +impl TypesAreSame3 for (T, T, T) {} + +/// The default WHERE clause of a table, when treated as an UPDATE target. +pub(crate) type TableDefaultWhereClause
=
::WhereClause; + +// Short-hand type accessors. +pub(crate) type QueryFromClause = ::FromClause; +pub(crate) type QuerySqlType = ::SqlType; +pub(crate) type ExprSqlType = ::SqlType; +type TableSqlType = ::SqlType; + +pub(crate) type BoxedQuery = diesel::helper_types::IntoBoxed<'static, T, Pg>; +pub(crate) type BoxedDslOutput = diesel::internal::table_macro::BoxedSelectStatement< + 'static, + TableSqlType, + diesel::internal::table_macro::FromClause, + Pg, +>; + +/// Ensures that the type is a Diesel table, and that we can call ".table" and +/// ".into_boxed()" on it. +pub trait BoxableTable: HasTable
+ + 'static + + Send + + Table + + IntoUpdateTarget + + query_methods::BoxedDsl< + 'static, + Pg, + Output = BoxedDslOutput, + > {} +impl BoxableTable for T +where + T: HasTable
+ + 'static + + Send + + Table + + IntoUpdateTarget + + query_methods::BoxedDsl< + 'static, + Pg, + Output = BoxedDslOutput, + >, +{} + +/// Ensures that calling ".filter(predicate)" on this type is callable, and does +/// not change the underlying type. +pub trait FilterBy: query_methods::FilterDsl {} +impl FilterBy for T +where + T: query_methods::FilterDsl {} + +/// Allows calling ".into_boxed" on an update statement. +pub trait BoxableUpdateStatement: + query_methods::BoxedDsl< + 'static, + Pg, + Output = BoxedUpdateStatement< + 'static, + Pg, + Table, + V + >, + > +where + Table: QuerySource {} +impl BoxableUpdateStatement for T +where + T: query_methods::BoxedDsl< + 'static, + Pg, + Output = BoxedUpdateStatement< + 'static, + Pg, + Table, + V, + >, + >, + Table: QuerySource, +{} + diff --git a/nexus/src/db/mod.rs b/nexus/src/db/mod.rs index f180006ba15..2c2fb0d7ecf 100644 --- a/nexus/src/db/mod.rs +++ b/nexus/src/db/mod.rs @@ -10,7 +10,7 @@ pub mod collection_attach; pub mod collection_detach; pub mod collection_insert; mod config; - +mod cte_utils; // This is marked public for use by the integration tests pub mod datastore; mod error; diff --git a/nexus/src/db/model/instance_state.rs b/nexus/src/db/model/instance_state.rs index c06118433d6..95847792226 100644 --- a/nexus/src/db/model/instance_state.rs +++ b/nexus/src/db/model/instance_state.rs @@ -25,6 +25,7 @@ impl_enum_wrapper!( Migrating => b"migrating" Repairing => b"repairing" Failed => b"failed" + Destroying => b"destroying" Destroyed => b"destroyed" ); @@ -52,6 +53,7 @@ impl From for sled_agent_client::types::InstanceState { Migrating => Output::Migrating, Repairing => Output::Repairing, Failed => Output::Failed, + Destroying => Output::Destroying, Destroyed => Output::Destroyed, } } From 362dfba9edbc84c052f0519f44e70b45e0087230 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 24 May 2022 14:26:13 -0400 Subject: [PATCH 17/29] removing rcgen, WIP moving bounds to associated types --- 
nexus/src/db/collection_attach.rs | 180 +++++++----------------------- nexus/src/db/collection_detach.rs | 20 +--- nexus/src/db/model/instance.rs | 1 - 3 files changed, 42 insertions(+), 159 deletions(-) diff --git a/nexus/src/db/collection_attach.rs b/nexus/src/db/collection_attach.rs index 7dd4ba21412..e4284407002 100644 --- a/nexus/src/db/collection_attach.rs +++ b/nexus/src/db/collection_attach.rs @@ -10,12 +10,11 @@ //! - Validates conditions on both the collection and resource //! - Ensures the number of attached resources does not exceed //! a provided threshold -//! - Updates the collection's resource generation number //! - Updates the resource row use super::cte_utils::{ BoxableUpdateStatement, BoxedQuery, BoxableTable, ExprSqlType, FilterBy, - QueryFromClause, QuerySqlType, TypesAreSame3, + QueryFromClause, QuerySqlType, TypesAreSame2, TypesAreSame3, }; use super::pool::DbConnection; use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionManager, PoolError}; @@ -34,7 +33,7 @@ use std::fmt::Debug; /// this table. type CollectionTable = <>::CollectionGenerationColumn as Column>::Table; +>>::CollectionIdColumn as Column>::Table; /// The table representing the resource. This table contains an /// ID acting as a foreign key into the collection table. type ResourceTable = < = < = as IntoUpdateTarget>::WhereClause; -type CollectionGenerationColumn = - >::CollectionGenerationColumn; type CollectionIdColumn = >::CollectionIdColumn; type ResourceIdColumn = @@ -57,13 +54,11 @@ type ResourceIdColumn = /// ``` /// # use diesel::prelude::*; /// # use omicron_nexus::db::collection_attach::DatastoreAttachTarget; -/// # use omicron_nexus::db::model::Generation; /// # /// # table! 
{ /// # test_schema.instance (id) { /// # id -> Uuid, /// # time_deleted -> Nullable, -/// # rcgen -> Int8, /// # } /// # } /// # @@ -88,7 +83,6 @@ type ResourceIdColumn = /// struct Instance { /// pub id: uuid::Uuid, /// pub time_deleted: Option>, -/// pub rcgen: Generation, /// } /// /// impl DatastoreAttachTarget for Instance { @@ -96,7 +90,6 @@ type ResourceIdColumn = /// type Id = uuid::Uuid; /// /// type CollectionIdColumn = instance::dsl::id; -/// type CollectionGenerationColumn = instance::dsl::rcgen; /// type CollectionTimeDeletedColumn = instance::dsl::time_deleted; /// /// type ResourceIdColumn = disk::dsl::id; @@ -104,30 +97,41 @@ type ResourceIdColumn = /// type ResourceTimeDeletedColumn = disk::dsl::time_deleted; /// } /// ``` -pub trait DatastoreAttachTarget: Selectable { +pub trait DatastoreAttachTarget: Selectable +// where +// <::Table as Table>::PrimaryKey: diesel::sql_types::SqlType, +//where +// ExprSqlType>: SingleValue, +// ExprSqlType>: SingleValue, +// ExprSqlType: SingleValue, +{ /// The Rust type of the collection and resource ids (typically Uuid). type Id: Copy + Debug + PartialEq + Send + 'static; +// AsExpression> + +// AsExpression> + +// AsExpression>; /// The primary key column of the collection. type CollectionIdColumn: Column; - /// The column in the CollectionTable that acts as a generation number. - /// This is the "resource-generation-number" in RFD 192. - type CollectionGenerationColumn: Column - + Default - + Expression; - /// The time deleted column in the CollectionTable - type CollectionTimeDeletedColumn: Column + Default; + type CollectionTimeDeletedColumn: Column
::Table> + + Default + + ExpressionMethods; /// The primary key column of the resource type ResourceIdColumn: Column; /// The column in the resource acting as a foreign key into the Collection - type ResourceCollectionIdColumn: Column + Default; + type ResourceCollectionIdColumn: Column + Default + ExpressionMethods; +// type ResourceCollectionIdColumn: Column
::Table> + +// Default + +// ExpressionMethods; /// The time deleted column in the ResourceTable - type ResourceTimeDeletedColumn: Column + Default; + type ResourceTimeDeletedColumn: Column
::Table> + + Default + + ExpressionMethods; /// Creates a statement for attaching a resource to the given collection. /// @@ -180,9 +184,8 @@ pub trait DatastoreAttachTarget: Selectable { // Ensure the "collection" columns all belong to the same table. ( ::Table, - ::Table, ::Table, - ): TypesAreSame3, + ): TypesAreSame2, // Ensure the "resource" columns all belong to the same table. ( ::Table, @@ -309,8 +312,6 @@ pub trait DatastoreAttachTarget: Selectable { .into_boxed() .filter(resource_table().primary_key().eq(resource_id)); - let collection_from_clause = collection_table().from_clause(); - let collection_returning_clause = Self::as_returning(); let resource_returning_clause = ResourceType::as_returning(); AttachToCollectionStatement { collection_exists_query, @@ -320,8 +321,6 @@ pub trait DatastoreAttachTarget: Selectable { resource_query, max_attached_resources, update_resource_statement, - collection_from_clause, - collection_returning_clause, resource_returning_clause, } } @@ -351,11 +350,6 @@ where // Update statement for the resource. update_resource_statement: BoxedUpdateStatement<'static, Pg, ResourceTable, V>, - // Describes the target of the collection table UPDATE. - collection_from_clause: - as QuerySource>::FromClause, - // Describes what should be returned after UPDATE-ing the collection. - collection_returning_clause: AsSelect, // Describes what should be returned after UPDATE-ing the resource. resource_returning_clause: AsSelect, } @@ -400,7 +394,7 @@ pub enum AttachError { /// Describes the type returned from the actual CTE, which is parsed /// and interpreted before propagating it to users of the Rust API. 
pub type RawOutput = - (i64, Option, Option, Option, Option); + (i64, Option, Option, Option); impl AttachToCollectionStatement where @@ -456,7 +450,6 @@ where attached_count, collection_before_update, resource_before_update, - collection_after_update, resource_after_update, ) = result; @@ -466,14 +459,13 @@ where let resource_before_update = resource_before_update .ok_or_else(|| AttachError::ResourceNotFound)?; - match (collection_after_update, resource_after_update) { - (Some(collection), Some(resource)) => Ok((collection, resource)), - (None, None) => Err(AttachError::NoUpdate { + match resource_after_update { + Some(resource) => Ok((collection_before_update, resource)), + None => Err(AttachError::NoUpdate { attached_count, resource: resource_before_update, collection: collection_before_update, }), - _ => panic!("Partial update applied - this is a CTE bug"), } } } @@ -494,8 +486,6 @@ where Nullable>, // If the resource exists, the value before update. Nullable>, - // If the collection was updated, the new value. - Nullable>, // If the resource was updated, the new value. Nullable>, ); @@ -535,8 +525,7 @@ type SerializedResourceForeignKey = /// constraints on the collection and resource objects. /// 4. (do_update): IFF all previous checks succeeded, make a decision to perfom /// an update. -/// 5. (updated_collection): Increase the generation number on the collection. -/// 6. (updated_resource): Apply user-provided updates on the resource - +/// 5. (updated_resource): Apply user-provided updates on the resource - /// presumably, setting the collection ID value. 
/// /// This is implemented as follows: @@ -582,12 +571,6 @@ type SerializedResourceForeignKey = /// // (SELECT * FROM resource_count) < , /// // TRUE, FALSE), /// // ), -/// // /* Update the generation number of the collection row */ -/// // updated_collection AS MATERIALIZED ( -/// // UPDATE C SET = + 1 -/// // WHERE IN (SELECT FROM collection_info) AND (SELECT * FROM do_update) -/// // RETURNING * -/// // ), /// // /* Update the resource */ /// // updated_resource AS ( /// // UPDATE R SET @@ -598,8 +581,7 @@ type SerializedResourceForeignKey = /// // (SELECT * FROM resource_count) /// // LEFT JOIN (SELECT * FROM collection_by_id) ON TRUE /// // LEFT JOIN (SELECT * FROM resource_by_id) ON TRUE -/// // LEFT JOIN (SELECT * FROM updated_collection) ON TRUE -/// // LEFT JOIN (SELECT * FROM resource) ON TRUE; +/// // LEFT JOIN (SELECT * FROM updated_resource) ON TRUE; /// ``` impl QueryFragment for AttachToCollectionStatement @@ -607,16 +589,11 @@ where ResourceType: Selectable, C: DatastoreAttachTarget, CollectionPrimaryKey: diesel::Column, - // Necessary to "walk_ast" over "select.collection_from_clause". - as QuerySource>::FromClause: - QueryFragment, // Necessary to "walk_ast" over "self.update_resource_statement". BoxedUpdateStatement<'static, Pg, ResourceTable, V>: QueryFragment, // Necessary to "walk_ast" over "self.resource_returning_clause". AsSelect: QueryFragment, - // Necessary to "walk_ast" over "self.collection_returning_clause". 
- AsSelect: QueryFragment, { fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, Pg>) -> QueryResult<()> { out.unsafe_to_cache_prepared(); @@ -649,26 +626,6 @@ where self.max_attached_resources) ); - out.push_sql("updated_collection AS MATERIALIZED (UPDATE "); - self.collection_from_clause.walk_ast(out.reborrow())?; - out.push_sql(" SET "); - out.push_identifier( - CollectionGenerationColumn::::NAME, - )?; - out.push_sql(" = "); - out.push_identifier( - CollectionGenerationColumn::::NAME, - )?; - out.push_sql(" + 1 WHERE "); - out.push_identifier(CollectionPrimaryKey::::NAME)?; - out.push_sql(" IN (SELECT "); - out.push_identifier(CollectionPrimaryKey::::NAME)?; - out.push_sql( - " FROM collection_info) AND (SELECT * FROM do_update) RETURNING ", - ); - self.collection_returning_clause.walk_ast(out.reborrow())?; - out.push_sql("), "); - out.push_sql("updated_resource AS ("); self.update_resource_statement.walk_ast(out.reborrow())?; // NOTE: It is safe to start with "AND" - we forced the update statement @@ -702,7 +659,6 @@ where (SELECT * FROM resource_count) \ LEFT JOIN (SELECT * FROM collection_by_id) ON TRUE \ LEFT JOIN (SELECT * FROM resource_by_id) ON TRUE \ - LEFT JOIN (SELECT * FROM updated_collection) ON TRUE \ LEFT JOIN (SELECT * FROM updated_resource) ON TRUE;", ); @@ -738,7 +694,6 @@ mod test { time_created -> Timestamptz, time_modified -> Timestamptz, time_deleted -> Nullable, - rcgen -> Int8, } } @@ -765,8 +720,7 @@ mod test { description STRING(512) NOT NULL, \ time_created TIMESTAMPTZ NOT NULL, \ time_modified TIMESTAMPTZ NOT NULL, \ - time_deleted TIMESTAMPTZ, \ - rcgen INT NOT NULL); \ + time_deleted TIMESTAMPTZ); \ CREATE TABLE IF NOT EXISTS test_schema.resource( \ id UUID PRIMARY KEY, \ name STRING(63) NOT NULL, \ @@ -803,14 +757,12 @@ mod test { struct Collection { #[diesel(embed)] pub identity: CollectionIdentity, - pub rcgen: i64, } impl DatastoreAttachTarget for Collection { type Id = uuid::Uuid; type CollectionIdColumn = collection::dsl::id; - 
type CollectionGenerationColumn = collection::dsl::rcgen; type CollectionTimeDeletedColumn = collection::dsl::time_deleted; type ResourceIdColumn = resource::dsl::id; @@ -829,7 +781,6 @@ mod test { }; let c = Collection { identity: CollectionIdentity::new(id, create_params), - rcgen: 1, }; diesel::insert_into(collection::table) @@ -909,8 +860,7 @@ mod test { \"test_schema\".\"collection\".\"description\", \ \"test_schema\".\"collection\".\"time_created\", \ \"test_schema\".\"collection\".\"time_modified\", \ - \"test_schema\".\"collection\".\"time_deleted\", \ - \"test_schema\".\"collection\".\"rcgen\" \ + \"test_schema\".\"collection\".\"time_deleted\" \ FROM \"test_schema\".\"collection\" \ WHERE (\ (\"test_schema\".\"collection\".\"id\" = $1) AND \ @@ -947,8 +897,7 @@ mod test { \"test_schema\".\"collection\".\"description\", \ \"test_schema\".\"collection\".\"time_created\", \ \"test_schema\".\"collection\".\"time_modified\", \ - \"test_schema\".\"collection\".\"time_deleted\", \ - \"test_schema\".\"collection\".\"rcgen\" \ + \"test_schema\".\"collection\".\"time_deleted\" \ FROM \"test_schema\".\"collection\" \ WHERE (\ (\"test_schema\".\"collection\".\"id\" = $4) AND \ @@ -979,23 +928,6 @@ mod test { TRUE,\ FALSE)\ ), \ - updated_collection AS MATERIALIZED (\ - UPDATE \ - \"test_schema\".\"collection\" \ - SET \ - \"rcgen\" = \"rcgen\" + 1 \ - WHERE \ - \"id\" IN (SELECT \"id\" FROM collection_info) AND \ - (SELECT * FROM do_update) \ - RETURNING \ - \"test_schema\".\"collection\".\"id\", \ - \"test_schema\".\"collection\".\"name\", \ - \"test_schema\".\"collection\".\"description\", \ - \"test_schema\".\"collection\".\"time_created\", \ - \"test_schema\".\"collection\".\"time_modified\", \ - \"test_schema\".\"collection\".\"time_deleted\", \ - \"test_schema\".\"collection\".\"rcgen\"\ - ), \ updated_resource AS (\ UPDATE \ \"test_schema\".\"resource\" \ @@ -1017,7 +949,6 @@ mod test { (SELECT * FROM resource_count) \ LEFT JOIN (SELECT * FROM 
collection_by_id) ON TRUE \ LEFT JOIN (SELECT * FROM resource_by_id) ON TRUE \ - LEFT JOIN (SELECT * FROM updated_collection) ON TRUE \ LEFT JOIN (SELECT * FROM updated_resource) ON TRUE; -- binds: [cccccccc-cccc-cccc-cccc-cccccccccccc, aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa, cccccccc-cccc-cccc-cccc-cccccccccccc, cccccccc-cccc-cccc-cccc-cccccccccccc, aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa, cccccccc-cccc-cccc-cccc-cccccccccccc, aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa]"; assert_eq!(query, expected_query); } @@ -1102,7 +1033,7 @@ mod test { let resource_id = uuid::Uuid::new_v4(); // Create the collection and resource. - let collection = + let _collection = insert_collection(collection_id, "collection", &pool).await; let _resource = insert_resource(resource_id, "resource", &pool).await; @@ -1132,11 +1063,6 @@ mod test { get_collection(collection_id, &pool).await ); assert_eq!(returned_resource, get_resource(resource_id, &pool).await); - // The generation number should have incremented in the collection. - assert_eq!( - collection.rcgen + 1, - get_collection(collection_id, &pool).await.rcgen - ); db.cleanup().await.unwrap(); logctx.cleanup_successful(); @@ -1155,7 +1081,7 @@ mod test { let resource_id = uuid::Uuid::new_v4(); // Create the collection and resource. - let collection = + let _collection = insert_collection(collection_id, "collection", &pool).await; let _resource = insert_resource(resource_id, "resource", &pool).await; @@ -1196,11 +1122,6 @@ mod test { get_collection(collection_id, &pool).await ); assert_eq!(returned_resource, get_resource(resource_id, &pool).await); - // The generation number should have incremented in the collection. - assert_eq!( - collection.rcgen + 1, - get_collection(collection_id, &pool).await.rcgen - ); db.cleanup().await.unwrap(); logctx.cleanup_successful(); @@ -1220,7 +1141,7 @@ mod test { let collection_id = uuid::Uuid::new_v4(); // Create the collection. 
- let collection = + let _collection = insert_collection(collection_id, "collection", &pool).await; // Create each resource, attaching them to the collection. @@ -1256,12 +1177,6 @@ mod test { returned_resource, get_resource(resource_id, &pool).await ); - - // The generation number should have incremented in the collection. - assert_eq!( - collection.rcgen + 1 + i64::try_from(i).unwrap(), - get_collection(collection_id, &pool).await.rcgen - ); } db.cleanup().await.unwrap(); @@ -1280,7 +1195,7 @@ mod test { let collection_id = uuid::Uuid::new_v4(); // Attach a resource to a collection, as usual. - let collection = + let _collection = insert_collection(collection_id, "collection", &pool).await; let resource_id1 = uuid::Uuid::new_v4(); let _resource = insert_resource(resource_id1, "resource1", &pool).await; @@ -1328,12 +1243,6 @@ mod test { _ => panic!("Unexpected error: {:?}", err), }; - // The generation number should only have bumped once. - assert_eq!( - collection.rcgen + 1, - get_collection(collection_id, &pool).await.rcgen - ); - db.cleanup().await.unwrap(); logctx.cleanup_successful(); } @@ -1350,7 +1259,7 @@ mod test { let collection_id = uuid::Uuid::new_v4(); // Attach a resource to a collection, as usual. - let collection = + let _collection = insert_collection(collection_id, "collection", &pool).await; let resource_id = uuid::Uuid::new_v4(); let _resource = insert_resource(resource_id, "resource", &pool).await; @@ -1441,13 +1350,6 @@ mod test { _ => panic!("Unexpected error: {:?}", err), }; - // The generation number should only have bumped once, from the original - // resource insertion. - assert_eq!( - collection.rcgen + 1, - get_collection(collection_id, &pool).await.rcgen - ); - db.cleanup().await.unwrap(); logctx.cleanup_successful(); } @@ -1465,7 +1367,7 @@ mod test { let resource_id = uuid::Uuid::new_v4(); // Create the collection and resource. 
- let collection = + let _collection = insert_collection(collection_id, "collection", &pool).await; let _resource = insert_resource(resource_id, "resource", &pool).await; @@ -1502,10 +1404,6 @@ mod test { ); assert_eq!(returned_resource, get_resource(resource_id, &pool).await); assert_eq!(returned_resource.description(), "new description"); - assert_eq!( - collection.rcgen + 1, - get_collection(collection_id, &pool).await.rcgen - ); db.cleanup().await.unwrap(); logctx.cleanup_successful(); diff --git a/nexus/src/db/collection_detach.rs b/nexus/src/db/collection_detach.rs index dc16e7dfdcc..b618a9d4a4a 100644 --- a/nexus/src/db/collection_detach.rs +++ b/nexus/src/db/collection_detach.rs @@ -607,7 +607,6 @@ mod test { time_created -> Timestamptz, time_modified -> Timestamptz, time_deleted -> Nullable, - rcgen -> Int8, } } @@ -634,8 +633,7 @@ mod test { description STRING(512) NOT NULL, \ time_created TIMESTAMPTZ NOT NULL, \ time_modified TIMESTAMPTZ NOT NULL, \ - time_deleted TIMESTAMPTZ, \ - rcgen INT NOT NULL); \ + time_deleted TIMESTAMPTZ); \ CREATE TABLE IF NOT EXISTS test_schema.resource( \ id UUID PRIMARY KEY, \ name STRING(63) NOT NULL, \ @@ -672,14 +670,12 @@ mod test { struct Collection { #[diesel(embed)] pub identity: CollectionIdentity, - pub rcgen: i64, } impl DatastoreAttachTarget for Collection { type Id = uuid::Uuid; type CollectionIdColumn = collection::dsl::id; - type CollectionGenerationColumn = collection::dsl::rcgen; type CollectionTimeDeletedColumn = collection::dsl::time_deleted; type ResourceIdColumn = resource::dsl::id; @@ -698,7 +694,6 @@ mod test { }; let c = Collection { identity: CollectionIdentity::new(id, create_params), - rcgen: 1, }; diesel::insert_into(collection::table) @@ -796,8 +791,7 @@ mod test { \"test_schema\".\"collection\".\"description\", \ \"test_schema\".\"collection\".\"time_created\", \ \"test_schema\".\"collection\".\"time_modified\", \ - \"test_schema\".\"collection\".\"time_deleted\", \ - 
\"test_schema\".\"collection\".\"rcgen\" \ + \"test_schema\".\"collection\".\"time_deleted\" \ FROM \"test_schema\".\"collection\" \ WHERE (\ (\"test_schema\".\"collection\".\"id\" = $1) AND \ @@ -826,8 +820,7 @@ mod test { \"test_schema\".\"collection\".\"description\", \ \"test_schema\".\"collection\".\"time_created\", \ \"test_schema\".\"collection\".\"time_modified\", \ - \"test_schema\".\"collection\".\"time_deleted\", \ - \"test_schema\".\"collection\".\"rcgen\" \ + \"test_schema\".\"collection\".\"time_deleted\" \ FROM \"test_schema\".\"collection\" \ WHERE (\ (\"test_schema\".\"collection\".\"id\" = $3) AND \ @@ -1098,13 +1091,6 @@ mod test { _ => panic!("Unexpected error: {:?}", err), }; - // The generation number should only have bumped once, from the original - // resource insertion. - assert_eq!( - collection.rcgen + 1, - get_collection(collection_id, &pool).await.rcgen - ); - db.cleanup().await.unwrap(); logctx.cleanup_successful(); } diff --git a/nexus/src/db/model/instance.rs b/nexus/src/db/model/instance.rs index 556ce16fe82..e3086f16dd9 100644 --- a/nexus/src/db/model/instance.rs +++ b/nexus/src/db/model/instance.rs @@ -77,7 +77,6 @@ impl DatastoreAttachTarget for Instance { type Id = Uuid; type CollectionIdColumn = instance::dsl::id; - type CollectionGenerationColumn = instance::dsl::rcgen; type CollectionTimeDeletedColumn = instance::dsl::time_deleted; type ResourceIdColumn = disk::dsl::id; From f38c456d80385cde721ac48aaaa112003bb4cb35 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 24 May 2022 14:45:58 -0400 Subject: [PATCH 18/29] less destroying --- nexus/src/db/model/instance_state.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/nexus/src/db/model/instance_state.rs b/nexus/src/db/model/instance_state.rs index 95847792226..c06118433d6 100644 --- a/nexus/src/db/model/instance_state.rs +++ b/nexus/src/db/model/instance_state.rs @@ -25,7 +25,6 @@ impl_enum_wrapper!( Migrating => b"migrating" Repairing => b"repairing" Failed => b"failed" 
- Destroying => b"destroying" Destroyed => b"destroyed" ); @@ -53,7 +52,6 @@ impl From for sled_agent_client::types::InstanceState { Migrating => Output::Migrating, Repairing => Output::Repairing, Failed => Output::Failed, - Destroying => Output::Destroying, Destroyed => Output::Destroyed, } } From 897b311b5e3e7b614ab0c2101a7fcfe9d0850d9b Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 24 May 2022 14:54:34 -0400 Subject: [PATCH 19/29] Removed ExpressionMethods bound --- nexus/src/db/collection_attach.rs | 42 +++++++++++++++---------------- 1 file changed, 20 insertions(+), 22 deletions(-) diff --git a/nexus/src/db/collection_attach.rs b/nexus/src/db/collection_attach.rs index e4284407002..e23817670b9 100644 --- a/nexus/src/db/collection_attach.rs +++ b/nexus/src/db/collection_attach.rs @@ -14,7 +14,7 @@ use super::cte_utils::{ BoxableUpdateStatement, BoxedQuery, BoxableTable, ExprSqlType, FilterBy, - QueryFromClause, QuerySqlType, TypesAreSame2, TypesAreSame3, + QueryFromClause, QuerySqlType }; use super::pool::DbConnection; use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionManager, PoolError}; @@ -112,7 +112,8 @@ pub trait DatastoreAttachTarget: Selectable // AsExpression>; /// The primary key column of the collection. - type CollectionIdColumn: Column; + type CollectionIdColumn: Column + + Expression>; /// The time deleted column in the CollectionTable type CollectionTimeDeletedColumn: Column
::Table> + @@ -120,13 +121,15 @@ pub trait DatastoreAttachTarget: Selectable ExpressionMethods; /// The primary key column of the resource - type ResourceIdColumn: Column; + type ResourceIdColumn: Column + + Expression>; /// The column in the resource acting as a foreign key into the Collection - type ResourceCollectionIdColumn: Column + Default + ExpressionMethods; // type ResourceCollectionIdColumn: Column
::Table> + -// Default + -// ExpressionMethods; + type ResourceCollectionIdColumn: Column + + Default + +// Expression>> + + ExpressionMethods; /// The time deleted column in the ResourceTable type ResourceTimeDeletedColumn: Column
::Table> + @@ -181,17 +184,17 @@ pub trait DatastoreAttachTarget: Selectable >, ) -> AttachToCollectionStatement where - // Ensure the "collection" columns all belong to the same table. - ( - ::Table, - ::Table, - ): TypesAreSame2, - // Ensure the "resource" columns all belong to the same table. - ( - ::Table, - ::Table, - ::Table, - ): TypesAreSame3, +// // Ensure the "collection" columns all belong to the same table. +// ( +// ::Table, +// ::Table, +// ): TypesAreSame2, +// // Ensure the "resource" columns all belong to the same table. +// ( +// ::Table, +// ::Table, +// ::Table, +// ): TypesAreSame3, Self: Sized, // Treat the collection and resource as boxed tables. @@ -246,11 +249,6 @@ pub trait DatastoreAttachTarget: Selectable ExprSqlType>: SingleValue, ExprSqlType: SingleValue, - // Allows calling "is_null()" on the following columns. - Self::CollectionTimeDeletedColumn: ExpressionMethods, - Self::ResourceTimeDeletedColumn: ExpressionMethods, - Self::ResourceCollectionIdColumn: ExpressionMethods, - // Necessary to actually select the resource in the output type. ResourceType: Selectable, { From 36810b331212d1ea3f7099fda631484290ec5b06 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 24 May 2022 18:08:05 -0400 Subject: [PATCH 20/29] Working through it --- nexus/src/db/collection_attach.rs | 42 ++++++++++++++++++++----------- nexus/src/db/collection_detach.rs | 4 +-- nexus/src/db/model/instance.rs | 1 + 3 files changed, 30 insertions(+), 17 deletions(-) diff --git a/nexus/src/db/collection_attach.rs b/nexus/src/db/collection_attach.rs index e23817670b9..9330f416f24 100644 --- a/nexus/src/db/collection_attach.rs +++ b/nexus/src/db/collection_attach.rs @@ -28,6 +28,7 @@ use diesel::query_dsl::methods as query_methods; use diesel::query_source::Table; use diesel::sql_types::{BigInt, Nullable, SingleValue}; use std::fmt::Debug; +use uuid::Uuid; /// The table representing the collection. The resource references /// this table. 
@@ -38,7 +39,7 @@ type CollectionTable = < = <>::ResourceCollectionIdColumn as Column>::Table; +>>::ResourceIdColumn as Column>::Table; /// The default WHERE clause of the resource table. type ResourceTableWhereClause = as IntoUpdateTarget>::WhereClause; @@ -97,23 +98,32 @@ type ResourceIdColumn = /// type ResourceTimeDeletedColumn = disk::dsl::time_deleted; /// } /// ``` -pub trait DatastoreAttachTarget: Selectable -// where -// <::Table as Table>::PrimaryKey: diesel::sql_types::SqlType, +pub trait DatastoreAttachTarget: Selectable + Sized +// Uuid: AsExpression>, +// SerializedCollectionPrimaryKey: diesel::sql_types::SqlType, +// <<::Table as Table>::PrimaryKey as Expression>::SqlType: diesel::sql_types::SqlType, //where // ExprSqlType>: SingleValue, // ExprSqlType>: SingleValue, // ExprSqlType: SingleValue, { + + type SerializedId: Copy + Debug + Send + 'static + SingleValue + diesel::sql_types::SqlType; + /// The Rust type of the collection and resource ids (typically Uuid). - type Id: Copy + Debug + PartialEq + Send + 'static; -// AsExpression> + + type Id: Copy + Debug + PartialEq + Send + 'static + +// <::Table as Table>::PrimaryKey: diesel::sql_types::SqlType; +// AsExpression> + + AsExpression; +// AsExpression>; // AsExpression> + // AsExpression>; /// The primary key column of the collection. type CollectionIdColumn: Column + - Expression>; + Expression + + ExpressionMethods; +// <::Table as Table>::PrimaryKey: diesel::sql_types::SqlType; /// The time deleted column in the CollectionTable type CollectionTimeDeletedColumn: Column
::Table> + @@ -122,13 +132,13 @@ pub trait DatastoreAttachTarget: Selectable /// The primary key column of the resource type ResourceIdColumn: Column + - Expression>; + Expression + + ExpressionMethods; /// The column in the resource acting as a foreign key into the Collection -// type ResourceCollectionIdColumn: Column
::Table> + - type ResourceCollectionIdColumn: Column + + type ResourceCollectionIdColumn: Column
::Table> + Default + -// Expression>> + +// Expression> + ExpressionMethods; /// The time deleted column in the ResourceTable @@ -195,7 +205,6 @@ pub trait DatastoreAttachTarget: Selectable // ::Table, // ::Table, // ): TypesAreSame3, - Self: Sized, // Treat the collection and resource as boxed tables. CollectionTable: BoxableTable, @@ -235,13 +244,16 @@ pub trait DatastoreAttachTarget: Selectable FilterBy, Self::Id>>, // Allows using "id" in expressions (e.g. ".eq(...)") with... - Self::Id: AsExpression< + Self::Id: + AsExpression< // ... The Collection table's PK SerializedCollectionPrimaryKey, - > + AsExpression< + > + + AsExpression< // ... The Resource table's PK SerializedResourcePrimaryKey, - > + AsExpression< + > + + AsExpression< // ... The Resource table's FK to the Collection table SerializedResourceForeignKey, >, diff --git a/nexus/src/db/collection_detach.rs b/nexus/src/db/collection_detach.rs index b618a9d4a4a..ba31c7d51cf 100644 --- a/nexus/src/db/collection_detach.rs +++ b/nexus/src/db/collection_detach.rs @@ -28,6 +28,7 @@ use diesel::query_dsl::methods as query_methods; use diesel::query_source::Table; use diesel::sql_types::{Nullable, SingleValue}; use std::fmt::Debug; +use uuid::Uuid; /// The table representing the collection. The resource references /// this table. @@ -56,7 +57,7 @@ type ResourceIdColumn = /// /// A blanket implementation is provided for traits that implement /// [`DatastoreAttachTarget`]. -pub trait DatastoreDetachTarget: Selectable { +pub trait DatastoreDetachTarget: Selectable + Sized { /// The Rust type of the collection and resource ids (typically Uuid). type Id: Copy + Debug + PartialEq + Send + 'static; @@ -128,7 +129,6 @@ pub trait DatastoreDetachTarget: Selectable { ::Table, ::Table, ): TypesAreSame3, - Self: Sized, // Treat the collection and resource as boxed tables. 
CollectionTable: BoxableTable, diff --git a/nexus/src/db/model/instance.rs b/nexus/src/db/model/instance.rs index e3086f16dd9..eed2ec7c185 100644 --- a/nexus/src/db/model/instance.rs +++ b/nexus/src/db/model/instance.rs @@ -74,6 +74,7 @@ impl Into for Instance { } impl DatastoreAttachTarget for Instance { + type SerializedId = diesel::sql_types::Uuid; type Id = Uuid; type CollectionIdColumn = instance::dsl::id; From 3146cafe15c750cf7f8f9d3ab8e48f84f03df84c Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 24 May 2022 18:20:25 -0400 Subject: [PATCH 21/29] Removed dead code --- nexus/src/db/collection_attach.rs | 44 +++---------------------------- nexus/src/db/collection_detach.rs | 42 +++++++++++------------------ nexus/src/db/cte_utils.rs | 6 ----- nexus/src/db/model/instance.rs | 1 - 4 files changed, 20 insertions(+), 73 deletions(-) diff --git a/nexus/src/db/collection_attach.rs b/nexus/src/db/collection_attach.rs index 9330f416f24..b9ccc76105f 100644 --- a/nexus/src/db/collection_attach.rs +++ b/nexus/src/db/collection_attach.rs @@ -28,7 +28,6 @@ use diesel::query_dsl::methods as query_methods; use diesel::query_source::Table; use diesel::sql_types::{BigInt, Nullable, SingleValue}; use std::fmt::Debug; -use uuid::Uuid; /// The table representing the collection. The resource references /// this table. 
@@ -98,32 +97,12 @@ type ResourceIdColumn = /// type ResourceTimeDeletedColumn = disk::dsl::time_deleted; /// } /// ``` -pub trait DatastoreAttachTarget: Selectable + Sized -// Uuid: AsExpression>, -// SerializedCollectionPrimaryKey: diesel::sql_types::SqlType, -// <<::Table as Table>::PrimaryKey as Expression>::SqlType: diesel::sql_types::SqlType, -//where -// ExprSqlType>: SingleValue, -// ExprSqlType>: SingleValue, -// ExprSqlType: SingleValue, -{ - - type SerializedId: Copy + Debug + Send + 'static + SingleValue + diesel::sql_types::SqlType; - +pub trait DatastoreAttachTarget: Selectable + Sized { /// The Rust type of the collection and resource ids (typically Uuid). - type Id: Copy + Debug + PartialEq + Send + 'static + -// <::Table as Table>::PrimaryKey: diesel::sql_types::SqlType; -// AsExpression> + - AsExpression; -// AsExpression>; -// AsExpression> + -// AsExpression>; + type Id: Copy + Debug + PartialEq + Send + 'static; /// The primary key column of the collection. - type CollectionIdColumn: Column + - Expression + - ExpressionMethods; -// <::Table as Table>::PrimaryKey: diesel::sql_types::SqlType; + type CollectionIdColumn: Column; /// The time deleted column in the CollectionTable type CollectionTimeDeletedColumn: Column
::Table> + @@ -131,14 +110,11 @@ pub trait DatastoreAttachTarget: Selectable + Sized ExpressionMethods; /// The primary key column of the resource - type ResourceIdColumn: Column + - Expression + - ExpressionMethods; + type ResourceIdColumn: Column; /// The column in the resource acting as a foreign key into the Collection type ResourceCollectionIdColumn: Column
::Table> + Default + -// Expression> + ExpressionMethods; /// The time deleted column in the ResourceTable @@ -194,18 +170,6 @@ pub trait DatastoreAttachTarget: Selectable + Sized >, ) -> AttachToCollectionStatement where -// // Ensure the "collection" columns all belong to the same table. -// ( -// ::Table, -// ::Table, -// ): TypesAreSame2, -// // Ensure the "resource" columns all belong to the same table. -// ( -// ::Table, -// ::Table, -// ::Table, -// ): TypesAreSame3, - // Treat the collection and resource as boxed tables. CollectionTable: BoxableTable, ResourceTable: BoxableTable, diff --git a/nexus/src/db/collection_detach.rs b/nexus/src/db/collection_detach.rs index ba31c7d51cf..48daa9ebc0e 100644 --- a/nexus/src/db/collection_detach.rs +++ b/nexus/src/db/collection_detach.rs @@ -11,9 +11,8 @@ //! - Updates the resource row use super::cte_utils::{ - BoxableTable, BoxableUpdateStatement, BoxedQuery, FilterBy, - ExprSqlType, QuerySqlType, QueryFromClause, TypesAreSame2, - TypesAreSame3, TableDefaultWhereClause + BoxableTable, BoxableUpdateStatement, BoxedQuery, FilterBy, ExprSqlType, + QuerySqlType, QueryFromClause, TableDefaultWhereClause }; use super::pool::DbConnection; use crate::db::collection_attach::DatastoreAttachTarget; @@ -28,7 +27,6 @@ use diesel::query_dsl::methods as query_methods; use diesel::query_source::Table; use diesel::sql_types::{Nullable, SingleValue}; use std::fmt::Debug; -use uuid::Uuid; /// The table representing the collection. The resource references /// this table. @@ -40,7 +38,7 @@ type CollectionTable = < = <>::ResourceCollectionIdColumn as Column>::Table; +>>::ResourceIdColumn as Column>::Table; /// The default WHERE clause of the resource table. 
type ResourceTableDefaultWhereClause = @@ -65,16 +63,25 @@ pub trait DatastoreDetachTarget: Selectable + Sized { type CollectionIdColumn: Column; /// The time deleted column in the CollectionTable - type CollectionTimeDeletedColumn: Column + Default; + type CollectionTimeDeletedColumn: + Column
::Table> + + Default + + ExpressionMethods; /// The primary key column of the resource type ResourceIdColumn: Column; /// The column in the resource acting as a foreign key into the Collection - type ResourceCollectionIdColumn: Column + Default; + type ResourceCollectionIdColumn: + Column
::Table> + + Default + + ExpressionMethods; /// The time deleted column in the ResourceTable - type ResourceTimeDeletedColumn: Column + Default; + type ResourceTimeDeletedColumn: + Column
::Table> + + Default + + ExpressionMethods; /// Creates a statement for detaching a resource from the given collection. /// @@ -118,18 +125,6 @@ pub trait DatastoreDetachTarget: Selectable + Sized { >, ) -> DetachFromCollectionStatement where - // Ensure the "collection" columns all belong to the same table. - ( - ::Table, - ::Table, - ): TypesAreSame2, - // Ensure the "resource" columns all belong to the same table. - ( - ::Table, - ::Table, - ::Table, - ): TypesAreSame3, - // Treat the collection and resource as boxed tables. CollectionTable: BoxableTable, ResourceTable: BoxableTable, @@ -179,11 +174,6 @@ pub trait DatastoreDetachTarget: Selectable + Sized { ExprSqlType>: SingleValue, ExprSqlType: SingleValue, - // Allows calling "is_null()" on the following columns. - Self::CollectionTimeDeletedColumn: ExpressionMethods, - Self::ResourceTimeDeletedColumn: ExpressionMethods, - Self::ResourceCollectionIdColumn: ExpressionMethods, - // Necessary to actually select the resource in the output type. ResourceType: Selectable, { @@ -1042,7 +1032,7 @@ mod test { let collection_id = uuid::Uuid::new_v4(); - let collection = + let _collection = insert_collection(collection_id, "collection", &pool).await; let resource_id = uuid::Uuid::new_v4(); let _resource = insert_resource(resource_id, "resource", &pool).await; diff --git a/nexus/src/db/cte_utils.rs b/nexus/src/db/cte_utils.rs index 9ba77951527..7015cf5d1d5 100644 --- a/nexus/src/db/cte_utils.rs +++ b/nexus/src/db/cte_utils.rs @@ -12,12 +12,6 @@ use diesel::query_builder::*; use diesel::query_dsl::methods as query_methods; use diesel::query_source::Table; -/// Trick to check that columns come from the same table -pub trait TypesAreSame2 {} -impl TypesAreSame2 for (T, T) {} -pub trait TypesAreSame3 {} -impl TypesAreSame3 for (T, T, T) {} - /// The default WHERE clause of a table, when treated as an UPDATE target. pub(crate) type TableDefaultWhereClause
=
::WhereClause; diff --git a/nexus/src/db/model/instance.rs b/nexus/src/db/model/instance.rs index eed2ec7c185..e3086f16dd9 100644 --- a/nexus/src/db/model/instance.rs +++ b/nexus/src/db/model/instance.rs @@ -74,7 +74,6 @@ impl Into for Instance { } impl DatastoreAttachTarget for Instance { - type SerializedId = diesel::sql_types::Uuid; type Id = Uuid; type CollectionIdColumn = instance::dsl::id; From 438526bfd8556e4c79558be8bc02b301ae4f2a7d Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 24 May 2022 19:11:07 -0400 Subject: [PATCH 22/29] fmt --- nexus/src/db/collection_attach.rs | 82 ++++++++++++++----------------- nexus/src/db/collection_detach.rs | 80 ++++++++++++++---------------- nexus/src/db/cte_utils.rs | 79 ++++++++++++++--------------- 3 files changed, 112 insertions(+), 129 deletions(-) diff --git a/nexus/src/db/collection_attach.rs b/nexus/src/db/collection_attach.rs index b9ccc76105f..ea3bf9111ac 100644 --- a/nexus/src/db/collection_attach.rs +++ b/nexus/src/db/collection_attach.rs @@ -13,8 +13,8 @@ //! - Updates the resource row use super::cte_utils::{ - BoxableUpdateStatement, BoxedQuery, BoxableTable, ExprSqlType, FilterBy, - QueryFromClause, QuerySqlType + BoxableTable, BoxableUpdateStatement, BoxedQuery, ExprSqlType, FilterBy, + QueryFromClause, QuerySqlType, }; use super::pool::DbConnection; use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionManager, PoolError}; @@ -47,6 +47,22 @@ type CollectionIdColumn = type ResourceIdColumn = >::ResourceIdColumn; +// Representation of Primary Key in Rust. +type CollectionPrimaryKey = + as Table>::PrimaryKey; +type ResourcePrimaryKey = + as Table>::PrimaryKey; +type ResourceForeignKey = + >::ResourceCollectionIdColumn; + +// Representation of Primary Key in SQL. 
+type SerializedCollectionPrimaryKey = + as diesel::Expression>::SqlType; +type SerializedResourcePrimaryKey = + as diesel::Expression>::SqlType; +type SerializedResourceForeignKey = + as diesel::Expression>::SqlType; + /// Trait to be implemented by structs representing an attachable collection. /// /// For example, since Instances have a one-to-many relationship with @@ -105,22 +121,22 @@ pub trait DatastoreAttachTarget: Selectable + Sized { type CollectionIdColumn: Column; /// The time deleted column in the CollectionTable - type CollectionTimeDeletedColumn: Column
::Table> + - Default + - ExpressionMethods; + type CollectionTimeDeletedColumn: Column
::Table> + + Default + + ExpressionMethods; /// The primary key column of the resource type ResourceIdColumn: Column; /// The column in the resource acting as a foreign key into the Collection - type ResourceCollectionIdColumn: Column
::Table> + - Default + - ExpressionMethods; + type ResourceCollectionIdColumn: Column
::Table> + + Default + + ExpressionMethods; /// The time deleted column in the ResourceTable - type ResourceTimeDeletedColumn: Column
::Table> + - Default + - ExpressionMethods; + type ResourceTimeDeletedColumn: Column
::Table> + + Default + + ExpressionMethods; /// Creates a statement for attaching a resource to the given collection. /// @@ -185,15 +201,13 @@ pub trait DatastoreAttachTarget: Selectable + Sized { // Allows sending "resource_exists_query" between threads. QuerySqlType>: Send, // Allows calling ".filter()" on the boxed collection table. - BoxedQuery>: - FilterBy, Self::Id>> + - FilterBy>, + BoxedQuery>: FilterBy, Self::Id>> + + FilterBy>, // Allows calling ".filter()" on the boxed resource table. - BoxedQuery>: - FilterBy, Self::Id>> + - FilterBy> + - FilterBy> + - FilterBy>, + BoxedQuery>: FilterBy, Self::Id>> + + FilterBy> + + FilterBy> + + FilterBy>, // Allows calling "update.into_boxed()" UpdateStatement< @@ -208,16 +222,13 @@ pub trait DatastoreAttachTarget: Selectable + Sized { FilterBy, Self::Id>>, // Allows using "id" in expressions (e.g. ".eq(...)") with... - Self::Id: - AsExpression< + Self::Id: AsExpression< // ... The Collection table's PK SerializedCollectionPrimaryKey, - > + - AsExpression< + > + AsExpression< // ... The Resource table's PK SerializedResourcePrimaryKey, - > + - AsExpression< + > + AsExpression< // ... The Resource table's FK to the Collection table SerializedResourceForeignKey, >, @@ -473,22 +484,6 @@ where { } -// Representation of Primary Key in Rust. -type CollectionPrimaryKey = - as Table>::PrimaryKey; -type ResourcePrimaryKey = - as Table>::PrimaryKey; -type ResourceForeignKey = - >::ResourceCollectionIdColumn; - -// Representation of Primary Key in SQL. -type SerializedCollectionPrimaryKey = - as diesel::Expression>::SqlType; -type SerializedResourcePrimaryKey = - as diesel::Expression>::SqlType; -type SerializedResourceForeignKey = - as diesel::Expression>::SqlType; - /// This implementation uses a CTE which attempts to do the following: /// /// 1. 
(collection_by_id, resource_by_id): Identify if the collection and @@ -753,9 +748,8 @@ mod test { name: Name::try_from(name.to_string()).unwrap(), description: "description".to_string(), }; - let c = Collection { - identity: CollectionIdentity::new(id, create_params), - }; + let c = + Collection { identity: CollectionIdentity::new(id, create_params) }; diesel::insert_into(collection::table) .values(c) diff --git a/nexus/src/db/collection_detach.rs b/nexus/src/db/collection_detach.rs index 48daa9ebc0e..0ab32c87115 100644 --- a/nexus/src/db/collection_detach.rs +++ b/nexus/src/db/collection_detach.rs @@ -11,8 +11,8 @@ //! - Updates the resource row use super::cte_utils::{ - BoxableTable, BoxableUpdateStatement, BoxedQuery, FilterBy, ExprSqlType, - QuerySqlType, QueryFromClause, TableDefaultWhereClause + BoxableTable, BoxableUpdateStatement, BoxedQuery, ExprSqlType, FilterBy, + QueryFromClause, QuerySqlType, TableDefaultWhereClause, }; use super::pool::DbConnection; use crate::db::collection_attach::DatastoreAttachTarget; @@ -51,6 +51,22 @@ type CollectionIdColumn = type ResourceIdColumn = >::ResourceIdColumn; +// Representation of Primary Key in Rust. +type CollectionPrimaryKey = + as Table>::PrimaryKey; +type ResourcePrimaryKey = + as Table>::PrimaryKey; +type ResourceForeignKey = + >::ResourceCollectionIdColumn; + +// Representation of Primary Key in SQL. +type SerializedCollectionPrimaryKey = + as diesel::Expression>::SqlType; +type SerializedResourcePrimaryKey = + as diesel::Expression>::SqlType; +type SerializedResourceForeignKey = + as diesel::Expression>::SqlType; + /// Trait to be implemented by structs representing a detachable collection. /// /// A blanket implementation is provided for traits that implement @@ -63,25 +79,22 @@ pub trait DatastoreDetachTarget: Selectable + Sized { type CollectionIdColumn: Column; /// The time deleted column in the CollectionTable - type CollectionTimeDeletedColumn: - Column
::Table> + - Default + - ExpressionMethods; + type CollectionTimeDeletedColumn: Column
::Table> + + Default + + ExpressionMethods; /// The primary key column of the resource type ResourceIdColumn: Column; /// The column in the resource acting as a foreign key into the Collection - type ResourceCollectionIdColumn: - Column
::Table> + - Default + - ExpressionMethods; + type ResourceCollectionIdColumn: Column
::Table> + + Default + + ExpressionMethods; /// The time deleted column in the ResourceTable - type ResourceTimeDeletedColumn: - Column
::Table> + - Default + - ExpressionMethods; + type ResourceTimeDeletedColumn: Column
::Table> + + Default + + ExpressionMethods; /// Creates a statement for detaching a resource from the given collection. /// @@ -130,21 +143,21 @@ pub trait DatastoreDetachTarget: Selectable + Sized { ResourceTable: BoxableTable, // Allows treating "collection_exists_query" as a boxed "dyn QueryFragment". - QueryFromClause>: QueryFragment + Send, + QueryFromClause>: + QueryFragment + Send, QuerySqlType>: Send, // Allows treating "resource_exists_query" as a boxed "dyn QueryFragment". - QueryFromClause>: QueryFragment + Send, + QueryFromClause>: + QueryFragment + Send, QuerySqlType>: Send, // Allows calling ".filter()" on the boxed collection table. - BoxedQuery>: - FilterBy, Self::Id>> + - FilterBy>, + BoxedQuery>: FilterBy, Self::Id>> + + FilterBy>, // Allows calling ".filter()" on the boxed resource table. - BoxedQuery>: - FilterBy, Self::Id>> + - FilterBy> + - FilterBy>, + BoxedQuery>: FilterBy, Self::Id>> + + FilterBy> + + FilterBy>, // Allows calling "update.into_boxed()" UpdateStatement< @@ -414,22 +427,6 @@ where { } -// Representation of Primary Key in Rust. -type CollectionPrimaryKey = - as Table>::PrimaryKey; -type ResourcePrimaryKey = - as Table>::PrimaryKey; -type ResourceForeignKey = - >::ResourceCollectionIdColumn; - -// Representation of Primary Key in SQL. -type SerializedCollectionPrimaryKey = - as diesel::Expression>::SqlType; -type SerializedResourcePrimaryKey = - as diesel::Expression>::SqlType; -type SerializedResourceForeignKey = - as diesel::Expression>::SqlType; - /// This implementation uses a CTE which attempts to do the following: /// /// 1. 
(collection_by_id, resource_by_id): Identify if the collection and @@ -682,9 +679,8 @@ mod test { name: Name::try_from(name.to_string()).unwrap(), description: "description".to_string(), }; - let c = Collection { - identity: CollectionIdentity::new(id, create_params), - }; + let c = + Collection { identity: CollectionIdentity::new(id, create_params) }; diesel::insert_into(collection::table) .values(c) diff --git a/nexus/src/db/cte_utils.rs b/nexus/src/db/cte_utils.rs index 7015cf5d1d5..42743647f7e 100644 --- a/nexus/src/db/cte_utils.rs +++ b/nexus/src/db/cte_utils.rs @@ -13,7 +13,8 @@ use diesel::query_dsl::methods as query_methods; use diesel::query_source::Table; /// The default WHERE clause of a table, when treated as an UPDATE target. -pub(crate) type TableDefaultWhereClause
=
::WhereClause; +pub(crate) type TableDefaultWhereClause
= +
::WhereClause; // Short-hand type accessors. pub(crate) type QueryFromClause = ::FromClause; @@ -22,72 +23,64 @@ pub(crate) type ExprSqlType = ::SqlType; type TableSqlType = ::SqlType; pub(crate) type BoxedQuery = diesel::helper_types::IntoBoxed<'static, T, Pg>; -pub(crate) type BoxedDslOutput = diesel::internal::table_macro::BoxedSelectStatement< - 'static, - TableSqlType, - diesel::internal::table_macro::FromClause, - Pg, ->; +pub(crate) type BoxedDslOutput = + diesel::internal::table_macro::BoxedSelectStatement< + 'static, + TableSqlType, + diesel::internal::table_macro::FromClause, + Pg, + >; /// Ensures that the type is a Diesel table, and that we can call ".table" and /// ".into_boxed()" on it. -pub trait BoxableTable: HasTable
+pub trait BoxableTable: + HasTable
+ 'static + Send + Table + IntoUpdateTarget - + query_methods::BoxedDsl< - 'static, - Pg, - Output = BoxedDslOutput, - > {} -impl BoxableTable for T -where + + query_methods::BoxedDsl<'static, Pg, Output = BoxedDslOutput> +{ +} +impl BoxableTable for T where T: HasTable
+ 'static + Send + Table + IntoUpdateTarget - + query_methods::BoxedDsl< - 'static, - Pg, - Output = BoxedDslOutput, - >, -{} + + query_methods::BoxedDsl<'static, Pg, Output = BoxedDslOutput> +{ +} /// Ensures that calling ".filter(predicate)" on this type is callable, and does /// not change the underlying type. -pub trait FilterBy: query_methods::FilterDsl {} -impl FilterBy for T -where - T: query_methods::FilterDsl {} +pub trait FilterBy: + query_methods::FilterDsl +{ +} +impl FilterBy for T where + T: query_methods::FilterDsl +{ +} /// Allows calling ".into_boxed" on an update statement. pub trait BoxableUpdateStatement: query_methods::BoxedDsl< - 'static, - Pg, - Output = BoxedUpdateStatement< - 'static, - Pg, - Table, - V - >, - > + 'static, + Pg, + Output = BoxedUpdateStatement<'static, Pg, Table, V>, +> where - Table: QuerySource {} + Table: QuerySource, +{ +} impl BoxableUpdateStatement for T where T: query_methods::BoxedDsl< 'static, Pg, - Output = BoxedUpdateStatement< - 'static, - Pg, - Table, - V, - >, + Output = BoxedUpdateStatement<'static, Pg, Table, V>, >, Table: QuerySource, -{} - +{ +} From 59524625b3a8b45c6ccea876e2b27a95793386a2 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 24 May 2022 22:30:45 -0400 Subject: [PATCH 23/29] Detach many working, sorta. Needs cleanup --- nexus/src/db/collection_detach.rs | 10 +- nexus/src/db/collection_detach_many.rs | 1257 ++++++++++++++++++++++++ nexus/src/db/mod.rs | 1 + 3 files changed, 1263 insertions(+), 5 deletions(-) create mode 100644 nexus/src/db/collection_detach_many.rs diff --git a/nexus/src/db/collection_detach.rs b/nexus/src/db/collection_detach.rs index 0ab32c87115..0899fa65e2b 100644 --- a/nexus/src/db/collection_detach.rs +++ b/nexus/src/db/collection_detach.rs @@ -906,7 +906,7 @@ mod test { let collection = insert_collection(collection_id, "collection", &pool).await; - // Attempt to attach - even though the resource does not exist. 
+ // Attempt to detach - even though the resource does not exist. let detach = Collection::detach_resource( collection_id, resource_id, @@ -984,7 +984,7 @@ mod test { let _resource = insert_resource(resource_id, "resource", &pool).await; attach_resource(collection_id, resource_id, &pool).await; - // Detach the resource to the collection. + // Detach the resource from the collection. let detach_query = Collection::detach_resource( collection_id, resource_id, @@ -1107,9 +1107,9 @@ mod test { .await .unwrap(); - // Detach the resource to the collection. Observe a failure which is + // Detach the resource from the collection. Observe a failure which is // indistinguishable from the resource not existing. - let attach = Collection::detach_resource( + let detach = Collection::detach_resource( collection_id, resource_id, collection::table.into_boxed(), @@ -1119,7 +1119,7 @@ mod test { ) .detach_and_get_result_async(pool.pool()) .await; - assert!(matches!(attach, Err(DetachError::ResourceNotFound))); + assert!(matches!(detach, Err(DetachError::ResourceNotFound))); db.cleanup().await.unwrap(); logctx.cleanup_successful(); diff --git a/nexus/src/db/collection_detach_many.rs b/nexus/src/db/collection_detach_many.rs new file mode 100644 index 00000000000..48670727395 --- /dev/null +++ b/nexus/src/db/collection_detach_many.rs @@ -0,0 +1,1257 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! CTE for detaching multiple resources from a collection. +//! +//! This atomically: +//! - Checks if the collection exists and is not soft deleted +//! - Validates conditions on both the collection and resources +//! - Updates the collection row +//! 
- Updates the resource rows + +use super::cte_utils::{ + BoxableTable, BoxableUpdateStatement, BoxedQuery, ExprSqlType, FilterBy, + QueryFromClause, QuerySqlType, TableDefaultWhereClause, +}; +use super::pool::DbConnection; +use crate::db::collection_attach::DatastoreAttachTarget; +use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionManager, PoolError}; +use diesel::associations::HasTable; +use diesel::expression::{AsExpression, Expression}; +use diesel::helper_types::*; +use diesel::pg::Pg; +use diesel::prelude::*; +use diesel::query_builder::*; +use diesel::query_dsl::methods as query_methods; +use diesel::query_source::Table; +use diesel::sql_types::{Nullable, SingleValue}; +use std::fmt::Debug; + +/// The table representing the collection. The resource references +/// this table. +type CollectionTable = <>::CollectionIdColumn as Column>::Table; + +/// The table representing the resource. This table contains an +/// ID acting as a foreign key into the collection table. +type ResourceTable = <>::ResourceIdColumn as Column>::Table; + +/// The default WHERE clause of the collection table. +type CollectionTableDefaultWhereClause = + TableDefaultWhereClause>; +/// The default WHERE clause of the resource table. +type ResourceTableDefaultWhereClause = + TableDefaultWhereClause>; +/// Helper to access column type. +type CollectionIdColumn = + >::CollectionIdColumn; + +// Representation of Primary Key in Rust. +type CollectionPrimaryKey = + as Table>::PrimaryKey; +type ResourcePrimaryKey = + as Table>::PrimaryKey; +type ResourceForeignKey = + >::ResourceCollectionIdColumn; + +// Representation of Primary Key in SQL. +type SerializedCollectionPrimaryKey = + as diesel::Expression>::SqlType; +type SerializedResourcePrimaryKey = + as diesel::Expression>::SqlType; +type SerializedResourceForeignKey = + as diesel::Expression>::SqlType; + +/// Trait to be implemented by structs representing a detachable collection. 
+/// +/// A blanket implementation is provided for traits that implement +/// [`DatastoreAttachTarget`]. +pub trait DatastoreDetachManyTarget: + Selectable + Sized +{ + /// The Rust type of the collection and resource ids (typically Uuid). + type Id: Copy + Debug + PartialEq + Send + 'static; + + /// The primary key column of the collection. + type CollectionIdColumn: Column; + + /// The time deleted column in the CollectionTable + type CollectionTimeDeletedColumn: Column
::Table> + + Default + + ExpressionMethods; + + /// The primary key column of the resource + type ResourceIdColumn: Column; + + /// The column in the resource acting as a foreign key into the Collection + type ResourceCollectionIdColumn: Column
::Table> + + Default + + ExpressionMethods; + + /// The time deleted column in the ResourceTable + type ResourceTimeDeletedColumn: Column
::Table> + + Default + + ExpressionMethods; + + /// Creates a statement for detaching a resource from the given collection. + /// + /// This statement allows callers to atomically check the state of a + /// collection and a resource while detaching a resource. + /// + /// - `collection_id`: Primary key of the collection being removed from. + /// - `resource_id`: Primary key of the resource being detached. + /// - `collection_query`: An optional query for collection state. The + /// CTE will automatically filter this query to `collection_id`, and + /// validate that the "time deleted" column is NULL. + /// - `resource_query`: An optional query for the resource state. The + /// CTE will automatically filter this query to `resource_id`, + /// validate that the "time deleted" column is NULL, and validate that the + /// "collection_id" column points to `collection_id`. + /// - `update`: An update statement, identifying how the resource object + /// should be modified to be detached + /// + /// The VC, VR types refer to the "update target" of the UpdateStatements, + /// and should generally be inferred rather than explicitly specified. + fn detach_resources( + collection_id: Self::Id, + + collection_query: BoxedQuery>, + resource_query: BoxedQuery>, + + update_collection: UpdateStatement< + CollectionTable, + CollectionTableDefaultWhereClause, + VC, + >, + update_resource: UpdateStatement< + ResourceTable, + ResourceTableDefaultWhereClause, + VR, + >, + ) -> DetachManyFromCollectionStatement + where + // Treat the collection and resource as boxed tables. + CollectionTable: BoxableTable, + ResourceTable: BoxableTable, + + // Allows treating "collection_exists_query" as a boxed "dyn QueryFragment". + QueryFromClause>: + QueryFragment + Send, + QuerySqlType>: Send, + // Allows treating "resource_exists_query" as a boxed "dyn QueryFragment". + QueryFromClause>: + QueryFragment + Send, + QuerySqlType>: Send, + + // Allows calling ".filter()" on the boxed collection table. 
+ BoxedQuery>: FilterBy, Self::Id>> + + FilterBy>, + // Allows calling ".filter()" on the boxed resource table. + BoxedQuery>: FilterBy> + + FilterBy>, + + // Allows calling "update.into_boxed()" + UpdateStatement< + CollectionTable, + CollectionTableDefaultWhereClause, + VC, + >: BoxableUpdateStatement, VC>, + UpdateStatement< + ResourceTable, + ResourceTableDefaultWhereClause, + VR, + >: BoxableUpdateStatement, VR>, + + // Allows calling + // ".filter(collection_table().primary_key().eq(collection_id)" on the + // boxed update statement. + BoxedUpdateStatement< + 'static, + Pg, + CollectionTable, + VC, + >: FilterBy, Self::Id>>, + // Allows calling + // ".filter(Self::ResourceTimeDeletedColumn::default().is_null())" + BoxedUpdateStatement< + 'static, + Pg, + ResourceTable, + VR, + >: FilterBy> + + FilterBy>, + + // Allows using "id" in expressions (e.g. ".eq(...)") with... + Self::Id: AsExpression< + // ... The Collection table's PK + SerializedCollectionPrimaryKey, + > + AsExpression< + // ... The Resource table's PK + SerializedResourcePrimaryKey, + > + AsExpression< + // ... The Resource table's FK to the Collection table + SerializedResourceForeignKey, + >, + ExprSqlType>: SingleValue, + ExprSqlType>: SingleValue, + ExprSqlType: SingleValue, + + ResourceType: Selectable, + { + let collection_table = + || as HasTable>::table(); + + // Create new queries to determine if the collection exists. + let collection_exists_query = Box::new( + collection_table() + .into_boxed() + .filter(collection_table().primary_key().eq(collection_id)) + .filter(Self::CollectionTimeDeletedColumn::default().is_null()), + ); + + // For the queries which decide whether or not we'll perform the update, + // extend the user-provided arguments. 
+ // + // We force these queries to: + // - Check against the primary key of the target objects + // - Ensure the objects are not deleted + // - (for the resources) Ensure they are attached + // - (for the update) Ensure that only the collection with "collection_id" + // is modified. + let collection_query = Box::new( + collection_query + .filter(collection_table().primary_key().eq(collection_id)) + .filter(Self::CollectionTimeDeletedColumn::default().is_null()), + ); + let resource_query = Box::new( + resource_query + .filter(Self::ResourceTimeDeletedColumn::default().is_null()) + .filter( + Self::ResourceCollectionIdColumn::default() + .eq(collection_id), + ), + ); + + let update_collection_statement = update_collection + .into_boxed() + .filter(collection_table().primary_key().eq(collection_id)); + + let update_resource_statement = update_resource + .into_boxed() + .filter(Self::ResourceTimeDeletedColumn::default().is_null()) + .filter( + Self::ResourceCollectionIdColumn::default().eq(collection_id), + ); + + let collection_returning_clause = Self::as_returning(); + DetachManyFromCollectionStatement { + collection_exists_query, + collection_query, + resource_query, + update_collection_statement, + update_resource_statement, + collection_returning_clause, + } + } +} + +impl DatastoreDetachManyTarget for T +where + T: DatastoreAttachTarget, +{ + type Id = T::Id; + type CollectionIdColumn = T::CollectionIdColumn; + type CollectionTimeDeletedColumn = T::CollectionTimeDeletedColumn; + type ResourceIdColumn = T::ResourceIdColumn; + type ResourceCollectionIdColumn = T::ResourceCollectionIdColumn; + type ResourceTimeDeletedColumn = T::ResourceTimeDeletedColumn; +} + +/// The CTE described in the module docs +#[must_use = "Queries must be executed"] +pub struct DetachManyFromCollectionStatement +where + ResourceType: Selectable, + C: DatastoreDetachManyTarget, +{ + // Query which answers: "Does the collection exist?" 
+ collection_exists_query: Box + Send>, + // A (mostly) user-provided query for validating the collection. + collection_query: Box + Send>, + // A (mostly) user-provided query for validating the resource. + resource_query: Box + Send>, + + // Update statement for the collection. + update_collection_statement: + BoxedUpdateStatement<'static, Pg, CollectionTable, VC>, + // Update statement for the resource. + update_resource_statement: + BoxedUpdateStatement<'static, Pg, ResourceTable, VR>, + // Describes what should be returned after UPDATE-ing the resource. + collection_returning_clause: AsSelect, +} + +impl QueryId + for DetachManyFromCollectionStatement +where + ResourceType: Selectable, + C: DatastoreDetachManyTarget, +{ + type QueryId = (); + const HAS_STATIC_QUERY_ID: bool = false; +} + +/// Result of [`DetachManyFromCollectionStatement`] when executed asynchronously +pub type AsyncDetachManyFromCollectionResult = + Result>; + +/// Result of [`DetachManyFromCollectionStatement`] when executed synchronously +pub type SyncDetachManyFromCollectionResult = + Result>; + +/// Errors returned by [`DetachManyFromCollectionStatement`]. +#[derive(Debug)] +pub enum DetachError { + /// The collection that the query was removing from does not exist + CollectionNotFound, + /// Although the collection exists, the update did not occur + /// + /// The unchanged resource and collection are returned as a part of this + /// error; it is the responsibility of the caller to determine which + /// condition was not met. + NoUpdate { collection: C }, + /// Other database error + DatabaseError(E), +} + +/// Describes the type returned from the actual CTE, which is parsed +/// and interpreted before propagating it to users of the Rust API. 
+pub type RawOutput = (i64, Option, Option); + +impl + DetachManyFromCollectionStatement +where + ResourceType: 'static + Debug + Send + Selectable, + C: 'static + Debug + DatastoreDetachManyTarget + Send, + ResourceTable: 'static + Table + Send + Copy + Debug, + VC: 'static + Send, + VR: 'static + Send, + DetachManyFromCollectionStatement: Send, +{ + /// Issues the CTE asynchronously and parses the result. + pub async fn detach_and_get_result_async( + self, + pool: &bb8::Pool>, + ) -> AsyncDetachManyFromCollectionResult + where + // We require this bound to ensure that "Self" is runnable as query. + Self: query_methods::LoadQuery<'static, DbConnection, RawOutput>, + { + self.get_result_async::>(pool) + .await + // If the database returns an error, propagate it right away. + .map_err(DetachError::DatabaseError) + // Otherwise, parse the output to determine if the CTE succeeded. + .and_then(Self::parse_result) + } + + /// Issues the CTE synchronously and parses the result. + pub fn detach_and_get_result( + self, + conn: &mut DbConnection, + ) -> SyncDetachManyFromCollectionResult + where + // We require this bound to ensure that "Self" is runnable as query. 
+ Self: query_methods::LoadQuery<'static, DbConnection, RawOutput>, + { + self.get_result::>(conn) + .map_err(DetachError::DatabaseError) + .and_then(Self::parse_result) + } + + fn parse_result(result: RawOutput) -> Result> { + let (_, collection_before_update, collection_after_update) = result; + + let collection_before_update = collection_before_update + .ok_or_else(|| DetachError::CollectionNotFound)?; + + match collection_after_update { + Some(collection) => Ok(collection), + None => Err(DetachError::NoUpdate { + collection: collection_before_update, + }), + } + } +} + +type SelectableSqlType = + <>::SelectExpression as Expression>::SqlType; + +impl Query + for DetachManyFromCollectionStatement +where + ResourceType: Selectable, + C: DatastoreDetachManyTarget, +{ + type SqlType = ( + // Ignored "SELECT 1" value + diesel::sql_types::BigInt, + // If the collection exists, the value before update. + Nullable>, + // If the collection was updated, the new value. + Nullable>, + ); +} + +impl RunQueryDsl + for DetachManyFromCollectionStatement +where + ResourceType: Selectable, + C: DatastoreDetachManyTarget, +{ +} + +/// This implementation uses a CTE which attempts to do the following: +/// +/// 1. (collection_by_id): Identify if the collection exists at all. +/// 2. (collection_info, resource_info): Checks for arbitrary user-provided +/// constraints on the collection and resource objects. +/// 3. (do_update): IFF all previous checks succeeded, make a decision to perfom +/// an update. +/// 4. (updated_collection, updated_resource): Apply user-provided updates on +/// the collection and resource - presumably, setting the collection ID +/// value. +/// +/// This is implemented as follows: +/// +/// ```text +/// // WITH +/// // /* Look up the collection - Check for existence only! 
*/ +/// // collection_by_id AS ( +/// // SELECT * FROM C +/// // WHERE = AND IS NULL +/// // FOR UPDATE +/// // ), +/// // /* Look up the collection - Check for additional constraints */ +/// // collection_info AS ( +/// // SELECT * FROM C +/// // WHERE = AND IS NULL AND +/// // +/// // FOR UPDATE +/// // ), +/// // /* Look up the resource - Check for additional constraints */ +/// // resource_info AS ( +/// // SELECT * FROM R +/// // WHERE IS NULL AND +/// // = AND +/// // FOR UPDATE +/// // ), +/// // /* Make a decision on whether or not to apply ANY updates */ +/// // do_update AS ( +/// // SELECT IF( +/// // EXISTS(SELECT id FROM collection_info) +/// // TRUE, FALSE), +/// // ), +/// // /* Update the collection */ +/// // updated_collection AS ( +/// // UPDATE C SET +/// // WHERE IN (SELECT FROM collection_info) AND (SELECT * FROM do_update) +/// // RETURNING * +/// // ), +/// // /* Update the resource */ +/// // updated_resource AS ( +/// // UPDATE R SET +/// // WHERE (id IN (SELECT id FROM resource_info)) AND (SELECT * FROM do_update) +/// // RETURNING 1 +/// // ) +/// // SELECT * FROM +/// // (SELECT 1) +/// // LEFT JOIN (SELECT * FROM collection_by_id) ON TRUE +/// // LEFT JOIN (SELECT * FROM updated_collection) ON TRUE; +/// ``` +impl QueryFragment + for DetachManyFromCollectionStatement +where + ResourceType: Selectable, + C: DatastoreDetachManyTarget, + CollectionPrimaryKey: diesel::Column, + // Necessary to "walk_ast" over "self.update_collection_statement". + BoxedUpdateStatement<'static, Pg, CollectionTable, VC>: + QueryFragment, + // Necessary to "walk_ast" over "self.update_resource_statement". + BoxedUpdateStatement<'static, Pg, ResourceTable, VR>: + QueryFragment, + // Necessary to "walk_ast" over "self.collection_returning_clause". 
+ AsSelect: QueryFragment, +{ + fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, Pg>) -> QueryResult<()> { + out.unsafe_to_cache_prepared(); + out.push_sql("WITH collection_by_id AS ("); + self.collection_exists_query.walk_ast(out.reborrow())?; + out.push_sql(" FOR UPDATE), "); + + out.push_sql("collection_info AS ("); + self.collection_query.walk_ast(out.reborrow())?; + out.push_sql(" FOR UPDATE), "); + + out.push_sql("resource_info AS ("); + self.resource_query.walk_ast(out.reborrow())?; + out.push_sql(" FOR UPDATE), "); + + out.push_sql("do_update AS (SELECT IF(EXISTS(SELECT "); + out.push_identifier(CollectionIdColumn::::NAME)?; + out.push_sql(" FROM collection_info), TRUE,FALSE)), "); + + out.push_sql("updated_collection AS ("); + self.update_collection_statement.walk_ast(out.reborrow())?; + // NOTE: It is safe to start with "AND" - we forced the update statement + // to have a WHERE clause on the primary key of the resource. + out.push_sql(" AND (SELECT * FROM do_update)"); + out.push_sql(" RETURNING "); + self.collection_returning_clause.walk_ast(out.reborrow())?; + out.push_sql("), "); + + out.push_sql("updated_resource AS ("); + self.update_resource_statement.walk_ast(out.reborrow())?; + // NOTE: It is safe to start with "AND" - we forced the update statement + // to have a WHERE claustime deleted column. + // + // TODO TODO TODO : WE NEED TO FIX THIS. + // - Use the actual name of "id" + out.push_sql(" AND (id IN (SELECT id FROM resource_info))"); + out.push_sql(" AND (SELECT * FROM do_update)"); + out.push_sql(" RETURNING 1"); + out.push_sql(") "); + + // Why do all these LEFT JOINs here? In short, to ensure that we are + // always returning a constant number of columns. + // + // Diesel parses output "one column at a time", mapping to structs or + // tuples. For example, when deserializing an "Option<(A, B, C)>" object, + // Diesel checks nullability of the "A", "B", and "C" columns. 
+ // If any of those columns unexpectedly return NULL, the entire object is
+ // treated as "None".
+ //
+ // In summary:
+ // - Without the LEFT JOINs, we'd occasionally be returning "zero
+ // rows", which would make the output entirely unparseable.
+ // - If we used an operation like COALESCE (which attempts to map the
+ // result of an expression to either "NULL" or a single tuple column),
+ // Diesel struggles to map the result back to a structure.
+ //
+ // By returning a static number of columns, each component of the
+ // "RawOutput" tuple can be parsed, regardless of nullability, without
+ // preventing later portions of the result from being parsed.
+ out.push_sql(
+ "SELECT * FROM \
+ (SELECT 1) \
+ LEFT JOIN (SELECT * FROM collection_by_id) ON TRUE \
+ LEFT JOIN (SELECT * FROM updated_collection) ON TRUE;",
+ );
+
+ Ok(())
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::{DatastoreDetachManyTarget, DetachError};
+ use crate::db::collection_attach::DatastoreAttachTarget;
+ use crate::db::{
+ self, error::TransactionError, identity::Resource as IdentityResource,
+ };
+ use async_bb8_diesel::{
+ AsyncConnection, AsyncRunQueryDsl, AsyncSimpleConnection,
+ };
+ use chrono::Utc;
+ use db_macros::Resource;
+ use diesel::expression_methods::ExpressionMethods;
+ use diesel::pg::Pg;
+ use diesel::QueryDsl;
+ use diesel::SelectableHelper;
+ use nexus_test_utils::db::test_setup_database;
+ use omicron_common::api::external::{IdentityMetadataCreateParams, Name};
+ use omicron_test_utils::dev;
+ use uuid::Uuid;
+
+ table! {
+ test_schema.collection (id) {
+ id -> Uuid,
+ name -> Text,
+ description -> Text,
+ time_created -> Timestamptz,
+ time_modified -> Timestamptz,
+ time_deleted -> Nullable,
+ }
+ }
+
+ table!
{ + test_schema.resource (id) { + id -> Uuid, + name -> Text, + description -> Text, + time_created -> Timestamptz, + time_modified -> Timestamptz, + time_deleted -> Nullable, + collection_id -> Nullable, + } + } + + async fn setup_db(pool: &crate::db::Pool) { + let connection = pool.pool().get().await.unwrap(); + (*connection) + .batch_execute_async( + "CREATE SCHEMA IF NOT EXISTS test_schema; \ + CREATE TABLE IF NOT EXISTS test_schema.collection ( \ + id UUID PRIMARY KEY, \ + name STRING(63) NOT NULL, \ + description STRING(512) NOT NULL, \ + time_created TIMESTAMPTZ NOT NULL, \ + time_modified TIMESTAMPTZ NOT NULL, \ + time_deleted TIMESTAMPTZ); \ + CREATE TABLE IF NOT EXISTS test_schema.resource( \ + id UUID PRIMARY KEY, \ + name STRING(63) NOT NULL, \ + description STRING(512) NOT NULL, \ + time_created TIMESTAMPTZ NOT NULL, \ + time_modified TIMESTAMPTZ NOT NULL, \ + time_deleted TIMESTAMPTZ, \ + collection_id UUID); \ + CREATE INDEX IF NOT EXISTS collection_index ON test_schema.resource ( \ + collection_id \ + ) WHERE collection_id IS NOT NULL AND time_deleted IS NULL; \ + TRUNCATE test_schema.collection; \ + TRUNCATE test_schema.resource", + ) + .await + .unwrap(); + } + + /// Describes a resource within the database. 
+ #[derive( + Clone, Queryable, Insertable, Debug, Resource, Selectable, PartialEq, + )] + #[diesel(table_name = resource)] + struct Resource { + #[diesel(embed)] + pub identity: ResourceIdentity, + pub collection_id: Option, + } + + #[derive( + Clone, Queryable, Insertable, Debug, Resource, Selectable, PartialEq, + )] + #[diesel(table_name = collection)] + struct Collection { + #[diesel(embed)] + pub identity: CollectionIdentity, + } + + impl DatastoreAttachTarget for Collection { + type Id = uuid::Uuid; + + type CollectionIdColumn = collection::dsl::id; + type CollectionTimeDeletedColumn = collection::dsl::time_deleted; + + type ResourceIdColumn = resource::dsl::id; + type ResourceCollectionIdColumn = resource::dsl::collection_id; + type ResourceTimeDeletedColumn = resource::dsl::time_deleted; + } + + async fn insert_collection( + id: Uuid, + name: &str, + pool: &db::Pool, + ) -> Collection { + let create_params = IdentityMetadataCreateParams { + name: Name::try_from(name.to_string()).unwrap(), + description: "description".to_string(), + }; + let c = + Collection { identity: CollectionIdentity::new(id, create_params) }; + + diesel::insert_into(collection::table) + .values(c) + .execute_async(pool.pool()) + .await + .unwrap(); + + get_collection(id, &pool).await + } + + async fn get_collection(id: Uuid, pool: &db::Pool) -> Collection { + collection::table + .find(id) + .select(Collection::as_select()) + .first_async(pool.pool()) + .await + .unwrap() + } + + async fn insert_resource( + id: Uuid, + name: &str, + pool: &db::Pool, + ) -> Resource { + let create_params = IdentityMetadataCreateParams { + name: Name::try_from(name.to_string()).unwrap(), + description: "description".to_string(), + }; + let r = Resource { + identity: ResourceIdentity::new(id, create_params), + collection_id: None, + }; + + diesel::insert_into(resource::table) + .values(r) + .execute_async(pool.pool()) + .await + .unwrap(); + + get_resource(id, &pool).await + } + + async fn attach_resource( 
+ collection_id: Uuid, + resource_id: Uuid, + pool: &db::Pool, + ) { + Collection::attach_resource( + collection_id, + resource_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + 100, + diesel::update(resource::table) + .set(resource::dsl::collection_id.eq(collection_id)), + ) + .attach_and_get_result_async(pool.pool()) + .await + .unwrap(); + } + + async fn get_resource(id: Uuid, pool: &db::Pool) -> Resource { + resource::table + .find(id) + .select(Resource::as_select()) + .first_async(pool.pool()) + .await + .unwrap() + } + + #[test] + fn test_verify_query() { + let collection_id = + uuid::Uuid::parse_str("cccccccc-cccc-cccc-cccc-cccccccccccc") + .unwrap(); + let _resource_id = + uuid::Uuid::parse_str("aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa") + .unwrap(); + let detach = Collection::detach_resources( + collection_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + diesel::update(collection::table) + .set(collection::dsl::description.eq("Updated desc")), + diesel::update(resource::table) + .set(resource::dsl::collection_id.eq(Option::::None)), + ); + let query = diesel::debug_query::(&detach).to_string(); + + let expected_query = "WITH \ + collection_by_id AS (\ + SELECT \ + \"test_schema\".\"collection\".\"id\", \ + \"test_schema\".\"collection\".\"name\", \ + \"test_schema\".\"collection\".\"description\", \ + \"test_schema\".\"collection\".\"time_created\", \ + \"test_schema\".\"collection\".\"time_modified\", \ + \"test_schema\".\"collection\".\"time_deleted\" \ + FROM \"test_schema\".\"collection\" \ + WHERE (\ + (\"test_schema\".\"collection\".\"id\" = $1) AND \ + (\"test_schema\".\"collection\".\"time_deleted\" IS NULL)\ + ) FOR UPDATE\ + ), \ + collection_info AS (\ + SELECT \ + \"test_schema\".\"collection\".\"id\", \ + \"test_schema\".\"collection\".\"name\", \ + \"test_schema\".\"collection\".\"description\", \ + \"test_schema\".\"collection\".\"time_created\", \ + \"test_schema\".\"collection\".\"time_modified\", 
\ + \"test_schema\".\"collection\".\"time_deleted\" \ + FROM \"test_schema\".\"collection\" \ + WHERE (\ + (\"test_schema\".\"collection\".\"id\" = $2) AND \ + (\"test_schema\".\"collection\".\"time_deleted\" IS NULL)\ + ) FOR UPDATE\ + ), \ + resource_info AS (\ + SELECT \ + \"test_schema\".\"resource\".\"id\", \ + \"test_schema\".\"resource\".\"name\", \ + \"test_schema\".\"resource\".\"description\", \ + \"test_schema\".\"resource\".\"time_created\", \ + \"test_schema\".\"resource\".\"time_modified\", \ + \"test_schema\".\"resource\".\"time_deleted\", \ + \"test_schema\".\"resource\".\"collection_id\" \ + FROM \"test_schema\".\"resource\" \ + WHERE (\ + (\"test_schema\".\"resource\".\"time_deleted\" IS NULL) AND \ + (\"test_schema\".\"resource\".\"collection_id\" = $3)\ + ) FOR UPDATE\ + ), \ + do_update AS (\ + SELECT IF(\ + EXISTS(SELECT \"id\" FROM collection_info), \ + TRUE,\ + FALSE)\ + ), \ + updated_collection AS (\ + UPDATE \ + \"test_schema\".\"collection\" \ + SET \ + \"description\" = $4 \ + WHERE \ + (\"test_schema\".\"collection\".\"id\" = $5) AND \ + (SELECT * FROM do_update) \ + RETURNING \ + \"test_schema\".\"collection\".\"id\", \ + \"test_schema\".\"collection\".\"name\", \ + \"test_schema\".\"collection\".\"description\", \ + \"test_schema\".\"collection\".\"time_created\", \ + \"test_schema\".\"collection\".\"time_modified\", \ + \"test_schema\".\"collection\".\"time_deleted\"\ + ), \ + updated_resource AS (\ + UPDATE \ + \"test_schema\".\"resource\" \ + SET \ + \"collection_id\" = $6 \ + WHERE \ + ((\"test_schema\".\"resource\".\"time_deleted\" IS NULL) AND \ + (\"test_schema\".\"resource\".\"collection_id\" = $7)) AND \ + (id IN (SELECT id FROM resource_info)) AND \ + (SELECT * FROM do_update) \ + RETURNING 1\ + ) \ + SELECT * FROM \ + (SELECT 1) \ + LEFT JOIN (SELECT * FROM collection_by_id) ON TRUE \ + LEFT JOIN (SELECT * FROM updated_collection) ON TRUE; -- binds: [cccccccc-cccc-cccc-cccc-cccccccccccc, 
cccccccc-cccc-cccc-cccc-cccccccccccc, cccccccc-cccc-cccc-cccc-cccccccccccc, \"Updated desc\", cccccccc-cccc-cccc-cccc-cccccccccccc, None, cccccccc-cccc-cccc-cccc-cccccccccccc]"; + assert_eq!(query, expected_query); + } + + #[tokio::test] + async fn test_detach_missing_collection_fails() { + let logctx = + dev::test_setup_log("test_detach_missing_collection_fails"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + + setup_db(&pool).await; + + let collection_id = uuid::Uuid::new_v4(); + let _resource_id = uuid::Uuid::new_v4(); + let detach = Collection::detach_resources( + collection_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + diesel::update(collection::table) + .set(collection::dsl::description.eq("Updated desc")), + diesel::update(resource::table) + .set(resource::dsl::collection_id.eq(Option::::None)), + ) + .detach_and_get_result_async(pool.pool()) + .await; + + assert!(matches!(detach, Err(DetachError::CollectionNotFound))); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_detach_missing_resource_succeeds() { + let logctx = + dev::test_setup_log("test_detach_missing_resource_succeeds"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + + setup_db(&pool).await; + + let collection_id = uuid::Uuid::new_v4(); + let _resource_id = uuid::Uuid::new_v4(); + + // Create the collection + let _collection = + insert_collection(collection_id, "collection", &pool).await; + + // Attempt to detach - even though the resource does not exist. 
+ let detach = Collection::detach_resources( + collection_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + diesel::update(collection::table) + .set(collection::dsl::description.eq("Updated desc")), + diesel::update(resource::table) + .set(resource::dsl::collection_id.eq(Option::::None)), + ) + .detach_and_get_result_async(pool.pool()) + .await; + + let returned_collection = detach.expect("Detach should have worked"); + assert_eq!(returned_collection.description(), "Updated desc"); + // The collection should still be updated. + assert_eq!( + returned_collection, + get_collection(collection_id, &pool).await + ); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_detach_once() { + let logctx = dev::test_setup_log("test_detach_once"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + + setup_db(&pool).await; + + let collection_id = uuid::Uuid::new_v4(); + let resource_id = uuid::Uuid::new_v4(); + + // Create the collection and resource. Attach them. + let _collection = + insert_collection(collection_id, "collection", &pool).await; + let _resource = insert_resource(resource_id, "resource", &pool).await; + attach_resource(collection_id, resource_id, &pool).await; + + // Detach the resource from the collection. + let detach = Collection::detach_resources( + collection_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + diesel::update(collection::table) + .set(collection::dsl::description.eq("Updated desc")), + diesel::update(resource::table) + .set(resource::dsl::collection_id.eq(Option::::None)), + ) + .detach_and_get_result_async(pool.pool()) + .await; + + // "detach_and_get_result_async" should return the updated collection. + let returned_collection = detach.expect("Detach should have worked"); + // The returned value should be the latest value in the DB. 
+ assert_eq!( + returned_collection, + get_collection(collection_id, &pool).await + ); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_detach_once_synchronous() { + let logctx = dev::test_setup_log("test_detach_once_synchronous"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + + setup_db(&pool).await; + + let collection_id = uuid::Uuid::new_v4(); + let resource_id = uuid::Uuid::new_v4(); + + // Create the collection and resource. + let _collection = + insert_collection(collection_id, "collection", &pool).await; + let _resource = insert_resource(resource_id, "resource", &pool).await; + attach_resource(collection_id, resource_id, &pool).await; + + // Detach the resource from the collection. + let detach_query = Collection::detach_resources( + collection_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + diesel::update(collection::table) + .set(collection::dsl::description.eq("Updated desc")), + diesel::update(resource::table) + .set(resource::dsl::collection_id.eq(Option::::None)), + ); + + type TxnError = + TransactionError>; + let result = pool + .pool() + .transaction(move |conn| { + detach_query.detach_and_get_result(conn).map_err(|e| match e { + DetachError::DatabaseError(e) => TxnError::from(e), + e => TxnError::CustomError(e), + }) + }) + .await; + + // "detach_and_get_result" should return the "detached" resource. + let returned_collection = result.expect("Detach should have worked"); + // The returned values should be the latest value in the DB. 
+ assert_eq!( + returned_collection, + get_collection(collection_id, &pool).await + ); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_detach_while_already_detached() { + let logctx = dev::test_setup_log("test_detach_while_already_detached"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + + setup_db(&pool).await; + + let collection_id = uuid::Uuid::new_v4(); + + let _collection = + insert_collection(collection_id, "collection", &pool).await; + let resource_id = uuid::Uuid::new_v4(); + let _resource = insert_resource(resource_id, "resource", &pool).await; + attach_resource(collection_id, resource_id, &pool).await; + + // Detach a resource from a collection, as usual. + let detach = Collection::detach_resources( + collection_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + diesel::update(collection::table) + .set(collection::dsl::description.eq("Updated desc")), + diesel::update(resource::table) + .set(resource::dsl::collection_id.eq(Option::::None)), + ) + .detach_and_get_result_async(pool.pool()) + .await; + assert_eq!( + detach.expect("Detach should have worked").description(), + "Updated desc" + ); + + // Try detaching once more. This one won't detach anything, but + // we still expect it to succeed. + let detach = Collection::detach_resources( + collection_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + diesel::update(collection::table) + .set(collection::dsl::description.eq("... and again!")), + diesel::update(resource::table) + .set(resource::dsl::collection_id.eq(Option::::None)), + ) + .detach_and_get_result_async(pool.pool()) + .await; + assert_eq!( + detach.expect("Detach should have worked").description(), + "... and again!" 
+ );
+
+ db.cleanup().await.unwrap();
+ logctx.cleanup_successful();
+ }
+
+ #[tokio::test]
+ async fn test_detach_filter_collection() {
+ let logctx = dev::test_setup_log("test_detach_filter_collection");
+ let mut db = test_setup_database(&logctx.log).await;
+ let cfg = db::Config { url: db.pg_config().clone() };
+ let pool = db::Pool::new(&cfg);
+
+ setup_db(&pool).await;
+
+ let collection_id = uuid::Uuid::new_v4();
+
+ let _collection =
+ insert_collection(collection_id, "collection", &pool).await;
+ let resource_id = uuid::Uuid::new_v4();
+ let _resource = insert_resource(resource_id, "resource", &pool).await;
+ attach_resource(collection_id, resource_id, &pool).await;
+
+ // Detach a resource from a collection, but do so with a picky filter
+ // on the collection.
+ let detach = Collection::detach_resources(
+ collection_id,
+ collection::table
+ .into_boxed()
+ .filter(collection::dsl::name.eq("This name will not match")),
+ resource::table.into_boxed(),
+ diesel::update(collection::table)
+ .set(collection::dsl::description.eq("Updated desc")),
+ diesel::update(resource::table)
+ .set(resource::dsl::collection_id.eq(Option::::None)),
+ )
+ .detach_and_get_result_async(pool.pool())
+ .await;
+
+ let err = detach.expect_err("Expected this detach to fail");
+
+ // A caller should be able to inspect this result; the collection
+ // exists but has a different name than requested.
+ match err { + DetachError::NoUpdate { collection } => { + assert_eq!( + collection, + get_collection(collection_id, &pool).await + ); + } + _ => panic!("Unexpected error: {:?}", err), + }; + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_detach_deleted_resource() { + let logctx = dev::test_setup_log("test_detach_deleted_resource"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + + setup_db(&pool).await; + + let collection_id = uuid::Uuid::new_v4(); + let resource_id = uuid::Uuid::new_v4(); + + // Create the collection and resource. + let _collection = + insert_collection(collection_id, "collection", &pool).await; + let _resource = insert_resource(resource_id, "resource", &pool).await; + attach_resource(collection_id, resource_id, &pool).await; + + // Immediately soft-delete the resource. + diesel::update( + resource::table.filter(resource::dsl::id.eq(resource_id)), + ) + .set(resource::dsl::time_deleted.eq(Utc::now())) + .execute_async(pool.pool()) + .await + .unwrap(); + + // Detach the resource from the collection. Observe a failure which is + // indistinguishable from the resource not existing. 
+ let detach = Collection::detach_resources( + collection_id, + collection::table.into_boxed(), + resource::table.into_boxed(), + diesel::update(collection::table) + .set(collection::dsl::description.eq("Updated desc")), + diesel::update(resource::table) + .set(resource::dsl::collection_id.eq(collection_id)), + ) + .detach_and_get_result_async(pool.pool()) + .await; + + assert_eq!( + detach.expect("Detach should have worked").description(), + "Updated desc" + ); + assert_eq!( + get_resource(resource_id, &pool) + .await + .collection_id + .as_ref() + .expect("Should be deleted, but still attached"), + &collection_id, + ); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_detach_many() { + let logctx = dev::test_setup_log("test_detach_many"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + + setup_db(&pool).await; + + // Create the collection and some resources. + let collection_id1 = uuid::Uuid::new_v4(); + let _collection1 = + insert_collection(collection_id1, "collection", &pool).await; + let resource_id1 = uuid::Uuid::new_v4(); + let resource_id2 = uuid::Uuid::new_v4(); + let _resource1 = + insert_resource(resource_id1, "resource1", &pool).await; + attach_resource(collection_id1, resource_id1, &pool).await; + let _resource2 = + insert_resource(resource_id2, "resource2", &pool).await; + attach_resource(collection_id1, resource_id2, &pool).await; + + // Create a separate collection with a resource. + // + // We will check that this resource is untouched after operating + // on "collection_id1". 
+ let collection_id2 = uuid::Uuid::new_v4(); + let _collection2 = + insert_collection(collection_id2, "collection2", &pool).await; + let resource_id3 = uuid::Uuid::new_v4(); + let _resource3 = + insert_resource(resource_id3, "resource3", &pool).await; + attach_resource(collection_id2, resource_id3, &pool).await; + + // Detach the resource from the collection. + let detach = Collection::detach_resources( + collection_id1, + collection::table.into_boxed(), + resource::table.into_boxed(), + diesel::update(collection::table) + .set(collection::dsl::description.eq("Updated desc")), + diesel::update(resource::table) + .set(resource::dsl::collection_id.eq(Option::::None)), + ) + .detach_and_get_result_async(pool.pool()) + .await; + + let returned_resource = detach.expect("Detach should have worked"); + assert_eq!(returned_resource.id(), collection_id1); + assert_eq!(returned_resource.description(), "Updated desc"); + + // Note that only "resource1" and "resource2" should be detached. + assert!(get_resource(resource_id1, &pool) + .await + .collection_id + .is_none()); + assert!(get_resource(resource_id2, &pool) + .await + .collection_id + .is_none()); + + // "resource3" should have been left alone. 
+ assert_eq!( + get_resource(resource_id3, &pool) + .await + .collection_id + .as_ref() + .expect("Should still be attached"), + &collection_id2 + ); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } +} diff --git a/nexus/src/db/mod.rs b/nexus/src/db/mod.rs index 2c2fb0d7ecf..17cecf50121 100644 --- a/nexus/src/db/mod.rs +++ b/nexus/src/db/mod.rs @@ -8,6 +8,7 @@ // doctests pub mod collection_attach; pub mod collection_detach; +pub mod collection_detach_many; pub mod collection_insert; mod config; mod cte_utils; From 03f9d2f95be8482cacd3b68a4b1568804382b10d Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 24 May 2022 23:33:03 -0400 Subject: [PATCH 24/29] Use 'detach_all' --- common/src/sql/dbinit.sql | 3 - nexus/src/app/instance.rs | 66 ++----------------- nexus/src/db/collection_detach_many.rs | 50 ++++++++------- nexus/src/db/datastore.rs | 89 ++++++++++++++++---------- nexus/src/db/model/disk.rs | 11 ---- nexus/src/db/model/instance.rs | 4 -- nexus/src/db/schema.rs | 1 - 7 files changed, 88 insertions(+), 136 deletions(-) diff --git a/common/src/sql/dbinit.sql b/common/src/sql/dbinit.sql index 41373079da6..3e1d553f999 100644 --- a/common/src/sql/dbinit.sql +++ b/common/src/sql/dbinit.sql @@ -340,9 +340,6 @@ CREATE TABLE omicron.public.instance ( /* user data for instance initialization systems (e.g. cloud-init) */ user_data BYTES NOT NULL, - /* child resource generation number, per RFD 192 */ - rcgen INT NOT NULL, - /* * TODO Would it make sense for the runtime state to live in a separate * table? diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index f3adb8cd415..6f81c5962ff 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -151,17 +151,9 @@ impl super::Nexus { Ok(db_instance) } - // TODO-correctness It's not totally clear what the semantics and behavior - // should be here. 
It might be nice to say that you can only do this - // operation if the Instance is already stopped, in which case we can - // execute this immediately by just removing it from the database, with the - // same race we have with disk delete (i.e., if someone else is requesting - // an instance boot, we may wind up in an inconsistent state). On the other - // hand, we could always allow this operation, issue the request to the SA - // to destroy the instance (not just stop it), and proceed with deletion - // when that finishes. But in that case, although the HTTP DELETE request - // completed, the object will still appear for a little while, which kind of - // sucks. + // This operation may only occur on stopped instances, which implies that + // the attached disks do not have any running "upstairs" process running + // within the Sled Agent. pub async fn project_destroy_instance( &self, opctx: &OpContext, @@ -172,7 +164,7 @@ impl super::Nexus { // TODO-robustness We need to figure out what to do with Destroyed // instances? Presumably we need to clean them up at some point, but // not right away so that callers can see that they've been destroyed. 
- let (.., authz_instance, db_instance) = + let (.., authz_instance, _db_instance) = LookupPath::new(opctx, &self.db_datastore) .organization_name(organization_name) .project_name(project_name) @@ -180,50 +172,6 @@ impl super::Nexus { .fetch() .await?; - opctx.authorize(authz::Action::Delete, &authz_instance).await?; - - match db_instance.runtime_state.state.state() { - InstanceState::Stopped | InstanceState::Failed => { - // ok - } - - state => { - return Err(Error::InvalidRequest { - message: format!( - "instance cannot be deleted in state \"{}\"", - state, - ), - }); - } - } - - // Detach all attached disks - let disks = self - .instance_list_disks( - opctx, - organization_name, - project_name, - instance_name, - &DataPageParams { - marker: None, - direction: dropshot::PaginationOrder::Ascending, - limit: std::num::NonZeroU32::new(MAX_DISKS_PER_INSTANCE) - .unwrap(), - }, - ) - .await?; - - for disk in &disks { - self.instance_detach_disk( - opctx, - organization_name, - project_name, - instance_name, - &disk.name(), - ) - .await?; - } - self.db_datastore.project_delete_instance(opctx, &authz_instance).await } @@ -578,7 +526,7 @@ impl super::Nexus { /// Attach a disk to an instance. pub async fn instance_attach_disk( - self: &Arc, + &self, opctx: &OpContext, organization_name: &Name, project_name: &Name, @@ -614,7 +562,7 @@ impl super::Nexus { // - Update the disk state in the DB to "Attached". let (_instance, disk) = self .db_datastore - .disk_attach( + .instance_attach_disk( &opctx, &authz_instance, &authz_disk, @@ -662,7 +610,7 @@ impl super::Nexus { // - Update the disk state in the DB to "Detached". 
let disk = self .db_datastore - .disk_detach(&opctx, &authz_instance, &authz_disk) + .instance_detach_disk(&opctx, &authz_instance, &authz_disk) .await?; Ok(disk) } diff --git a/nexus/src/db/collection_detach_many.rs b/nexus/src/db/collection_detach_many.rs index 48670727395..4a99b49f006 100644 --- a/nexus/src/db/collection_detach_many.rs +++ b/nexus/src/db/collection_detach_many.rs @@ -307,15 +307,15 @@ where /// Result of [`DetachManyFromCollectionStatement`] when executed asynchronously pub type AsyncDetachManyFromCollectionResult = - Result>; + Result>; /// Result of [`DetachManyFromCollectionStatement`] when executed synchronously pub type SyncDetachManyFromCollectionResult = - Result>; + Result>; /// Errors returned by [`DetachManyFromCollectionStatement`]. #[derive(Debug)] -pub enum DetachError { +pub enum DetachManyError { /// The collection that the query was removing from does not exist CollectionNotFound, /// Although the collection exists, the update did not occur @@ -354,7 +354,7 @@ where self.get_result_async::>(pool) .await // If the database returns an error, propagate it right away. - .map_err(DetachError::DatabaseError) + .map_err(DetachManyError::DatabaseError) // Otherwise, parse the output to determine if the CTE succeeded. 
.and_then(Self::parse_result) } @@ -369,19 +369,21 @@ where Self: query_methods::LoadQuery<'static, DbConnection, RawOutput>, { self.get_result::>(conn) - .map_err(DetachError::DatabaseError) + .map_err(DetachManyError::DatabaseError) .and_then(Self::parse_result) } - fn parse_result(result: RawOutput) -> Result> { + fn parse_result( + result: RawOutput, + ) -> Result> { let (_, collection_before_update, collection_after_update) = result; let collection_before_update = collection_before_update - .ok_or_else(|| DetachError::CollectionNotFound)?; + .ok_or_else(|| DetachManyError::CollectionNotFound)?; match collection_after_update { Some(collection) => Ok(collection), - None => Err(DetachError::NoUpdate { + None => Err(DetachManyError::NoUpdate { collection: collection_before_update, }), } @@ -478,7 +480,7 @@ impl QueryFragment where ResourceType: Selectable, C: DatastoreDetachManyTarget, - CollectionPrimaryKey: diesel::Column, + ResourcePrimaryKey: diesel::Column, // Necessary to "walk_ast" over "self.update_collection_statement". BoxedUpdateStatement<'static, Pg, CollectionTable, VC>: QueryFragment, @@ -518,14 +520,13 @@ where out.push_sql("updated_resource AS ("); self.update_resource_statement.walk_ast(out.reborrow())?; // NOTE: It is safe to start with "AND" - we forced the update statement - // to have a WHERE claustime deleted column. - // - // TODO TODO TODO : WE NEED TO FIX THIS. - // - Use the actual name of "id" - out.push_sql(" AND (id IN (SELECT id FROM resource_info))"); - out.push_sql(" AND (SELECT * FROM do_update)"); - out.push_sql(" RETURNING 1"); - out.push_sql(") "); + // to have a WHERE clause on the time deleted column. + out.push_sql(" AND ("); + out.push_identifier(ResourcePrimaryKey::::NAME)?; + out.push_sql(" IN (SELECT "); + out.push_identifier(ResourcePrimaryKey::::NAME)?; + out.push_sql(" FROM resource_info))"); + out.push_sql(" AND (SELECT * FROM do_update) RETURNING 1) "); // Why do all these LEFT JOINs here? 
In short, to ensure that we are // always returning a constant number of columns. @@ -559,7 +560,7 @@ where #[cfg(test)] mod test { - use super::{DatastoreDetachManyTarget, DetachError}; + use super::{DatastoreDetachManyTarget, DetachManyError}; use crate::db::collection_attach::DatastoreAttachTarget; use crate::db::{ self, error::TransactionError, identity::Resource as IdentityResource, @@ -836,7 +837,7 @@ mod test { WHERE \ ((\"test_schema\".\"resource\".\"time_deleted\" IS NULL) AND \ (\"test_schema\".\"resource\".\"collection_id\" = $7)) AND \ - (id IN (SELECT id FROM resource_info)) AND \ + (\"id\" IN (SELECT \"id\" FROM resource_info)) AND \ (SELECT * FROM do_update) \ RETURNING 1\ ) \ @@ -871,7 +872,7 @@ mod test { .detach_and_get_result_async(pool.pool()) .await; - assert!(matches!(detach, Err(DetachError::CollectionNotFound))); + assert!(matches!(detach, Err(DetachManyError::CollectionNotFound))); db.cleanup().await.unwrap(); logctx.cleanup_successful(); @@ -991,13 +992,14 @@ mod test { .set(resource::dsl::collection_id.eq(Option::::None)), ); - type TxnError = - TransactionError>; + type TxnError = TransactionError< + DetachManyError, + >; let result = pool .pool() .transaction(move |conn| { detach_query.detach_and_get_result(conn).map_err(|e| match e { - DetachError::DatabaseError(e) => TxnError::from(e), + DetachManyError::DatabaseError(e) => TxnError::from(e), e => TxnError::CustomError(e), }) }) @@ -1109,7 +1111,7 @@ mod test { // A caller should be able to inspect this result; the collection // exists but has a different name than requested. 
match err { - DetachError::NoUpdate { collection } => { + DetachManyError::NoUpdate { collection } => { assert_eq!( collection, get_collection(collection_id, &pool).await diff --git a/nexus/src/db/datastore.rs b/nexus/src/db/datastore.rs index 95df564b267..2ad29ec2dc9 100644 --- a/nexus/src/db/datastore.rs +++ b/nexus/src/db/datastore.rs @@ -30,6 +30,9 @@ use crate::authz::{self, ApiResource}; use crate::context::OpContext; use crate::db::collection_attach::{AttachError, DatastoreAttachTarget}; use crate::db::collection_detach::{DatastoreDetachTarget, DetachError}; +use crate::db::collection_detach_many::{ + DatastoreDetachManyTarget, DetachManyError, +}; use crate::db::fixed_data::role_assignment::BUILTIN_ROLE_ASSIGNMENTS; use crate::db::fixed_data::role_builtin::BUILTIN_ROLES; use crate::db::fixed_data::silo::{DEFAULT_SILO, SILO_ID}; @@ -997,47 +1000,65 @@ impl DataStore { // This is subject to change, but for now we're going to say that an // instance must be "stopped" or "failed" in order to delete it. The // delete operation sets "time_deleted" (just like with other objects) - // and also sets the state to "destroyed". By virtue of being - // "stopped", we assume there are no dependencies on this instance - // (e.g., disk attachments). If that changes, we'll want to check for - // such dependencies here. + // and also sets the state to "destroyed". 
use api::external::InstanceState as ApiInstanceState; use db::model::InstanceState as DbInstanceState; - use db::schema::instance::dsl; - - let now = Utc::now(); + use db::schema::{disk, instance}; - let destroyed = DbInstanceState::new(ApiInstanceState::Destroyed); let stopped = DbInstanceState::new(ApiInstanceState::Stopped); let failed = DbInstanceState::new(ApiInstanceState::Failed); + let destroyed = DbInstanceState::new(ApiInstanceState::Destroyed); + let ok_to_delete_instance_states = vec![stopped, failed]; - let instance_id = authz_instance.id(); - let result = diesel::update(dsl::instance) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::id.eq(instance_id)) - .filter(dsl::state.eq_any(vec![stopped, failed])) - .set((dsl::state.eq(destroyed), dsl::time_deleted.eq(now))) - .check_if_exists::(instance_id) - .execute_and_check(self.pool()) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByResource(authz_instance), - ) - })?; + let detached_label = api::external::DiskState::Detached.label(); + let ok_to_detach_disk_states = + vec![api::external::DiskState::Attached(authz_instance.id())]; + let ok_to_detach_disk_state_labels: Vec<_> = + ok_to_detach_disk_states.iter().map(|s| s.label()).collect(); - match result.status { - UpdateStatus::Updated => Ok(()), - UpdateStatus::NotUpdatedButExists => { - return Err(Error::InvalidRequest { - message: format!( + let _instance = Instance::detach_resources( + authz_instance.id(), + instance::table.into_boxed().filter( + instance::dsl::state.eq_any(ok_to_delete_instance_states), + ), + disk::table.into_boxed().filter( + disk::dsl::disk_state.eq_any(ok_to_detach_disk_state_labels), + ), + diesel::update(instance::dsl::instance).set(( + instance::dsl::state.eq(destroyed), + instance::dsl::time_deleted.eq(Utc::now()), + )), + diesel::update(disk::dsl::disk).set(( + disk::dsl::disk_state.eq(detached_label), + disk::dsl::attach_instance_id.eq(Option::::None), + )), + ) + 
.detach_and_get_result_async(self.pool_authorized(opctx).await?) + .await + .map_err(|e| match e { + DetachManyError::CollectionNotFound => Error::not_found_by_id( + ResourceType::Instance, + &authz_instance.id(), + ), + DetachManyError::NoUpdate { collection } => { + let instance_state = collection.runtime_state.state.state(); + match instance_state { + api::external::InstanceState::Stopped + | api::external::InstanceState::Failed => { + Error::internal_error("cannot delete instance") + } + _ => Error::invalid_request(&format!( "instance cannot be deleted in state \"{}\"", - result.found.runtime_state.state.state() - ), - }); + instance_state, + )), + } } - } + DetachManyError::DatabaseError(e) => { + public_error_from_diesel_pool(e, ErrorHandler::Server) + } + })?; + + Ok(()) } // Disks @@ -1117,7 +1138,7 @@ impl DataStore { /// - Exist /// - Are in valid states /// - Are under the maximum "attach count" threshold - pub async fn disk_attach( + pub async fn instance_attach_disk( &self, opctx: &OpContext, authz_instance: &authz::Instance, @@ -1254,7 +1275,7 @@ impl DataStore { Ok((instance, disk)) } - pub async fn disk_detach( + pub async fn instance_detach_disk( &self, opctx: &OpContext, authz_instance: &authz::Instance, diff --git a/nexus/src/db/model/disk.rs b/nexus/src/db/model/disk.rs index b7516557e93..d4aa2f20e16 100644 --- a/nexus/src/db/model/disk.rs +++ b/nexus/src/db/model/disk.rs @@ -163,17 +163,6 @@ impl DiskRuntimeState { } } - pub fn attaching(self, instance_id: Uuid) -> Self { - Self { - disk_state: external::DiskState::Attaching(instance_id) - .label() - .to_string(), - attach_instance_id: Some(instance_id), - gen: self.gen.next().into(), - time_updated: Utc::now(), - } - } - pub fn attach(self, instance_id: Uuid) -> Self { Self { disk_state: external::DiskState::Attached(instance_id) diff --git a/nexus/src/db/model/instance.rs b/nexus/src/db/model/instance.rs index e3086f16dd9..02f20451023 100644 --- a/nexus/src/db/model/instance.rs +++ 
b/nexus/src/db/model/instance.rs @@ -28,9 +28,6 @@ pub struct Instance { /// user data for instance initialization systems (e.g. cloud-init) pub user_data: Vec, - /// Child Resource generation number - pub rcgen: Generation, - /// runtime state of the Instance #[diesel(embed)] pub runtime_state: InstanceRuntimeState, @@ -49,7 +46,6 @@ impl Instance { identity, project_id, user_data: params.user_data.clone(), - rcgen: Generation::new(), runtime_state: runtime, } } diff --git a/nexus/src/db/schema.rs b/nexus/src/db/schema.rs index 2db9a3bc9d7..66d25ea7a8f 100644 --- a/nexus/src/db/schema.rs +++ b/nexus/src/db/schema.rs @@ -89,7 +89,6 @@ table! { time_deleted -> Nullable, project_id -> Uuid, user_data -> Binary, - rcgen -> Int8, state -> crate::db::model::InstanceStateEnum, time_state_updated -> Timestamptz, state_generation -> Int8, From 2d1531f6b1678121dae527c0c966a8d5be780a1e Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 25 May 2022 12:21:00 -0400 Subject: [PATCH 25/29] de-duplicate type aliases --- nexus/src/db/collection_attach.rs | 86 +++++++++++++++----------- nexus/src/db/collection_detach.rs | 81 +++--------------------- nexus/src/db/collection_detach_many.rs | 78 ++--------------------- 3 files changed, 62 insertions(+), 183 deletions(-) diff --git a/nexus/src/db/collection_attach.rs b/nexus/src/db/collection_attach.rs index ea3bf9111ac..ed627e23142 100644 --- a/nexus/src/db/collection_attach.rs +++ b/nexus/src/db/collection_attach.rs @@ -14,7 +14,7 @@ use super::cte_utils::{ BoxableTable, BoxableUpdateStatement, BoxedQuery, ExprSqlType, FilterBy, - QueryFromClause, QuerySqlType, + QueryFromClause, QuerySqlType, TableDefaultWhereClause, }; use super::pool::DbConnection; use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionManager, PoolError}; @@ -29,39 +29,53 @@ use diesel::query_source::Table; use diesel::sql_types::{BigInt, Nullable, SingleValue}; use std::fmt::Debug; -/// The table representing the collection. The resource references -/// this table. 
-type CollectionTable = <>::CollectionIdColumn as Column>::Table; -/// The table representing the resource. This table contains an -/// ID acting as a foreign key into the collection table. -type ResourceTable = <>::ResourceIdColumn as Column>::Table; -/// The default WHERE clause of the resource table. -type ResourceTableWhereClause = - as IntoUpdateTarget>::WhereClause; -type CollectionIdColumn = - >::CollectionIdColumn; -type ResourceIdColumn = - >::ResourceIdColumn; - -// Representation of Primary Key in Rust. -type CollectionPrimaryKey = - as Table>::PrimaryKey; -type ResourcePrimaryKey = - as Table>::PrimaryKey; -type ResourceForeignKey = - >::ResourceCollectionIdColumn; - -// Representation of Primary Key in SQL. -type SerializedCollectionPrimaryKey = - as diesel::Expression>::SqlType; -type SerializedResourcePrimaryKey = - as diesel::Expression>::SqlType; -type SerializedResourceForeignKey = - as diesel::Expression>::SqlType; +/// A collection of type aliases particularly relevant to collection-based CTEs. +pub(crate) mod aliases { + use super::{ + Column, DatastoreAttachTarget, Table, TableDefaultWhereClause, + }; + + /// The table representing the collection. The resource references + /// this table. + pub type CollectionTable = <>::CollectionIdColumn as Column>::Table; + /// The table representing the resource. This table contains an + /// ID acting as a foreign key into the collection table. + pub type ResourceTable = <>::ResourceIdColumn as Column>::Table; + + /// The default WHERE clause of the collection table. + pub type CollectionTableDefaultWhereClause = + TableDefaultWhereClause>; + /// The default WHERE clause of the resource table. + pub type ResourceTableDefaultWhereClause = + TableDefaultWhereClause>; + + pub type CollectionIdColumn = + >::CollectionIdColumn; + pub type ResourceIdColumn = + >::ResourceIdColumn; + + /// Representation of Primary Key in Rust. 
+ pub type CollectionPrimaryKey = + as Table>::PrimaryKey; + pub type ResourcePrimaryKey = + as Table>::PrimaryKey; + pub type ResourceForeignKey = + >::ResourceCollectionIdColumn; + + /// Representation of Primary Key in SQL. + pub type SerializedCollectionPrimaryKey = + as diesel::Expression>::SqlType; + pub type SerializedResourcePrimaryKey = + as diesel::Expression>::SqlType; + pub type SerializedResourceForeignKey = + as diesel::Expression>::SqlType; +} + +use aliases::*; /// Trait to be implemented by structs representing an attachable collection. /// @@ -181,7 +195,7 @@ pub trait DatastoreAttachTarget: Selectable + Sized { // value. update: UpdateStatement< ResourceTable, - ResourceTableWhereClause, + ResourceTableDefaultWhereClause, V, >, ) -> AttachToCollectionStatement @@ -212,7 +226,7 @@ pub trait DatastoreAttachTarget: Selectable + Sized { // Allows calling "update.into_boxed()" UpdateStatement< ResourceTable, - ResourceTableWhereClause, + ResourceTableDefaultWhereClause, V, >: BoxableUpdateStatement, V>, // Allows calling diff --git a/nexus/src/db/collection_detach.rs b/nexus/src/db/collection_detach.rs index 0899fa65e2b..6fee55f6441 100644 --- a/nexus/src/db/collection_detach.rs +++ b/nexus/src/db/collection_detach.rs @@ -10,9 +10,10 @@ //! - Validates conditions on both the collection and resource //! - Updates the resource row +use super::collection_attach::aliases::*; use super::cte_utils::{ BoxableTable, BoxableUpdateStatement, BoxedQuery, ExprSqlType, FilterBy, - QueryFromClause, QuerySqlType, TableDefaultWhereClause, + QueryFromClause, QuerySqlType, }; use super::pool::DbConnection; use crate::db::collection_attach::DatastoreAttachTarget; @@ -28,74 +29,13 @@ use diesel::query_source::Table; use diesel::sql_types::{Nullable, SingleValue}; use std::fmt::Debug; -/// The table representing the collection. The resource references -/// this table. 
-type CollectionTable = <>::CollectionIdColumn as Column>::Table; - -/// The table representing the resource. This table contains an -/// ID acting as a foreign key into the collection table. -type ResourceTable = <>::ResourceIdColumn as Column>::Table; - -/// The default WHERE clause of the resource table. -type ResourceTableDefaultWhereClause = - TableDefaultWhereClause>; - -/// Helper to access column type. -type CollectionIdColumn = - >::CollectionIdColumn; -/// Helper to access column type. -type ResourceIdColumn = - >::ResourceIdColumn; - -// Representation of Primary Key in Rust. -type CollectionPrimaryKey = - as Table>::PrimaryKey; -type ResourcePrimaryKey = - as Table>::PrimaryKey; -type ResourceForeignKey = - >::ResourceCollectionIdColumn; - -// Representation of Primary Key in SQL. -type SerializedCollectionPrimaryKey = - as diesel::Expression>::SqlType; -type SerializedResourcePrimaryKey = - as diesel::Expression>::SqlType; -type SerializedResourceForeignKey = - as diesel::Expression>::SqlType; - /// Trait to be implemented by structs representing a detachable collection. /// /// A blanket implementation is provided for traits that implement /// [`DatastoreAttachTarget`]. -pub trait DatastoreDetachTarget: Selectable + Sized { - /// The Rust type of the collection and resource ids (typically Uuid). - type Id: Copy + Debug + PartialEq + Send + 'static; - - /// The primary key column of the collection. - type CollectionIdColumn: Column; - - /// The time deleted column in the CollectionTable - type CollectionTimeDeletedColumn: Column
::Table> - + Default - + ExpressionMethods; - - /// The primary key column of the resource - type ResourceIdColumn: Column; - - /// The column in the resource acting as a foreign key into the Collection - type ResourceCollectionIdColumn: Column
::Table> - + Default - + ExpressionMethods; - - /// The time deleted column in the ResourceTable - type ResourceTimeDeletedColumn: Column
::Table> - + Default - + ExpressionMethods; - +pub trait DatastoreDetachTarget: + DatastoreAttachTarget +{ /// Creates a statement for detaching a resource from the given collection. /// /// This statement allows callers to atomically check the state of a @@ -250,16 +190,9 @@ pub trait DatastoreDetachTarget: Selectable + Sized { } } -impl DatastoreDetachTarget for T -where - T: DatastoreAttachTarget, +impl DatastoreDetachTarget for T where + T: DatastoreAttachTarget { - type Id = T::Id; - type CollectionIdColumn = T::CollectionIdColumn; - type CollectionTimeDeletedColumn = T::CollectionTimeDeletedColumn; - type ResourceIdColumn = T::ResourceIdColumn; - type ResourceCollectionIdColumn = T::ResourceCollectionIdColumn; - type ResourceTimeDeletedColumn = T::ResourceTimeDeletedColumn; } /// The CTE described in the module docs diff --git a/nexus/src/db/collection_detach_many.rs b/nexus/src/db/collection_detach_many.rs index 4a99b49f006..a9932503080 100644 --- a/nexus/src/db/collection_detach_many.rs +++ b/nexus/src/db/collection_detach_many.rs @@ -10,9 +10,10 @@ //! - Updates the collection row //! - Updates the resource rows +use super::collection_attach::aliases::*; use super::cte_utils::{ BoxableTable, BoxableUpdateStatement, BoxedQuery, ExprSqlType, FilterBy, - QueryFromClause, QuerySqlType, TableDefaultWhereClause, + QueryFromClause, QuerySqlType, }; use super::pool::DbConnection; use crate::db::collection_attach::DatastoreAttachTarget; @@ -28,75 +29,13 @@ use diesel::query_source::Table; use diesel::sql_types::{Nullable, SingleValue}; use std::fmt::Debug; -/// The table representing the collection. The resource references -/// this table. -type CollectionTable = <>::CollectionIdColumn as Column>::Table; - -/// The table representing the resource. This table contains an -/// ID acting as a foreign key into the collection table. -type ResourceTable = <>::ResourceIdColumn as Column>::Table; - -/// The default WHERE clause of the collection table. 
-type CollectionTableDefaultWhereClause = - TableDefaultWhereClause>; -/// The default WHERE clause of the resource table. -type ResourceTableDefaultWhereClause = - TableDefaultWhereClause>; -/// Helper to access column type. -type CollectionIdColumn = - >::CollectionIdColumn; - -// Representation of Primary Key in Rust. -type CollectionPrimaryKey = - as Table>::PrimaryKey; -type ResourcePrimaryKey = - as Table>::PrimaryKey; -type ResourceForeignKey = - >::ResourceCollectionIdColumn; - -// Representation of Primary Key in SQL. -type SerializedCollectionPrimaryKey = - as diesel::Expression>::SqlType; -type SerializedResourcePrimaryKey = - as diesel::Expression>::SqlType; -type SerializedResourceForeignKey = - as diesel::Expression>::SqlType; - /// Trait to be implemented by structs representing a detachable collection. /// /// A blanket implementation is provided for traits that implement /// [`DatastoreAttachTarget`]. pub trait DatastoreDetachManyTarget: - Selectable + Sized + DatastoreAttachTarget { - /// The Rust type of the collection and resource ids (typically Uuid). - type Id: Copy + Debug + PartialEq + Send + 'static; - - /// The primary key column of the collection. - type CollectionIdColumn: Column; - - /// The time deleted column in the CollectionTable - type CollectionTimeDeletedColumn: Column
::Table> - + Default - + ExpressionMethods; - - /// The primary key column of the resource - type ResourceIdColumn: Column; - - /// The column in the resource acting as a foreign key into the Collection - type ResourceCollectionIdColumn: Column
::Table> - + Default - + ExpressionMethods; - - /// The time deleted column in the ResourceTable - type ResourceTimeDeletedColumn: Column
::Table> - + Default - + ExpressionMethods; - /// Creates a statement for detaching a resource from the given collection. /// /// This statement allows callers to atomically check the state of a @@ -259,16 +198,9 @@ pub trait DatastoreDetachManyTarget: } } -impl DatastoreDetachManyTarget for T -where - T: DatastoreAttachTarget, +impl DatastoreDetachManyTarget for T where + T: DatastoreAttachTarget { - type Id = T::Id; - type CollectionIdColumn = T::CollectionIdColumn; - type CollectionTimeDeletedColumn = T::CollectionTimeDeletedColumn; - type ResourceIdColumn = T::ResourceIdColumn; - type ResourceCollectionIdColumn = T::ResourceCollectionIdColumn; - type ResourceTimeDeletedColumn = T::ResourceTimeDeletedColumn; } /// The CTE described in the module docs From 12ab5872fc8dc8a4b8a59d53a1dcdf79905435e4 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 25 May 2022 12:24:38 -0400 Subject: [PATCH 26/29] patch docs --- nexus/src/db/collection_detach_many.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/nexus/src/db/collection_detach_many.rs b/nexus/src/db/collection_detach_many.rs index a9932503080..31addf386f2 100644 --- a/nexus/src/db/collection_detach_many.rs +++ b/nexus/src/db/collection_detach_many.rs @@ -42,16 +42,18 @@ pub trait DatastoreDetachManyTarget: /// collection and a resource while detaching a resource. /// /// - `collection_id`: Primary key of the collection being removed from. - /// - `resource_id`: Primary key of the resource being detached. /// - `collection_query`: An optional query for collection state. The /// CTE will automatically filter this query to `collection_id`, and /// validate that the "time deleted" column is NULL. /// - `resource_query`: An optional query for the resource state. The - /// CTE will automatically filter this query to `resource_id`, + /// CTE will automatically filter this query to non-deleted resources. 
/// validate that the "time deleted" column is NULL, and validate that the /// "collection_id" column points to `collection_id`. - /// - `update`: An update statement, identifying how the resource object - /// should be modified to be detached + /// - `update_collection`: An update statement, identifying how the + /// collection object should be modified as associated resources are + /// detached. + /// - `update_resource`: An update statement, identifying how the resource + /// objects should be modified to be detached /// /// The VC, VR types refer to the "update target" of the UpdateStatements, /// and should generally be inferred rather than explicitly specified. From a043a379de220226f4f39f8a7bf0ad209ff3c7b9 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 25 May 2022 14:06:56 -0400 Subject: [PATCH 27/29] review feedback --- nexus/src/app/instance.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index 485dad39cda..6ea7a022ced 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -153,7 +153,7 @@ impl super::Nexus { // This operation may only occur on stopped instances, which implies that // the attached disks do not have any running "upstairs" process running - // within the Sled Agent. + // within the sled. pub async fn project_destroy_instance( &self, opctx: &OpContext, @@ -164,7 +164,7 @@ impl super::Nexus { // TODO-robustness We need to figure out what to do with Destroyed // instances? Presumably we need to clean them up at some point, but // not right away so that callers can see that they've been destroyed. 
- let (.., authz_instance, _db_instance) = + let (.., authz_instance, _) = LookupPath::new(opctx, &self.db_datastore) .organization_name(organization_name) .project_name(project_name) @@ -581,14 +581,14 @@ impl super::Nexus { instance_name: &Name, disk_name: &Name, ) -> UpdateResult { - let (.., authz_project, authz_disk, _db_disk) = + let (.., authz_project, authz_disk, _) = LookupPath::new(opctx, &self.db_datastore) .organization_name(organization_name) .project_name(project_name) .disk_name(disk_name) .fetch() .await?; - let (.., authz_instance, _db_instance) = + let (.., authz_instance, _) = LookupPath::new(opctx, &self.db_datastore) .project_id(authz_project.id()) .instance_name(instance_name) From 11fea7cd59f78720b3ed7c475fc88f853bd5e14c Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 26 May 2022 13:42:06 -0400 Subject: [PATCH 28/29] Updated comment --- nexus/src/db/collection_detach.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nexus/src/db/collection_detach.rs b/nexus/src/db/collection_detach.rs index 6fee55f6441..62974c41593 100644 --- a/nexus/src/db/collection_detach.rs +++ b/nexus/src/db/collection_detach.rs @@ -418,7 +418,7 @@ where /// // (SELECT 1) /// // LEFT JOIN (SELECT * FROM collection_by_id) ON TRUE /// // LEFT JOIN (SELECT * FROM resource_by_id) ON TRUE -/// // LEFT JOIN (SELECT * FROM resource) ON TRUE; +/// // LEFT JOIN (SELECT * FROM updated_resource) ON TRUE; /// ``` impl QueryFragment for DetachFromCollectionStatement From 317633cf17a4f890c95024183a9384dc332e7c7e Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 27 May 2022 12:51:15 -0400 Subject: [PATCH 29/29] u32 max attach count --- nexus/src/db/collection_attach.rs | 6 +++--- nexus/src/db/datastore.rs | 4 +--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/nexus/src/db/collection_attach.rs b/nexus/src/db/collection_attach.rs index ed627e23142..e008a06c8a8 100644 --- a/nexus/src/db/collection_attach.rs +++ b/nexus/src/db/collection_attach.rs 
@@ -182,7 +182,7 @@ pub trait DatastoreAttachTarget: Selectable + Sized { collection_query: BoxedQuery>, resource_query: BoxedQuery>, - max_attached_resources: usize, + max_attached_resources: u32, // We are intentionally picky about this update statement: // - The second argument - the WHERE clause - must match the default @@ -344,7 +344,7 @@ where // A (mostly) user-provided query for validating the resource. resource_query: Box + Send>, // The maximum number of resources which may be attached to the collection. - max_attached_resources: usize, + max_attached_resources: u32, // Update statement for the resource. update_resource_statement: @@ -1118,7 +1118,7 @@ mod test { setup_db(&pool).await; - const RESOURCE_COUNT: usize = 5; + const RESOURCE_COUNT: u32 = 5; let collection_id = uuid::Uuid::new_v4(); diff --git a/nexus/src/db/datastore.rs b/nexus/src/db/datastore.rs index d692875119a..047df522949 100644 --- a/nexus/src/db/datastore.rs +++ b/nexus/src/db/datastore.rs @@ -1171,7 +1171,6 @@ impl DataStore { let attached_label = api::external::DiskState::Attached(authz_instance.id()).label(); - // TODO "u32" seems reasonable for the max disks value (input / output) let (instance, disk) = Instance::attach_resource( authz_instance.id(), authz_disk.id(), @@ -1181,8 +1180,7 @@ impl DataStore { disk::table .into_boxed() .filter(disk::dsl::disk_state.eq_any(ok_to_attach_disk_state_labels)), - // TODO: Remove unwrap? - usize::try_from(max_disks).unwrap(), + max_disks, diesel::update(disk::dsl::disk) .set(( disk::dsl::disk_state.eq(attached_label),