From 893be8e314e2d0219359798bdf3ccd0226b120b2 Mon Sep 17 00:00:00 2001
From: Joseph Koshakow
Date: Wed, 29 Nov 2023 10:32:00 -0500
Subject: [PATCH] catalog: Remove usages of term Stash (#23532)

This commit removes usages of the term Stash from comments, logs,
strings, and variable names and replaces it with something more generic
like "catalog" or "catalog storage".

We are working on replacing the Stash as the backing durable store for
the catalog with persist, which will make these usages inaccurate.

Works towards resolving #22392
---
 src/adapter/src/catalog.rs                    |  8 ++++----
 src/adapter/src/catalog/config.rs             |  2 +-
 src/adapter/src/catalog/open.rs               |  4 ++--
 src/adapter/src/coord/timestamp_oracle.rs     |  2 +-
 .../coord/timestamp_oracle/catalog_oracle.rs  |  4 ++--
 src/catalog-debug/src/main.rs                 | 16 ++++++++--------
 src/catalog/src/durable/initialize.rs         |  2 +-
 .../src/durable/objects/serialization.rs      |  6 +++---
 src/catalog/src/durable/transaction.rs        | 22 +++++++++++-----------
 src/catalog/tests/open.rs                     | 14 +++++++-------
 src/cluster-client/src/client.rs              |  2 +-
 src/environmentd/src/lib.rs                   | 24 ++++++++++++------------
 src/environmentd/tests/server.rs              |  2 +-
 src/sql/src/plan/statement/ddl.rs             |  2 +-
 src/testdrive/src/action.rs                   |  2 +-
 15 files changed, 56 insertions(+), 56 deletions(-)

diff --git a/src/adapter/src/catalog.rs b/src/adapter/src/catalog.rs
index b912d65ad24f7..52dc580b1a749 100644
--- a/src/adapter/src/catalog.rs
+++ b/src/adapter/src/catalog.rs
@@ -1169,7 +1169,7 @@ impl Catalog {
         // The user closure was successful, apply the updates. Terminate the
         // process if this fails, because we have to restart envd due to
-        // indeterminate stash state, which we only reconcile during catalog
+        // indeterminate catalog state, which we only reconcile during catalog
         // init.
         tx.commit()
             .await
@@ -2793,7 +2793,7 @@ impl Catalog {
                 }))
             })?;
-            // Update the Stash and Builtin Tables.
+            // Update the catalog storage and Builtin Tables.
             if !new_entry.item().is_temporary() {
                 tx.update_item(*id, new_entry.clone().into())?;
             }
@@ -3252,7 +3252,7 @@ impl Catalog {
        let var = state.get_system_configuration(name)?;
        tx.upsert_system_config(name, var.value())?;
        // This mirrors the `enabled_persist_txn_tables` "system var" into the
-       // catalog stash "config" collection so that we can toggle the flag with
+       // catalog storage "config" collection so that we can toggle the flag with
        // Launch Darkly, but use it in boot before Launch Darkly is available.
        if name == ENABLE_PERSIST_TXN_TABLES.name() {
            tx.set_enable_persist_txn_tables(
@@ -4497,7 +4497,7 @@ mod tests {
         )
         .await
         .expect("unable to open debug catalog");
-        // Re-opening the same stash resets the transient_revision to 1.
+        // Re-opening the same catalog resets the transient_revision to 1.
         assert_eq!(catalog.transient_revision(), 1);
         catalog.expire().await;
     }
diff --git a/src/adapter/src/catalog/config.rs b/src/adapter/src/catalog/config.rs
index fc7ebe3160c99..98df0e4936429 100644
--- a/src/adapter/src/catalog/config.rs
+++ b/src/adapter/src/catalog/config.rs
@@ -31,7 +31,7 @@ use crate::config::SystemParameterSyncConfig;
 /// Configures a catalog.
 #[derive(Debug)]
 pub struct Config<'a> {
-    /// The connection to the stash.
+    /// The connection to the catalog storage.
    pub storage: Box,
    /// The registry that catalog uses to report metrics.
    pub metrics_registry: &'a MetricsRegistry,
diff --git a/src/adapter/src/catalog/open.rs b/src/adapter/src/catalog/open.rs
index 8508698fab237..45e8611475989 100644
--- a/src/adapter/src/catalog/open.rs
+++ b/src/adapter/src/catalog/open.rs
@@ -1087,7 +1087,7 @@ impl Catalog {
             match state.set_system_configuration_default(name, VarInput::Flat(value)) {
                 Ok(_) => (),
                 Err(AdapterError::VarError(VarError::UnknownParameter(name))) => {
-                    warn!(%name, "cannot load unknown system parameter from stash");
+                    warn!(%name, "cannot load unknown system parameter from catalog storage");
                 }
                 Err(e) => return Err(e),
             };
@@ -1096,7 +1096,7 @@ impl Catalog {
             match state.insert_system_configuration(&name, VarInput::Flat(&value)) {
                 Ok(_) => (),
                 Err(AdapterError::VarError(VarError::UnknownParameter(name))) => {
-                    warn!(%name, "cannot load unknown system parameter from stash");
+                    warn!(%name, "cannot load unknown system parameter from catalog storage");
                 }
                 Err(e) => return Err(e),
             };
diff --git a/src/adapter/src/coord/timestamp_oracle.rs b/src/adapter/src/coord/timestamp_oracle.rs
index b9f064a9a8e20..03de5c396e72b 100644
--- a/src/adapter/src/coord/timestamp_oracle.rs
+++ b/src/adapter/src/coord/timestamp_oracle.rs
@@ -77,7 +77,7 @@ pub trait TimestampOracle {
 /// A shareable version of [`TimestampOracle`] that is `Send` and `Sync`.
 ///
 /// We have this as a stop-gap solution while we still keep the legacy
-/// in-memory/backed-by-Stash TimestampOracle around. Once we remove that we can
+/// in-memory/backed-by-catalog TimestampOracle around. Once we remove that we can
 /// make [`TimestampOracle`] shareable.
 #[async_trait]
 pub trait ShareableTimestampOracle {
diff --git a/src/adapter/src/coord/timestamp_oracle/catalog_oracle.rs b/src/adapter/src/coord/timestamp_oracle/catalog_oracle.rs
index ab0adf1ee7527..614b18abb2c25 100644
--- a/src/adapter/src/coord/timestamp_oracle/catalog_oracle.rs
+++ b/src/adapter/src/coord/timestamp_oracle/catalog_oracle.rs
@@ -237,8 +237,8 @@ where
    fn get_shared(&self) -> Option + Send + Sync>> {
        // The in-memory TimestampOracle is not shareable:
        //
-        // - we have in-memory state that we would have to share via an Arc/Mutec
-        // - we use TimestampPersistence, which is backed by Stash, which is also problematic for sharing
+        // - we have in-memory state that we would have to share via an Arc/Mutex
+        // - we use TimestampPersistence, which is backed by the catalog, which is also problematic for sharing
        None
    }
 }
diff --git a/src/catalog-debug/src/main.rs b/src/catalog-debug/src/main.rs
index 134cf5b05e157..3ea9c25baa5ba 100644
--- a/src/catalog-debug/src/main.rs
+++ b/src/catalog-debug/src/main.rs
@@ -75,7 +75,7 @@
 #![warn(clippy::from_over_into)]
 // END LINT CONFIG
-//! Debug utility for stashes.
+//! Debug utility for catalog storage.
 use std::collections::BTreeMap;
 use std::fmt::Debug;
@@ -160,34 +160,34 @@ enum CatalogKind {
 #[derive(Debug, clap::Subcommand)]
 enum Action {
-    /// Dumps the stash contents to stdout in a human readable format.
+    /// Dumps the catalog contents to stdout in a human readable format.
     /// Includes JSON for each key and value that can be hand edited and
     /// then passed to the `edit` or `delete` commands.
     Dump {
         /// Write output to specified path. Default stdout.
         target: Option,
     },
-    /// Edits a single item in a collection in the stash.
+    /// Edits a single item in a collection in the catalog.
     Edit {
-        /// The name of the stash collection to edit.
+        /// The name of the catalog collection to edit.
        collection: String,
        /// The JSON-encoded key that identifies the item to edit.
        key: serde_json::Value,
        /// The new JSON-encoded value for the item.
        value: serde_json::Value,
    },
-    /// Deletes a single item in a collection in the stash
+    /// Deletes a single item in a collection in the catalog
    Delete {
-        /// The name of the stash collection to edit.
+        /// The name of the catalog collection to edit.
        collection: String,
        /// The JSON-encoded key that identifies the item to delete.
        key: serde_json::Value,
    },
-    /// Checks if the specified stash could be upgraded from its state to the
+    /// Checks if the specified catalog could be upgraded from its state to the
    /// adapter catalog at the version of this binary. Prints a success message
    /// or error message. Exits with 0 if the upgrade would succeed, otherwise
    /// non-zero. Can be used on a running environmentd. Operates without
-    /// interfering with it or committing any data to that stash.
+    /// interfering with it or committing any data to that catalog.
    UpgradeCheck {
        /// Map of cluster name to resource specification. Check the README for latest values.
        cluster_replica_sizes: Option,
diff --git a/src/catalog/src/durable/initialize.rs b/src/catalog/src/durable/initialize.rs
index 0aa488c8fcd73..4f318607ec8f1 100644
--- a/src/catalog/src/durable/initialize.rs
+++ b/src/catalog/src/durable/initialize.rs
@@ -39,7 +39,7 @@ use crate::durable::{
 /// The key used within the "config" collection stores the deploy generation.
 pub(crate) const DEPLOY_GENERATION: &str = "deploy_generation";
-/// The key within the "config" Collection that stores the version of the Stash.
+/// The key within the "config" Collection that stores the version of the catalog.
 pub(crate) const USER_VERSION_KEY: &str = "user_version";
 /// The key used within the "config" collection where we store a mirror of the
diff --git a/src/catalog/src/durable/objects/serialization.rs b/src/catalog/src/durable/objects/serialization.rs
index 8fff77bfacfc2..007ceac3f3665 100644
--- a/src/catalog/src/durable/objects/serialization.rs
+++ b/src/catalog/src/durable/objects/serialization.rs
@@ -856,7 +856,7 @@ impl RustType for AclMode {
    fn from_proto(proto: proto::AclMode) -> Result {
        AclMode::from_bits(proto.bitflags).ok_or_else(|| {
-            TryFromProtoError::InvalidBitFlags(format!("Invalid AclMode from Stash {proto:?}"))
+            TryFromProtoError::InvalidBitFlags(format!("Invalid AclMode from catalog {proto:?}"))
        })
    }
 }
@@ -2155,7 +2155,7 @@ mod tests {
        // Assert there aren't any extra snapshots.
        assert!(
            filenames.is_empty(),
-            "Found snapshots for unsupported Stash versions {filenames:?}.\nIf you just increased `MIN_CATALOG_VERSION`, then please delete the old snapshots. If you created a new snapshot, please bump `CATALOG_VERSION`."
+            "Found snapshots for unsupported catalog versions {filenames:?}.\nIf you just increased `MIN_CATALOG_VERSION`, then please delete the old snapshots. If you created a new snapshot, please bump `CATALOG_VERSION`."
        );
    }
@@ -2186,7 +2186,7 @@ mod tests {
            .collect();
        // Note: objects.proto and objects_v.proto should be exactly the same. The
-        // reason being, when bumping the Stash to the next version, CATALOG_VERSION + 1, we need a
+        // reason being, when bumping the catalog to the next version, CATALOG_VERSION + 1, we need a
        // snapshot to migrate _from_, which should be a snapshot of how the protos are today.
        // Hence why the two files should be exactly the same.
        similar_asserts::assert_eq!(current, snapshot);
diff --git a/src/catalog/src/durable/transaction.rs b/src/catalog/src/durable/transaction.rs
index 0a1a9d4423601..da08550282a1b 100644
--- a/src/catalog/src/durable/transaction.rs
+++ b/src/catalog/src/durable/transaction.rs
@@ -71,8 +71,8 @@ pub struct Transaction<'a> {
    system_configurations: TableTransaction,
    default_privileges: TableTransaction,
    system_privileges: TableTransaction,
-    // Don't make this a table transaction so that it's not read into the stash
-    // memory cache.
+    // Don't make this a table transaction so that it's not read into the
+    // in-memory cache.
    audit_log_updates: Vec<(proto::AuditLogKey, (), i64)>,
    storage_usage_updates: Vec<(proto::StorageUsageKey, (), i64)>,
    connection_timeout: Option,
@@ -648,7 +648,7 @@ impl<'a> Transaction<'a> {
    ///
    /// Returns an error if `id` is not found.
    ///
-    /// Runtime is linear with respect to the total number of items in the stash.
+    /// Runtime is linear with respect to the total number of items in the catalog.
    /// DO NOT call this function in a loop, use [`Self::remove_items`] instead.
    pub fn remove_item(&mut self, id: GlobalId) -> Result<(), CatalogError> {
        let prev = self.items.set(ItemKey { gid: id }, None)?;
@@ -680,7 +680,7 @@ impl<'a> Transaction<'a> {
    ///
    /// Returns an error if `id` is not found.
    ///
-    /// Runtime is linear with respect to the total number of items in the stash.
+    /// Runtime is linear with respect to the total number of items in the catalog.
    /// DO NOT call this function in a loop, use [`Self::update_items`] instead.
    pub fn update_item(&mut self, id: GlobalId, item: Item) -> Result<(), CatalogError> {
        let n = self.items.update(|k, v| {
@@ -735,7 +735,7 @@ impl<'a> Transaction<'a> {
    ///
    /// Returns an error if `id` is not found.
    ///
-    /// Runtime is linear with respect to the total number of items in the stash.
+    /// Runtime is linear with respect to the total number of items in the catalog.
    /// DO NOT call this function in a loop, implement and use some `Self::update_roles` instead.
    /// You should model it after [`Self::update_items`].
    pub fn update_role(&mut self, id: RoleId, role: Role) -> Result<(), CatalogError> {
@@ -785,7 +785,7 @@ impl<'a> Transaction<'a> {
    ///
    /// Returns an error if `id` is not found.
    ///
-    /// Runtime is linear with respect to the total number of clusters in the stash.
+    /// Runtime is linear with respect to the total number of clusters in the catalog.
    /// DO NOT call this function in a loop.
    pub fn update_cluster(&mut self, id: ClusterId, cluster: Cluster) -> Result<(), CatalogError> {
        let n = self.clusters.update(|k, _v| {
@@ -808,7 +808,7 @@ impl<'a> Transaction<'a> {
    ///
    /// Returns an error if `replica_id` is not found.
    ///
-    /// Runtime is linear with respect to the total number of cluster replicas in the stash.
+    /// Runtime is linear with respect to the total number of cluster replicas in the catalog.
    /// DO NOT call this function in a loop.
    pub fn update_cluster_replica(
        &mut self,
@@ -835,7 +835,7 @@ impl<'a> Transaction<'a> {
    ///
    /// Returns an error if `id` is not found.
    ///
-    /// Runtime is linear with respect to the total number of databases in the stash.
+    /// Runtime is linear with respect to the total number of databases in the catalog.
    /// DO NOT call this function in a loop.
    pub fn update_database(
        &mut self,
@@ -862,7 +862,7 @@ impl<'a> Transaction<'a> {
    ///
    /// Returns an error if `schema_id` is not found.
    ///
-    /// Runtime is linear with respect to the total number of schemas in the stash.
+    /// Runtime is linear with respect to the total number of schemas in the catalog.
    /// DO NOT call this function in a loop.
    pub fn update_schema(
        &mut self,
@@ -1032,7 +1032,7 @@ impl<'a> Transaction<'a> {
        Ok(())
    }
-    /// Updates the catalog stash `enable_persist_txn_tables` "config" value to
+    /// Updates the catalog `enable_persist_txn_tables` "config" value to
    /// match the `enable_persist_txn_tables` "system var" value.
    ///
    /// These are mirrored so that we can toggle the flag with Launch Darkly,
@@ -1243,7 +1243,7 @@ impl<'a> Transaction<'a> {
        (txn_batch, self.durable_catalog)
    }
-    /// Commits the storage transaction to the stash. Any error returned indicates the stash may be
+    /// Commits the storage transaction to durable storage. Any error returned indicates the catalog may be
    /// in an indeterminate state and needs to be fully re-read before proceeding. In general, this
    /// must be fatal to the calling process. We do not panic/halt inside this function itself so
    /// that errors can bubble up during initialization.
diff --git a/src/catalog/tests/open.rs b/src/catalog/tests/open.rs
index 8e666473d250d..2743d0093fb75 100644
--- a/src/catalog/tests/open.rs
+++ b/src/catalog/tests/open.rs
@@ -138,12 +138,12 @@ async fn test_is_initialized(
    assert!(
        openable_state2.is_initialized().await.unwrap(),
-        "catalog has been opened yet"
+        "catalog has been opened"
    );
-    // Check twice because some implementations will cache a read-only stash.
+    // Check twice because some implementations will cache a read-only connection.
    assert!(
        openable_state2.is_initialized().await.unwrap(),
-        "catalog has been opened yet"
+        "catalog has been opened"
    );
 }
@@ -200,7 +200,7 @@ async fn test_get_deployment_generation(
        Some(42),
        "deployment generation has been set to 42"
    );
-    // Check twice because some implementations will cache a read-only stash.
+    // Check twice because some implementations will cache a read-only connection.
    assert_eq!(
        openable_state2.get_deployment_generation().await.unwrap(),
        Some(42),
@@ -283,7 +283,7 @@ async fn test_open_savepoint(
        CatalogError::Durable(e) => assert!(e.can_recover_with_write_mode()),
    }
-    // Initialize the stash.
+    // Initialize the catalog.
    {
        let mut state = Box::new(openable_state2)
            .open(SYSTEM_TIME(), &test_bootstrap_args(), None)
            .await
@@ -419,7 +419,7 @@ async fn test_open_read_only(
    openable_state2: impl OpenableDurableCatalogState,
    openable_state3: impl OpenableDurableCatalogState,
 ) {
-    // Can't open a read-only stash until it's been initialized.
+    // Can't open a read-only catalog until it's been initialized.
    let err = Box::new(openable_state1)
        .open_read_only(SYSTEM_TIME(), &test_bootstrap_args())
        .await
@@ -429,7 +429,7 @@
        CatalogError::Durable(e) => assert!(e.can_recover_with_write_mode()),
    }
-    // Initialize the stash.
+    // Initialize the catalog.
    let mut state = Box::new(openable_state2)
        .open(SYSTEM_TIME(), &test_bootstrap_args(), None)
        .await
diff --git a/src/cluster-client/src/client.rs b/src/cluster-client/src/client.rs
index 3ed771bbd91d6..4ae52180c3808 100644
--- a/src/cluster-client/src/client.rs
+++ b/src/cluster-client/src/client.rs
@@ -28,7 +28,7 @@ include!(concat!(env!("OUT_DIR"), "/mz_cluster_client.client.rs"));
 /// must be totally ordered, and any value (for a given replica) must
 /// be greater than any that were generated before (for that replica).
 /// This is the reason for having two
-/// components (one from the stash that increases on every environmentd restart,
+/// components (one from the catalog storage that increases on every environmentd restart,
 /// another in-memory and local to the current incarnation of environmentd)
 #[derive(PartialEq, Eq, Debug, Copy, Clone, Serialize, Deserialize)]
 pub struct ClusterStartupEpoch {
diff --git a/src/environmentd/src/lib.rs b/src/environmentd/src/lib.rs
index e7033b5eaf128..4b74703c3f75b 100644
--- a/src/environmentd/src/lib.rs
+++ b/src/environmentd/src/lib.rs
@@ -164,7 +164,7 @@ pub struct Config {
    /// one.
    ///
    /// If specified, this overrides the value stored in Launch Darkly (and
-    /// mirrored to the catalog stash's "config" collection).
+    /// mirrored to the catalog storage's "config" collection).
    pub enable_persist_txn_tables_cli: Option,
    // === Adapter options. ===
@@ -409,14 +409,14 @@ impl Listeners {
            .await?;
            if !openable_adapter_storage.is_initialized().await? {
-                tracing::info!("Stash doesn't exist so there's no current deploy generation. We won't wait to be leader");
+                tracing::info!("Catalog storage doesn't exist so there's no current deploy generation. We won't wait to be leader");
                break 'leader_promotion;
            }
-            // TODO: once all stashes have a deploy_generation, don't need to handle the Option
-            let stash_generation = openable_adapter_storage.get_deployment_generation().await?;
-            tracing::info!("Found stash generation {stash_generation:?}");
-            if stash_generation < Some(deploy_generation) {
-                tracing::info!("Stash generation {stash_generation:?} is less than deploy generation {deploy_generation}. Performing pre-flight checks");
+            // TODO: once all catalogs have a deploy_generation, don't need to handle the Option
+            let catalog_generation = openable_adapter_storage.get_deployment_generation().await?;
+            tracing::info!("Found catalog generation {catalog_generation:?}");
+            if catalog_generation < Some(deploy_generation) {
+                tracing::info!("Catalog generation {catalog_generation:?} is less than deploy generation {deploy_generation}. Performing pre-flight checks");
                match openable_adapter_storage
                    .open_savepoint(
                        boot_ts.clone(),
@@ -433,7 +433,7 @@ impl Listeners {
                    Ok(adapter_storage) => Box::new(adapter_storage).expire().await,
                    Err(e) => {
                        return Err(
-                            anyhow!(e).context("Stash upgrade would have failed with this error")
+                            anyhow!(e).context("Catalog upgrade would have failed with this error")
                        )
                    }
                }
@@ -450,10 +450,10 @@ impl Listeners {
                        "internal http server closed its end of promote_leader"
                    ));
                }
-            } else if stash_generation == Some(deploy_generation) {
-                tracing::info!("Server requested generation {deploy_generation} which is equal to stash's generation");
+            } else if catalog_generation == Some(deploy_generation) {
+                tracing::info!("Server requested generation {deploy_generation} which is equal to catalog's generation");
            } else {
-                mz_ore::halt!("Server started with requested generation {deploy_generation} but stash was already at {stash_generation:?}. Deploy generations must increase monotonically");
+                mz_ore::halt!("Server started with requested generation {deploy_generation} but catalog was already at {catalog_generation:?}. Deploy generations must increase monotonically");
            }
        }
@@ -509,7 +509,7 @@ impl Listeners {
            enable_persist_txn_tables = value;
        }
        info!(
-            "enable_persist_txn_tables value of {} computed from stash {:?} and flag {:?}",
+            "enable_persist_txn_tables value of {} computed from catalog {:?} and flag {:?}",
            enable_persist_txn_tables,
            enable_persist_txn_tables_stash_ld,
            config.enable_persist_txn_tables_cli,
diff --git a/src/environmentd/tests/server.rs b/src/environmentd/tests/server.rs
index 2db994a3a41f4..71ee14d0d49f2 100644
--- a/src/environmentd/tests/server.rs
+++ b/src/environmentd/tests/server.rs
@@ -2249,7 +2249,7 @@ fn test_leader_promotion() {
        .unsafe_mode()
        .data_directory(tmpdir.path());
    {
-        // start with a stash with no deploy generation to match current production
+        // start with a catalog with no deploy generation to match current production
        let server = harness.clone().start_blocking();
        let mut client = server.connect(postgres::NoTls).unwrap();
        client.simple_query("SELECT 1").unwrap();
diff --git a/src/sql/src/plan/statement/ddl.rs b/src/sql/src/plan/statement/ddl.rs
index d7b86a7387255..18fdffa0408a9 100644
--- a/src/sql/src/plan/statement/ddl.rs
+++ b/src/sql/src/plan/statement/ddl.rs
@@ -5390,7 +5390,7 @@ pub fn plan_comment(
        }
    };
-    // Note: the `mz_comments` table uses an `Int4` for the column position, but in the Stash we
+    // Note: the `mz_comments` table uses an `Int4` for the column position, but in the catalog storage we
    // store a `usize` which would be a `Uint8`. We guard against a safe conversion here because
    // it's the easiest place to raise an error.
    //
diff --git a/src/testdrive/src/action.rs b/src/testdrive/src/action.rs
index 0c01aba1cd22c..c86bed2b3fa41 100644
--- a/src/testdrive/src/action.rs
+++ b/src/testdrive/src/action.rs
@@ -297,7 +297,7 @@ impl State {
        Ok(())
    }
-    /// Makes of copy of the stash's catalog and runs a function on its
+    /// Makes a copy of the durable catalog and runs a function on its
    /// state. Returns `None` if there's no catalog information in the State.
    pub async fn with_catalog_copy(&self, f: F) -> Result, anyhow::Error>
    where