Skip to content

Commit

Permalink
catalog: Remove usages of term Stash (#23532)
Browse files Browse the repository at this point in the history
This commit removes usages of the term Stash from comments, logs,
strings, and variable names and replaces it with something more generic
like "catalog" or "catalog storage". We are working on replacing the
Stash as the backing durable store for the catalog with persist, which
will make these usages inaccurate.

Works towards resolving #22392
  • Loading branch information
jkosh44 authored Nov 29, 2023
1 parent 8edcc2b commit 893be8e
Show file tree
Hide file tree
Showing 15 changed files with 56 additions and 56 deletions.
8 changes: 4 additions & 4 deletions src/adapter/src/catalog.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1169,7 +1169,7 @@ impl Catalog {

// The user closure was successful, apply the updates. Terminate the
// process if this fails, because we have to restart envd due to
// indeterminate stash state, which we only reconcile during catalog
// indeterminate catalog state, which we only reconcile during catalog
// init.
tx.commit()
.await
Expand Down Expand Up @@ -2793,7 +2793,7 @@ impl Catalog {
}))
})?;

// Update the Stash and Builtin Tables.
// Update the catalog storage and Builtin Tables.
if !new_entry.item().is_temporary() {
tx.update_item(*id, new_entry.clone().into())?;
}
Expand Down Expand Up @@ -3252,7 +3252,7 @@ impl Catalog {
let var = state.get_system_configuration(name)?;
tx.upsert_system_config(name, var.value())?;
// This mirrors the `enabled_persist_txn_tables` "system var" into the
// catalog stash "config" collection so that we can toggle the flag with
// catalog storage "config" collection so that we can toggle the flag with
// Launch Darkly, but use it in boot before Launch Darkly is available.
if name == ENABLE_PERSIST_TXN_TABLES.name() {
tx.set_enable_persist_txn_tables(
Expand Down Expand Up @@ -4497,7 +4497,7 @@ mod tests {
)
.await
.expect("unable to open debug catalog");
// Re-opening the same stash resets the transient_revision to 1.
// Re-opening the same catalog resets the transient_revision to 1.
assert_eq!(catalog.transient_revision(), 1);
catalog.expire().await;
}
Expand Down
2 changes: 1 addition & 1 deletion src/adapter/src/catalog/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ use crate::config::SystemParameterSyncConfig;
/// Configures a catalog.
#[derive(Debug)]
pub struct Config<'a> {
/// The connection to the stash.
/// The connection to the catalog storage.
pub storage: Box<dyn mz_catalog::durable::DurableCatalogState>,
/// The registry that catalog uses to report metrics.
pub metrics_registry: &'a MetricsRegistry,
Expand Down
4 changes: 2 additions & 2 deletions src/adapter/src/catalog/open.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1087,7 +1087,7 @@ impl Catalog {
match state.set_system_configuration_default(name, VarInput::Flat(value)) {
Ok(_) => (),
Err(AdapterError::VarError(VarError::UnknownParameter(name))) => {
warn!(%name, "cannot load unknown system parameter from stash");
warn!(%name, "cannot load unknown system parameter from catalog storage");
}
Err(e) => return Err(e),
};
Expand All @@ -1096,7 +1096,7 @@ impl Catalog {
match state.insert_system_configuration(&name, VarInput::Flat(&value)) {
Ok(_) => (),
Err(AdapterError::VarError(VarError::UnknownParameter(name))) => {
warn!(%name, "cannot load unknown system parameter from stash");
warn!(%name, "cannot load unknown system parameter from catalog storage");
}
Err(e) => return Err(e),
};
Expand Down
2 changes: 1 addition & 1 deletion src/adapter/src/coord/timestamp_oracle.rs
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ pub trait TimestampOracle<T> {
/// A shareable version of [`TimestampOracle`] that is `Send` and `Sync`.
///
/// We have this as a stop-gap solution while we still keep the legacy
/// in-memory/backed-by-Stash TimestampOracle around. Once we remove that we can
/// in-memory/backed-by-catalog TimestampOracle around. Once we remove that we can
/// make [`TimestampOracle`] shareable.
#[async_trait]
pub trait ShareableTimestampOracle<T> {
Expand Down
4 changes: 2 additions & 2 deletions src/adapter/src/coord/timestamp_oracle/catalog_oracle.rs
Original file line number Diff line number Diff line change
Expand Up @@ -237,8 +237,8 @@ where
fn get_shared(&self) -> Option<Arc<dyn ShareableTimestampOracle<T> + Send + Sync>> {
// The in-memory TimestampOracle is not shareable:
//
// - we have in-memory state that we would have to share via an Arc/Mutec
// - we use TimestampPersistence, which is backed by Stash, which is also problematic for sharing
// - we have in-memory state that we would have to share via an Arc/Mutex
        // - we use TimestampPersistence, which is backed by the catalog, which is also problematic for sharing
None
}
}
Expand Down
16 changes: 8 additions & 8 deletions src/catalog-debug/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@
#![warn(clippy::from_over_into)]
// END LINT CONFIG

//! Debug utility for stashes.
//! Debug utility for catalog storage.
use std::collections::BTreeMap;
use std::fmt::Debug;
Expand Down Expand Up @@ -160,34 +160,34 @@ enum CatalogKind {

#[derive(Debug, clap::Subcommand)]
enum Action {
/// Dumps the stash contents to stdout in a human readable format.
/// Dumps the catalog contents to stdout in a human readable format.
/// Includes JSON for each key and value that can be hand edited and
/// then passed to the `edit` or `delete` commands.
Dump {
/// Write output to specified path. Default stdout.
target: Option<PathBuf>,
},
/// Edits a single item in a collection in the stash.
/// Edits a single item in a collection in the catalog.
Edit {
/// The name of the stash collection to edit.
/// The name of the catalog collection to edit.
collection: String,
/// The JSON-encoded key that identifies the item to edit.
key: serde_json::Value,
/// The new JSON-encoded value for the item.
value: serde_json::Value,
},
/// Deletes a single item in a collection in the stash
/// Deletes a single item in a collection in the catalog
Delete {
/// The name of the stash collection to edit.
/// The name of the catalog collection to edit.
collection: String,
/// The JSON-encoded key that identifies the item to delete.
key: serde_json::Value,
},
/// Checks if the specified stash could be upgraded from its state to the
/// Checks if the specified catalog could be upgraded from its state to the
/// adapter catalog at the version of this binary. Prints a success message
/// or error message. Exits with 0 if the upgrade would succeed, otherwise
/// non-zero. Can be used on a running environmentd. Operates without
/// interfering with it or committing any data to that stash.
/// interfering with it or committing any data to that catalog.
UpgradeCheck {
/// Map of cluster name to resource specification. Check the README for latest values.
cluster_replica_sizes: Option<String>,
Expand Down
2 changes: 1 addition & 1 deletion src/catalog/src/durable/initialize.rs
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ use crate::durable::{

/// The key used within the "config" collection stores the deploy generation.
pub(crate) const DEPLOY_GENERATION: &str = "deploy_generation";
/// The key within the "config" Collection that stores the version of the Stash.
/// The key within the "config" Collection that stores the version of the catalog.
pub(crate) const USER_VERSION_KEY: &str = "user_version";

/// The key used within the "config" collection where we store a mirror of the
Expand Down
6 changes: 3 additions & 3 deletions src/catalog/src/durable/objects/serialization.rs
Original file line number Diff line number Diff line change
Expand Up @@ -856,7 +856,7 @@ impl RustType<proto::AclMode> for AclMode {

fn from_proto(proto: proto::AclMode) -> Result<Self, TryFromProtoError> {
AclMode::from_bits(proto.bitflags).ok_or_else(|| {
TryFromProtoError::InvalidBitFlags(format!("Invalid AclMode from Stash {proto:?}"))
TryFromProtoError::InvalidBitFlags(format!("Invalid AclMode from catalog {proto:?}"))
})
}
}
Expand Down Expand Up @@ -2155,7 +2155,7 @@ mod tests {
// Assert there aren't any extra snapshots.
assert!(
filenames.is_empty(),
"Found snapshots for unsupported Stash versions {filenames:?}.\nIf you just increased `MIN_CATALOG_VERSION`, then please delete the old snapshots. If you created a new snapshot, please bump `CATALOG_VERSION`."
"Found snapshots for unsupported catalog versions {filenames:?}.\nIf you just increased `MIN_CATALOG_VERSION`, then please delete the old snapshots. If you created a new snapshot, please bump `CATALOG_VERSION`."
);
}

Expand Down Expand Up @@ -2186,7 +2186,7 @@ mod tests {
.collect();

// Note: objects.proto and objects_v<CATALOG_VERSION>.proto should be exactly the same. The
// reason being, when bumping the Stash to the next version, CATALOG_VERSION + 1, we need a
// reason being, when bumping the catalog to the next version, CATALOG_VERSION + 1, we need a
// snapshot to migrate _from_, which should be a snapshot of how the protos are today.
// Hence why the two files should be exactly the same.
similar_asserts::assert_eq!(current, snapshot);
Expand Down
22 changes: 11 additions & 11 deletions src/catalog/src/durable/transaction.rs
Original file line number Diff line number Diff line change
Expand Up @@ -71,8 +71,8 @@ pub struct Transaction<'a> {
system_configurations: TableTransaction<ServerConfigurationKey, ServerConfigurationValue>,
default_privileges: TableTransaction<DefaultPrivilegesKey, DefaultPrivilegesValue>,
system_privileges: TableTransaction<SystemPrivilegesKey, SystemPrivilegesValue>,
// Don't make this a table transaction so that it's not read into the stash
// memory cache.
// Don't make this a table transaction so that it's not read into the
// in-memory cache.
audit_log_updates: Vec<(proto::AuditLogKey, (), i64)>,
storage_usage_updates: Vec<(proto::StorageUsageKey, (), i64)>,
connection_timeout: Option<Duration>,
Expand Down Expand Up @@ -648,7 +648,7 @@ impl<'a> Transaction<'a> {
///
/// Returns an error if `id` is not found.
///
/// Runtime is linear with respect to the total number of items in the stash.
/// Runtime is linear with respect to the total number of items in the catalog.
/// DO NOT call this function in a loop, use [`Self::remove_items`] instead.
pub fn remove_item(&mut self, id: GlobalId) -> Result<(), CatalogError> {
let prev = self.items.set(ItemKey { gid: id }, None)?;
Expand Down Expand Up @@ -680,7 +680,7 @@ impl<'a> Transaction<'a> {
///
/// Returns an error if `id` is not found.
///
/// Runtime is linear with respect to the total number of items in the stash.
/// Runtime is linear with respect to the total number of items in the catalog.
/// DO NOT call this function in a loop, use [`Self::update_items`] instead.
pub fn update_item(&mut self, id: GlobalId, item: Item) -> Result<(), CatalogError> {
let n = self.items.update(|k, v| {
Expand Down Expand Up @@ -735,7 +735,7 @@ impl<'a> Transaction<'a> {
///
/// Returns an error if `id` is not found.
///
/// Runtime is linear with respect to the total number of items in the stash.
/// Runtime is linear with respect to the total number of items in the catalog.
/// DO NOT call this function in a loop, implement and use some `Self::update_roles` instead.
/// You should model it after [`Self::update_items`].
pub fn update_role(&mut self, id: RoleId, role: Role) -> Result<(), CatalogError> {
Expand Down Expand Up @@ -785,7 +785,7 @@ impl<'a> Transaction<'a> {
///
/// Returns an error if `id` is not found.
///
/// Runtime is linear with respect to the total number of clusters in the stash.
/// Runtime is linear with respect to the total number of clusters in the catalog.
/// DO NOT call this function in a loop.
pub fn update_cluster(&mut self, id: ClusterId, cluster: Cluster) -> Result<(), CatalogError> {
let n = self.clusters.update(|k, _v| {
Expand All @@ -808,7 +808,7 @@ impl<'a> Transaction<'a> {
///
/// Returns an error if `replica_id` is not found.
///
/// Runtime is linear with respect to the total number of cluster replicas in the stash.
/// Runtime is linear with respect to the total number of cluster replicas in the catalog.
/// DO NOT call this function in a loop.
pub fn update_cluster_replica(
&mut self,
Expand All @@ -835,7 +835,7 @@ impl<'a> Transaction<'a> {
///
/// Returns an error if `id` is not found.
///
/// Runtime is linear with respect to the total number of databases in the stash.
/// Runtime is linear with respect to the total number of databases in the catalog.
/// DO NOT call this function in a loop.
pub fn update_database(
&mut self,
Expand All @@ -862,7 +862,7 @@ impl<'a> Transaction<'a> {
///
/// Returns an error if `schema_id` is not found.
///
/// Runtime is linear with respect to the total number of schemas in the stash.
/// Runtime is linear with respect to the total number of schemas in the catalog.
/// DO NOT call this function in a loop.
pub fn update_schema(
&mut self,
Expand Down Expand Up @@ -1032,7 +1032,7 @@ impl<'a> Transaction<'a> {
Ok(())
}

/// Updates the catalog stash `enable_persist_txn_tables` "config" value to
/// Updates the catalog `enable_persist_txn_tables` "config" value to
/// match the `enable_persist_txn_tables` "system var" value.
///
/// These are mirrored so that we can toggle the flag with Launch Darkly,
Expand Down Expand Up @@ -1243,7 +1243,7 @@ impl<'a> Transaction<'a> {
(txn_batch, self.durable_catalog)
}

/// Commits the storage transaction to the stash. Any error returned indicates the stash may be
/// Commits the storage transaction to durable storage. Any error returned indicates the catalog may be
/// in an indeterminate state and needs to be fully re-read before proceeding. In general, this
/// must be fatal to the calling process. We do not panic/halt inside this function itself so
/// that errors can bubble up during initialization.
Expand Down
14 changes: 7 additions & 7 deletions src/catalog/tests/open.rs
Original file line number Diff line number Diff line change
Expand Up @@ -138,12 +138,12 @@ async fn test_is_initialized(

assert!(
openable_state2.is_initialized().await.unwrap(),
"catalog has been opened yet"
"catalog has been opened"
);
// Check twice because some implementations will cache a read-only stash.
// Check twice because some implementations will cache a read-only connection.
assert!(
openable_state2.is_initialized().await.unwrap(),
"catalog has been opened yet"
"catalog has been opened"
);
}

Expand Down Expand Up @@ -200,7 +200,7 @@ async fn test_get_deployment_generation(
Some(42),
"deployment generation has been set to 42"
);
// Check twice because some implementations will cache a read-only stash.
// Check twice because some implementations will cache a read-only connection.
assert_eq!(
openable_state2.get_deployment_generation().await.unwrap(),
Some(42),
Expand Down Expand Up @@ -283,7 +283,7 @@ async fn test_open_savepoint(
CatalogError::Durable(e) => assert!(e.can_recover_with_write_mode()),
}

// Initialize the stash.
// Initialize the catalog.
{
let mut state = Box::new(openable_state2)
.open(SYSTEM_TIME(), &test_bootstrap_args(), None)
Expand Down Expand Up @@ -419,7 +419,7 @@ async fn test_open_read_only(
openable_state2: impl OpenableDurableCatalogState,
openable_state3: impl OpenableDurableCatalogState,
) {
// Can't open a read-only stash until it's been initialized.
// Can't open a read-only catalog until it's been initialized.
let err = Box::new(openable_state1)
.open_read_only(SYSTEM_TIME(), &test_bootstrap_args())
.await
Expand All @@ -429,7 +429,7 @@ async fn test_open_read_only(
CatalogError::Durable(e) => assert!(e.can_recover_with_write_mode()),
}

// Initialize the stash.
// Initialize the catalog.
let mut state = Box::new(openable_state2)
.open(SYSTEM_TIME(), &test_bootstrap_args(), None)
.await
Expand Down
2 changes: 1 addition & 1 deletion src/cluster-client/src/client.rs
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ include!(concat!(env!("OUT_DIR"), "/mz_cluster_client.client.rs"));
/// must be totally ordered, and any value (for a given replica) must
/// be greater than any that were generated before (for that replica).
/// This is the reason for having two
/// components (one from the stash that increases on every environmentd restart,
/// components (one from the catalog storage that increases on every environmentd restart,
/// another in-memory and local to the current incarnation of environmentd)
#[derive(PartialEq, Eq, Debug, Copy, Clone, Serialize, Deserialize)]
pub struct ClusterStartupEpoch {
Expand Down
24 changes: 12 additions & 12 deletions src/environmentd/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -164,7 +164,7 @@ pub struct Config {
/// one.
///
/// If specified, this overrides the value stored in Launch Darkly (and
/// mirrored to the catalog stash's "config" collection).
/// mirrored to the catalog storage's "config" collection).
pub enable_persist_txn_tables_cli: Option<bool>,

// === Adapter options. ===
Expand Down Expand Up @@ -409,14 +409,14 @@ impl Listeners {
.await?;

if !openable_adapter_storage.is_initialized().await? {
tracing::info!("Stash doesn't exist so there's no current deploy generation. We won't wait to be leader");
tracing::info!("Catalog storage doesn't exist so there's no current deploy generation. We won't wait to be leader");
break 'leader_promotion;
}
// TODO: once all stashes have a deploy_generation, don't need to handle the Option
let stash_generation = openable_adapter_storage.get_deployment_generation().await?;
tracing::info!("Found stash generation {stash_generation:?}");
if stash_generation < Some(deploy_generation) {
tracing::info!("Stash generation {stash_generation:?} is less than deploy generation {deploy_generation}. Performing pre-flight checks");
// TODO: once all catalogs have a deploy_generation, don't need to handle the Option
let catalog_generation = openable_adapter_storage.get_deployment_generation().await?;
tracing::info!("Found catalog generation {catalog_generation:?}");
if catalog_generation < Some(deploy_generation) {
tracing::info!("Catalog generation {catalog_generation:?} is less than deploy generation {deploy_generation}. Performing pre-flight checks");
match openable_adapter_storage
.open_savepoint(
boot_ts.clone(),
Expand All @@ -433,7 +433,7 @@ impl Listeners {
Ok(adapter_storage) => Box::new(adapter_storage).expire().await,
Err(e) => {
return Err(
anyhow!(e).context("Stash upgrade would have failed with this error")
anyhow!(e).context("Catalog upgrade would have failed with this error")
)
}
}
Expand All @@ -450,10 +450,10 @@ impl Listeners {
"internal http server closed its end of promote_leader"
));
}
} else if stash_generation == Some(deploy_generation) {
tracing::info!("Server requested generation {deploy_generation} which is equal to stash's generation");
} else if catalog_generation == Some(deploy_generation) {
tracing::info!("Server requested generation {deploy_generation} which is equal to catalog's generation");
} else {
mz_ore::halt!("Server started with requested generation {deploy_generation} but stash was already at {stash_generation:?}. Deploy generations must increase monotonically");
mz_ore::halt!("Server started with requested generation {deploy_generation} but catalog was already at {catalog_generation:?}. Deploy generations must increase monotonically");
}
}

Expand Down Expand Up @@ -509,7 +509,7 @@ impl Listeners {
enable_persist_txn_tables = value;
}
info!(
"enable_persist_txn_tables value of {} computed from stash {:?} and flag {:?}",
"enable_persist_txn_tables value of {} computed from catalog {:?} and flag {:?}",
enable_persist_txn_tables,
enable_persist_txn_tables_stash_ld,
config.enable_persist_txn_tables_cli,
Expand Down
2 changes: 1 addition & 1 deletion src/environmentd/tests/server.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2249,7 +2249,7 @@ fn test_leader_promotion() {
.unsafe_mode()
.data_directory(tmpdir.path());
{
// start with a stash with no deploy generation to match current production
// start with a catalog with no deploy generation to match current production
let server = harness.clone().start_blocking();
let mut client = server.connect(postgres::NoTls).unwrap();
client.simple_query("SELECT 1").unwrap();
Expand Down
Loading

0 comments on commit 893be8e

Please sign in to comment.