Skip to content

Commit

Permalink
Fix typos
Browse files Browse the repository at this point in the history
  • Loading branch information
NathanBaulch committed Sep 13, 2024
1 parent 9219530 commit 1548fe1
Show file tree
Hide file tree
Showing 32 changed files with 37 additions and 37 deletions.
2 changes: 1 addition & 1 deletion cmd/atlas/internal/cmdapi/migrate.go
Original file line number Diff line number Diff line change
Expand Up @@ -1321,7 +1321,7 @@ const applyLockValue = "atlas_migrate_execute"

func checkRevisionSchemaClarity(cmd *cobra.Command, c *sqlclient.Client, revisionSchemaFlag string) error {
// The "old" default behavior for the revision schema location was to store the revision table in its own schema.
// Now, the table is saved in the connected schema, if any. To keep the backwards compatability, we now require
// Now, the table is saved in the connected schema, if any. To keep the backwards compatibility, we now require
// for schema bound connections to have the schema-revision flag present if there is no revision table in the schema
// but the old default schema does have one.
if c.URL.Schema != "" && revisionSchemaFlag == "" {
Expand Down
4 changes: 2 additions & 2 deletions cmd/atlas/internal/cmdext/cmdext_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,7 @@ data "aws_rds_token" "token" {
username = "root"
endpoint = "localhost:3306"
region = "us-east-1"
profile = "errorneous"
profile = "erroneous"
}
v = data.aws_rds_token.token
Expand All @@ -112,7 +112,7 @@ v = data.aws_rds_token.token
state = schemahcl.New(cmdext.SpecOptions...)
)
err := state.EvalBytes([]byte(doc), &v, nil)
require.EqualError(t, err, "data.aws_rds_token.token: loading aws config: failed to get shared config profile, errorneous")
require.EqualError(t, err, "data.aws_rds_token.token: loading aws config: failed to get shared config profile, erroneous")
}

func TestGCPToken(t *testing.T) {
Expand Down
2 changes: 1 addition & 1 deletion cmd/atlas/internal/docker/docker.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ type (
Image string
// Env vars to pass to the docker container.
Env []string
// Internal Port to expose anc connect to.
// Internal Port to expose and connect to.
Port string
// Database name to create and connect on init.
Database string
Expand Down
2 changes: 1 addition & 1 deletion cmd/atlas/internal/sqlparse/pgparse/pgparse.go
Original file line number Diff line number Diff line change
Expand Up @@ -181,7 +181,7 @@ func (p *Parser) FixChange(_ migrate.Driver, s string, changes schema.Changes) (

func expectModify(changes schema.Changes) (*schema.ModifyTable, error) {
if len(changes) != 1 {
return nil, fmt.Errorf("unexpected number fo changes: %d", len(changes))
return nil, fmt.Errorf("unexpected number of changes: %d", len(changes))
}
modify, ok := changes[0].(*schema.ModifyTable)
if !ok {
Expand Down
2 changes: 1 addition & 1 deletion cmd/atlas/internal/sqlparse/sqliteparse/sqliteparse.go
Original file line number Diff line number Diff line change
Expand Up @@ -222,7 +222,7 @@ func (p *FileParser) FixChange(_ migrate.Driver, s string, changes schema.Change
}
if r, ok := stmt.RenameColumn(); ok {
if len(changes) != 1 {
return nil, fmt.Errorf("unexpected number fo changes: %d", len(changes))
return nil, fmt.Errorf("unexpected number of changes: %d", len(changes))
}
modify, ok := changes[0].(*schema.ModifyTable)
if !ok {
Expand Down
2 changes: 1 addition & 1 deletion doc/md/features.mdx
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
---
title: Feature Compatability
title: Feature Compatibility
id: features
slug: features
---
Expand Down
2 changes: 1 addition & 1 deletion doc/md/guides/database-per-tenant/02-db-groups.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -148,7 +148,7 @@ Let's review the code snippet above:

In some cases, you may want to load target groups dynamically from an API endpoint. For example, you might have a
service `tenant-svc` that provides a list of tenant databases based on some criteria. Let's suppose this service's
endpoints recieve the target group ID in the path, such as `https://tenant-svc/api/target-group/{id}` and return a
endpoints receive the target group ID in the path, such as `https://tenant-svc/api/target-group/{id}` and return a
simple JSON payload:

```json
Expand Down
2 changes: 1 addition & 1 deletion doc/md/guides/deploying/helm.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ title: Deploying schema migrations to Kubernetes with Helm

:::note Deprecation Notice

This method of running schema migrations is deprecated an no longer recommended.
This method of running schema migrations is deprecated and no longer recommended.

Please use the [Kubernetes Operator](k8s-argo.md) to manage schema migrations in Kubernetes.

Expand Down
4 changes: 2 additions & 2 deletions doc/md/guides/deploying/init.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ title: Deploying schema migrations to Kubernetes with Init Containers

:::note Deprecation Notice

This method of running schema migrations is deprecated an no longer recommended.
This method of running schema migrations is deprecated and no longer recommended.

Please use the [Kubernetes Operator](k8s-argo.md) to manage schema migrations in Kubernetes.

Expand Down Expand Up @@ -156,7 +156,7 @@ spec:
```

Notice the new configuration blocks we added to our deployment manifest:
* We added our secret `atlas-project` [as a volume](https://kubernetes.io/docs/tasks/configure-pod-container/configure-volume-storage/#configure-a-volume-for-a-pod) to the
* We added our secret `atlas-project` [as a volume](https://kubernetes.io/docs/tasks/configure-pod-container/configure-volume-storage/#configure-a-volume-for-a-pod) to
the deployment's PodSpec.
* We add an `initContainer` named `migrate` that runs the `ghcr.io/repo/migrations:v0.1.2` image.
* We mounted the `atlas-project` volume at `/etc/atlas` in our init container.
Expand Down
2 changes: 1 addition & 1 deletion doc/md/guides/migration-tools/golang-migrate.md
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ and its _desired_ state.

For golang-migrate users, the current state can be thought of as the sum of
all _up_ migrations in a migration directory. The desired state can be provided to Atlas
via an a Atlas schema [HCL file](https://atlasgo.io/atlas-schema/hcl), a plain SQL file, or as a
via an Atlas schema [HCL file](https://atlasgo.io/atlas-schema/hcl), a plain SQL file, or as a
connection string to a database that contains the desired schema.

In this guide, we will show how Atlas can automatically plan schema migrations for
Expand Down
2 changes: 1 addition & 1 deletion doc/md/guides/sqlite/functional-indexes.md
Original file line number Diff line number Diff line change
Expand Up @@ -186,7 +186,7 @@ Awesome! Our query has made use of the index in order to retrieve the results.

After creating the index, the query performance is expected to improve significantly, because the query planner will use the index to perform the search instead of scanning the entire table. This results in fewer disk reads and a reduction in the number of rows that need to be processed, which leads to a faster query execution time.

It is worth noting that while functional indexes can improve performance for certain queries, they can also introduce overhead for insert and update operations, since the index needs to be updated every time the table is modified. This can slow down write-centric workloads,, so they need to be used with caution.
It is worth noting that while functional indexes can improve performance for certain queries, they can also introduce overhead for insert and update operations, since the index needs to be updated every time the table is modified. This can slow down write-centric workloads, so they need to be used with caution.

:::info
To learn more about creating indexes with expressions in SQLite, visit the official documentation [here](https://www.sqlite.org/expridx.html)
Expand Down
2 changes: 1 addition & 1 deletion doc/md/home/docs.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -358,7 +358,7 @@ The default binaries distributed in official releases are released under the [At
<DBCard name="SQL Server" icon="sql-server.svg" url="/guides/mssql" />
<DBCard name="ClickHouse" icon="clickhouse.svg" url="/guides/clickhouse" />
<DBCard name="Redshift" icon="redshift.svg" isDark={true} url="/guides/redshift">
<Icon icon="comming-soon-icon.svg" />
<Icon icon="coming-soon-icon.svg" />
</DBCard>
</div>

Expand Down
8 changes: 4 additions & 4 deletions doc/md/lint/analyzers.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -276,11 +276,11 @@ The following schema change checks are provided by Atlas:
| [MY101](#MY101) | Adding a non-nullable column without a `DEFAULT` value to an existing table |
| [MY102](#MY102) | Adding a column with an inline `REFERENCES` clause has no actual effect |
| [MY110](#MY110) | Removing enum values from a column requires a table copy |
| [MY111](#MY111) | Reordering enum values of a colum requires a table copy |
| [MY111](#MY111) | Reordering enum values of a column requires a table copy |
| [MY112](#MY112) | Inserting new enum values not at the end requires a table copy |
| [MY113](#MY113) | Exceeding 256 enum values changes storage size and requires a table copy |
| [MY120](#MY120) | Removing set values from a column requires a table copy |
| [MY121](#MY121) | Reordering set values of a colum requires a table copy |
| [MY121](#MY121) | Reordering set values of a column requires a table copy |
| [MY122](#MY122) | Inserting new set values not at the end requires a table copy |
| [MY123](#MY123) | Exceeding 8, 16, 24, 32 or 64 set values changes the storage size and requires a table copy |
| **NM** | **[Naming Conventions](#naming-conventions-policy)** |
Expand Down Expand Up @@ -491,7 +491,7 @@ enum definition.

#### MY111 {#MY111}

Reordering enum values of a colum requires a table copy. During this process, the table is locked for write operations.
Reordering enum values of a column requires a table copy. During this process, the table is locked for write operations.

Note that since the order of the enum values defines how the table is sorted when using `ORDER BY` on the column,
controlling the ordering behavior can be achieved using the `DESC` clause or expressions as follows:
Expand Down Expand Up @@ -523,7 +523,7 @@ set definition.

#### MY121 {#MY121}

Reordering set values of a colum requires a table copy. During this process, the table is locked for write operations.
Reordering set values of a column requires a table copy. During this process, the table is locked for write operations.

#### MY122 {#MY122}

Expand Down
2 changes: 1 addition & 1 deletion doc/md/reference.md
Original file line number Diff line number Diff line change
Expand Up @@ -194,7 +194,7 @@ atlas migrate diff [flags] [name]
#### Details
The 'atlas migrate diff' command uses the dev-database to calculate the current state of the migration directory
by executing its files. It then compares its state to the desired state and creates a new migration file containing
SQL statements for moving from the current to the desired state. The desired state can be another another database,
SQL statements for moving from the current to the desired state. The desired state can be another database,
an HCL, SQL, or ORM schema. See: https://atlasgo.io/versioned/diff

#### Example
Expand Down
2 changes: 1 addition & 1 deletion doc/website/blog/2022-01-19-atlas-v030.md
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,7 @@ is the actual, current schema of our database.

Second, we can see that our ERD has changed reflecting the addition of the `categories` and `post_categories`
tables to our schema. These two tables that have been added are now shown in green. By clicking the "expand"
icon on the top-right corner of the ERD panel, we can open a more detailed view of our schema.
icon in the top-right corner of the ERD panel, we can open a more detailed view of our schema.

![ERD displaying diff](https://blog.ariga.io/uploads/images/posts/atlas-v0.3.0/atlas-post-erd.png)

Expand Down
2 changes: 1 addition & 1 deletion doc/website/blog/2022-02-01-atlas-v032-multi-schema.md
Original file line number Diff line number Diff line change
Expand Up @@ -257,6 +257,6 @@ Atlas also has a [Management UI](https://atlasgo.io/ui/intro) (-w option in the
support is not present there yet - stay tuned for updates on multi-schema support for the UI in an upcoming release!

#### Getting involved with Atlas
* Follow the [Getting Started](https://atlasgo.io/cli/getting-started/setting-up) guide..
* Follow the [Getting Started](https://atlasgo.io/cli/getting-started/setting-up) guide.
* Join our [Discord Server](https://discord.gg/zZ6sWVg6NT).
* Follow us [on Twitter](https://twitter.com/ariga_io).
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,7 @@ creating [Atlas](https://atlasgo.io) ([GitHub](https://github.com/ariga/atlas))
maintain here at [Ariga](https://ariga.io).

Using Atlas, database schemas can be inspected to produce Go structs representing a graph of the database
schema topology. Notice the many cyclic references that make it hard to print (but very ergonomic to travere :-)):
schema topology. Notice the many cyclic references that make it hard to print (but very ergonomic to traverse :-)):

```go
&schema.Realm{
Expand Down Expand Up @@ -299,7 +299,7 @@ package schema
type Inspector interface {
// InspectSchema returns the schema description by its name. An empty name means the
// "attached schema" (e.g. SCHEMA() in MySQL or CURRENT_SCHEMA() in PostgreSQL).
// A NotExistError error is returned if the schema does not exists in the database.
// A NotExistError error is returned if the schema does not exist in the database.
InspectSchema(ctx context.Context, name string, opts *InspectOptions) (*Schema, error)

// InspectRealm returns the description of the connected database.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ While CockroachDB aims to be PostgreSQL compatible, it still has some incompatib
which prevented Atlas users using the existing Postgres dialect from working with it.

With the latest release of Atlas, the Postgres driver automatically detects if it is connected to a CockroachDB
database and uses a custom driver which provides compatability with CockroachDB.
database and uses a custom driver which provides compatibility with CockroachDB.

### Getting started with Atlas and CockroachDB

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -183,7 +183,7 @@ What happened here? After further investigation, you will find that our connecti
not to a schema. The third migration file however does not contain a schema qualifier for the `CREATE TABLE` statement.

By default, Atlas wraps the execution of each migration file into one transaction. This transaction gets rolled back
if any error occurs withing execution. Be aware though, that some databases, such as MySQL and MariaDB, don't support
if any error occurs during execution. Be aware though, that some databases, such as MySQL and MariaDB, don't support
transactional DDL. If you want to learn how to configure the way Atlas uses transactions, have a look at the
[docs](/versioned/apply#transaction-configuration).

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ This means, for example, that renaming a column from `email_address` to `email`
versions of the application to fail when they try to access the column using the old name.
This is a breaking change, and it can cause downtime if not detected and prevented.

**Manually enforcing backward-compatability**
**Manually enforcing backward-compatibility**

Preventing breaking changes in production is crucial to maintaining a highly-available
system. To enforce backward-compatibility most teams rely on two things:
Expand Down
2 changes: 1 addition & 1 deletion doc/website/blog/2023-05-16-vscode-extension.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ Atlas includes a data-definition language based on a Terraform-like [syntax](htt
language, which was designed specifically for describing database schemas is based on HashiCorp's [HCL](https://github.com/hashicorp/hcl)
toolkit for creating structured configuration languages.

This data definition language is easy to read, edit, and visualize, and it allows us to organize our schema data in a
This data definition language is easy to read, edit, and visualize, and it allows us to organize our schema data in an
efficient way that has many benefits to using plain SQL.

## What is the Atlas HCL VSCode Extension?
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ management world, and we built Atlas to [support both](/concepts/declarative-vs-
the get-go.

Today, we are happy to announce [v0.2.0](https://github.com/ariga/atlas-operator/releases/tag/v0.2.0)
of the Atlas Kubernetes Operator which adds support support for the
of the Atlas Kubernetes Operator which adds support for the
[versioned migration workflow](/integrations/kubernetes/operator#versioned-schema-migrations).

In this blog post we will demonstrate how to use the Atlas Operator with this new workflow.
Expand Down
2 changes: 1 addition & 1 deletion doc/website/blog/2023-08-16-passwordless-migrations.md
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,7 @@ env "rds" {
}
```

Lets break this example down:
Let's break this example down:
* The `locals` block defines two variables – `endpoint` and `username` – that we use to store the database endpoint and
the username of the user created in the database.
* Next, we define an `aws_rds_token` data source to generate a token for the database. To read more about this
Expand Down
2 changes: 1 addition & 1 deletion doc/website/src/pages/index.module.css
Original file line number Diff line number Diff line change
Expand Up @@ -312,7 +312,7 @@ img {
}
/*end navbar*/

/*titels*/
/*titles*/
.title, .slide3__title {
margin-bottom: 10px;
text-align: left;
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# inspect without dev-db will failed
# inspect without dev-db will fail
! atlas schema inspect -u file://a.sql
stderr 'Error: --dev-url cannot be empty'

Expand Down
2 changes: 1 addition & 1 deletion schemahcl/schemahcl.go
Original file line number Diff line number Diff line change
Expand Up @@ -570,7 +570,7 @@ func (s *State) mayScopeContext(ctx *hcl.EvalContext, scope []string) *hcl.EvalC
return ctx
}
nctx := ctx.NewChild()
// Use the same variables/functions maps to avoid copying per scope, but return a
// Use the same variables/functions maps to avoid copying per scope, but return
// another child context to prevent writes from different blocks to the same maps.
nctx.Variables, nctx.Functions = vars, funcs
return nctx.NewChild()
Expand Down
2 changes: 1 addition & 1 deletion sql/internal/specutil/convert.go
Original file line number Diff line number Diff line change
Expand Up @@ -818,7 +818,7 @@ func fromDependsOn[T interface{ AddDeps(...schema.Object) T }](loc string, t T,
})
}
if err != nil {
return fmt.Errorf("find %s refrence for %s.depends_on[%d]: %w", loc, p[0].T, i, err)
return fmt.Errorf("find %s reference for %s.depends_on[%d]: %w", loc, p[0].T, i, err)
}
t.AddDeps(o)
}
Expand Down
2 changes: 1 addition & 1 deletion sql/migrate/dir.go
Original file line number Diff line number Diff line change
Expand Up @@ -717,7 +717,7 @@ type (
// Error implements the error interface.
func (err *ChecksumError) Error() string { return ErrChecksumMismatch.Error() }

// Is exists for backwards compatability reasons.
// Is exists for backwards compatibility reasons.
func (err *ChecksumError) Is(target error) bool {
return errors.Is(ErrChecksumMismatch, target)
}
Expand Down
2 changes: 1 addition & 1 deletion sql/postgres/inspect.go
Original file line number Diff line number Diff line change
Expand Up @@ -684,7 +684,7 @@ func (i *inspect) partitions(s *schema.Schema) error {
for i := range idxs {
switch idx, err := strconv.Atoi(idxs[i]); {
case err != nil:
return fmt.Errorf("postgres: faild parsing partition key index %q", idxs[i])
return fmt.Errorf("postgres: failed parsing partition key index %q", idxs[i])
// An expression.
case idx == 0:
j := sqlx.ExprLastIndex(d.exprs)
Expand Down
2 changes: 1 addition & 1 deletion sql/schema/migrate.go
Original file line number Diff line number Diff line change
Expand Up @@ -390,7 +390,7 @@ const (

// List of diff modes.
const (
DiffModeUnset DiffMode = iota // Default, backwards compatability.
DiffModeUnset DiffMode = iota // Default, backwards compatibility.
DiffModeNotNormalized // Diff objects are considered to be in not normalized state.
DiffModeNormalized // Diff objects are considered to be in normalized state.
)
Expand Down
2 changes: 1 addition & 1 deletion sql/sqlite/inspect.go
Original file line number Diff line number Diff line change
Expand Up @@ -545,7 +545,7 @@ func defaultExpr(x string) schema.Expr {
}
}

// blob literals are hex strings preceded by 'x' (or 'X).
// blob literals are hex strings preceded by 'x' (or 'X').
func isBlob(s string) bool {
if (strings.HasPrefix(s, "x'") || strings.HasPrefix(s, "X'")) && strings.HasSuffix(s, "'") {
_, err := strconv.ParseUint(s[2:len(s)-1], 16, 64)
Expand Down

0 comments on commit 1548fe1

Please sign in to comment.