diff --git a/ADOPTERS.md b/ADOPTERS.md
index bec8c9ea7dac..9f4e6c3abb6f 100644
--- a/ADOPTERS.md
+++ b/ADOPTERS.md
@@ -7,14 +7,14 @@ to share the details of their usage publicly at this time.
 * [Calit2 (California Institute for Telecommunications and Information Technology)](http://www.calit2.net/)
   is one of 4 institutes formed by a joint partnership of
-  University of California and the state of California with the goal of *“inventing the university
-  research environment of the future”*. They operate one of the largest known Rook clusters in
+  University of California and the state of California with the goal of *"inventing the university
+  research environment of the future"*. They operate one of the largest known Rook clusters in
   production and they are using Rook to provide cheap, reliable, and fast storage to scientific users.
 
 * [NAV (Norwegian Labour and Welfare Administration)](https://www.nav.no/) is the current Norwegian
   public welfare agency, responsible for 1/3 of the state budget of Norway. They find a massive
   simplification of management and maintenance for their Ceph clusters by adopting Rook.
 
-* [Replicated](https://www.replicated.com/) delivers *“SaaS On-Prem”* and are the creators of
+* [Replicated](https://www.replicated.com/) delivers *"SaaS On-Prem"* and are the creators of
   open-source [kURL](https://kurl.sh/): a custom Kubernetes distro creator that software vendors use to
   package and distribute production-grade Kubernetes infrastructure. Rook is a default add-on in kURL,
   so all installations include Rook to manage highly available storage that the software
diff --git a/Documentation/ceph-csi-snapshot.md b/Documentation/ceph-csi-snapshot.md
index 5ed0560ce793..08809deed8bb 100644
--- a/Documentation/ceph-csi-snapshot.md
+++ b/Documentation/ceph-csi-snapshot.md
@@ -18,8 +18,8 @@ documentation](https://kubernetes.io/docs/concepts/storage/volume-snapshot-class
 In short, as the documentation describes it:
 
 > Just like StorageClass provides a way for administrators to describe the
-> “classes” of storage they offer when provisioning a volume,
-> VolumeSnapshotClass provides a way to describe the “classes” of storage when
+> "classes" of storage they offer when provisioning a volume,
+> VolumeSnapshotClass provides a way to describe the "classes" of storage when
 > provisioning a volume snapshot.
 
 ## Upgrade Snapshot API
diff --git a/build/rbac/rbac.yaml b/build/rbac/rbac.yaml
index 76f77cbbd5c0..611d7a961345 100644
--- a/build/rbac/rbac.yaml
+++ b/build/rbac/rbac.yaml
@@ -464,8 +464,8 @@ metadata:
     operator: rook
     storage-backend: ceph
 rules:
-  # Most resources are represented by a string representation of their name, such as “pods”, just as it appears in the URL for the relevant API endpoint.
-  # However, some Kubernetes APIs involve a “subresource”, such as the logs for a pod. [...]
+  # Most resources are represented by a string representation of their name, such as "pods", just as it appears in the URL for the relevant API endpoint.
+  # However, some Kubernetes APIs involve a "subresource", such as the logs for a pod. [...]
   # To represent this in an RBAC role, use a slash to delimit the resource and subresource.
   # https://kubernetes.io/docs/reference/access-authn-authz/rbac/#referring-to-resources
   - apiGroups: ['']
diff --git a/cluster/charts/rook-ceph/templates/clusterrole.yaml b/cluster/charts/rook-ceph/templates/clusterrole.yaml
index 17854caeee3f..83ba8bc9f86c 100644
--- a/cluster/charts/rook-ceph/templates/clusterrole.yaml
+++ b/cluster/charts/rook-ceph/templates/clusterrole.yaml
@@ -7,8 +7,8 @@ metadata:
     operator: rook
     storage-backend: ceph
 rules:
-  # Most resources are represented by a string representation of their name, such as “pods”, just as it appears in the URL for the relevant API endpoint.
-  # However, some Kubernetes APIs involve a “subresource”, such as the logs for a pod. [...]
+  # Most resources are represented by a string representation of their name, such as "pods", just as it appears in the URL for the relevant API endpoint.
+  # However, some Kubernetes APIs involve a "subresource", such as the logs for a pod. [...]
   # To represent this in an RBAC role, use a slash to delimit the resource and subresource.
   # https://kubernetes.io/docs/reference/access-authn-authz/rbac/#referring-to-resources
   - apiGroups: [""]
diff --git a/cluster/examples/kubernetes/ceph/common.yaml b/cluster/examples/kubernetes/ceph/common.yaml
index f65ac123a510..54dbce9937c3 100644
--- a/cluster/examples/kubernetes/ceph/common.yaml
+++ b/cluster/examples/kubernetes/ceph/common.yaml
@@ -71,8 +71,8 @@ metadata:
     operator: rook
     storage-backend: ceph
 rules:
-  # Most resources are represented by a string representation of their name, such as “pods”, just as it appears in the URL for the relevant API endpoint.
-  # However, some Kubernetes APIs involve a “subresource”, such as the logs for a pod. [...]
+  # Most resources are represented by a string representation of their name, such as "pods", just as it appears in the URL for the relevant API endpoint.
+  # However, some Kubernetes APIs involve a "subresource", such as the logs for a pod. [...]
   # To represent this in an RBAC role, use a slash to delimit the resource and subresource.
   # https://kubernetes.io/docs/reference/access-authn-authz/rbac/#referring-to-resources
   - apiGroups: [""]
diff --git a/design/ceph/ceph-external-cluster.md b/design/ceph/ceph-external-cluster.md
index 85fb97bdcf27..a680d780e25d 100644
--- a/design/ceph/ceph-external-cluster.md
+++ b/design/ceph/ceph-external-cluster.md
@@ -4,7 +4,7 @@ Target version: 1.1
 
 Rook was designed for storage consumption in the same Kubernetes cluster as the clients who are consuming the storage.
 However, this scenario is not always sufficient.
-Another common scenario is when Ceph is running in an “external” cluster from the clients. There are a number of reasons for this scenario:
+Another common scenario is when Ceph is running in an "external" cluster from the clients. There are a number of reasons for this scenario:
 - Centralized Ceph management in a single cluster with multiple Kubernetes clusters that need to consume storage.
 - Customers already have a Ceph cluster running not in a K8s environment, likely deployed with Ansible, ceph-deploy, or even manually. They should be able to consume this storage from Kubernetes.
 - Fully independent storage for another level of isolation from their K8s compute nodes. This scenario can technically also be accomplished in a single Kubernetes cluster through labels, taints, and tolerations.
@@ -60,7 +60,7 @@ In order for Rook to provide the storage to clients in the local cluster, the Ce
 1. Before the CephCluster CRD is created, some metadata must be initialized in local configmaps/secrets to allow the local cluster to manage the external cluster.
    * mon endpoint(s) and admin keyring
 1. The mon, mgr, and osd daemons will not be managed by the local Rook operator. These daemons must be created and managed by the external cluster.
-1. The operator will make a “best effort” to keep the list of mons updated.
+1. The operator will make a "best effort" to keep the list of mons updated.
   * If the mons change in the external cluster, the list of mons must be updated in the local cluster.
   * The operator will need to query the Ceph status periodically (perhaps every minute). If there is a change to the mons, the operator will update the local configmaps/secrets.\
   * If the local operator fails to see changes to the external mons, perhaps because it is down, the mon list could become stale. In that case, the admin will need to update the list similarly to how it was initialized when the local cluster was first created.
@@ -75,7 +75,7 @@ The first bullet point above requires an extra manual configuration step by the
 1. Load the yaml file into the local cluster
    * `kubectl create -f `
 
-The CephCluster CRD will have a new property “external” to indicate whether the cluster is external. If true, the local operator will implement the described behavior.
+The CephCluster CRD will have a new property "external" to indicate whether the cluster is external. If true, the local operator will implement the described behavior.
 
 Other CRDs such as CephBlockPool, CephFilesystem, and CephObjectStore do not need this property since they all belong to the cluster and will effectively inherit the external property.
diff --git a/design/ceph/resource-constraints.md b/design/ceph/resource-constraints.md
index f68b825fe787..d137feb99e84 100644
--- a/design/ceph/resource-constraints.md
+++ b/design/ceph/resource-constraints.md
@@ -41,11 +41,11 @@ The precedence of values is:
 
 A Kubernetes resource requirement object looks like this:
 ```yaml
 requests:
-  cpu: “2”
-  memory: “1Gi”
+  cpu: "2"
+  memory: "1Gi"
 limits:
-  cpu: “3”
-  memory: “2Gi”
+  cpu: "3"
+  memory: "2Gi"
 ```
 The key in the CRDs to set resource requirements is named `resources`.
diff --git a/design/common/multiple-storage-types-support.md b/design/common/multiple-storage-types-support.md
index ca77861da499..e2ef1b9111a2 100644
--- a/design/common/multiple-storage-types-support.md
+++ b/design/common/multiple-storage-types-support.md
@@ -103,10 +103,10 @@ Each operator pod would watch the same custom resource types with their own indi
 
 For storage backends that fit the patterns that [Metacontroller](https://github.com/GoogleCloudPlatform/metacontroller) supports (`CompositeController` and `DecoratorController`), this could be an option to incorporate into Rook.
 Basically, a storage backend defines their custom types and the parent/child relationships between them.
-The metacontroller handles all the K8s API interactions and regularly calls into storage backend defined “hooks”.
+The metacontroller handles all the K8s API interactions and regularly calls into storage backend defined "hooks".
 The storage backend is given JSON representing the current state in K8s types and then returns JSON defining in K8s types what the desired state should be.
 The metacontroller then makes that desired state a reality via the K8s API.
-This pattern does allow for fairly complicated stateful apps (e.g. [Vitess](https://github.com/GoogleCloudPlatform/metacontroller/tree/master/examples/vitess)) that have well defined parent/children hierarchies, and can allow for the storage backend operator to perform “imperative” operations to manipulate cluster state by launching Jobs.
+This pattern does allow for fairly complicated stateful apps (e.g. [Vitess](https://github.com/GoogleCloudPlatform/metacontroller/tree/master/examples/vitess)) that have well defined parent/children hierarchies, and can allow for the storage backend operator to perform "imperative" operations to manipulate cluster state by launching Jobs.
 
 ### Recommendation
diff --git a/pkg/operator/ceph/object/controller.go b/pkg/operator/ceph/object/controller.go
index dab69a5d2dfc..bc7558e9612c 100644
--- a/pkg/operator/ceph/object/controller.go
+++ b/pkg/operator/ceph/object/controller.go
@@ -468,7 +468,7 @@ func (r *ReconcileCephObjectStore) reconcileCephZone(store *cephv1.CephObjectSto
 
 	_, err := RunAdminCommandNoMultisite(objContext, true, "zone", "get", realmArg, zoneGroupArg, zoneArg)
 	if err != nil {
-		// ENOENT mean “No such file or directory”
+		// ENOENT means "No such file or directory"
 		if code, err := exec.ExtractExitCode(err); err == nil && code == int(syscall.ENOENT) {
 			return waitForRequeueIfObjectStoreNotReady, errors.Wrapf(err, "ceph zone %q not found", store.Spec.Zone.Name)
 		} else {
diff --git a/pkg/operator/ceph/object/objectstore.go b/pkg/operator/ceph/object/objectstore.go
index 40ef19b0ed8c..b9c911a3a618 100644
--- a/pkg/operator/ceph/object/objectstore.go
+++ b/pkg/operator/ceph/object/objectstore.go
@@ -382,7 +382,7 @@ func createMultisite(objContext *Context, endpointArg string) error {
 	// create the realm if it doesn't exist yet
 	output, err := RunAdminCommandNoMultisite(objContext, true, "realm", "get", realmArg)
 	if err != nil {
-		// ENOENT means “No such file or directory”
+		// ENOENT means "No such file or directory"
 		if code, err := exec.ExtractExitCode(err); err == nil && code == int(syscall.ENOENT) {
 			output, err = RunAdminCommandNoMultisite(objContext, false, "realm", "create", realmArg)
 			if err != nil {
@@ -397,7 +397,7 @@ func createMultisite(objContext *Context, endpointArg string) error {
 	// create the zonegroup if it doesn't exist yet
 	output, err = RunAdminCommandNoMultisite(objContext, true, "zonegroup", "get", realmArg, zoneGroupArg)
 	if err != nil {
-		// ENOENT means “No such file or directory”
+		// ENOENT means "No such file or directory"
 		if code, err := exec.ExtractExitCode(err); err == nil && code == int(syscall.ENOENT) {
 			output, err = RunAdminCommandNoMultisite(objContext, false, "zonegroup", "create", "--master", realmArg, zoneGroupArg, endpointArg)
 			if err != nil {
@@ -412,7 +412,7 @@ func createMultisite(objContext *Context, endpointArg string) error {
 	// create the zone if it doesn't exist yet
 	output, err = runAdminCommand(objContext, true, "zone", "get")
 	if err != nil {
-		// ENOENT means “No such file or directory”
+		// ENOENT means "No such file or directory"
 		if code, err := exec.ExtractExitCode(err); err == nil && code == int(syscall.ENOENT) {
 			output, err = runAdminCommand(objContext, false, "zone", "create", "--master", endpointArg)
 			if err != nil {
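
Note on the ENOENT checks touched in the Go hunks above: `radosgw-admin` reports a missing realm/zonegroup/zone by exiting with the errno value, so an exit code equal to ENOENT (2) is treated as "not found" and the entity is created instead of treating the command as a failure. The snippet below is a minimal, self-contained sketch of that pattern using only the Go standard library; it is not Rook's `exec.ExtractExitCode` helper, and the `radosgw-admin zone get --rgw-zone=example` invocation is purely illustrative.

```go
package main

import (
	"errors"
	"fmt"
	"os/exec"
	"syscall"
)

// exitCode pulls the process exit status out of an error returned by os/exec,
// reporting false when the error is not an *exec.ExitError (e.g. binary not found).
func exitCode(err error) (int, bool) {
	var exitErr *exec.ExitError
	if errors.As(err, &exitErr) {
		return exitErr.ExitCode(), true
	}
	return 0, false
}

func main() {
	// Illustrative only: any command that signals "no such entity" through its
	// exit status can be handled the same way.
	out, err := exec.Command("radosgw-admin", "zone", "get", "--rgw-zone=example").CombinedOutput()
	if err != nil {
		if code, ok := exitCode(err); ok && code == int(syscall.ENOENT) {
			fmt.Println("zone not found; this is where it would be created")
			return
		}
		fmt.Printf("zone get failed: %v (output: %q)\n", err, string(out))
		return
	}
	fmt.Printf("zone exists: %s\n", out)
}
```

Rook wraps this kind of exit-code extraction behind its `exec.ExtractExitCode` helper, which is what the hunks above rely on rather than inspecting `*exec.ExitError` directly.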