Skip to content

Commit

Permalink
Merge pull request rook#10003 from rexagod/9711
Browse files Browse the repository at this point in the history
core: support arbitrary node affinity inputs
  • Loading branch information
travisn authored Apr 12, 2022
2 parents db09f91 + 6e49a09 commit 38dc2fa
Show file tree
Hide file tree
Showing 4 changed files with 147 additions and 6 deletions.
20 changes: 14 additions & 6 deletions deploy/examples/operator.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@ data:
# Labels to add to the CSI RBD Deployments and DaemonSets Pods.
# ROOK_CSI_RBD_POD_LABELS: "key1=value1,key2=value2"

# (Optional) CephCSI provisioner NodeAffinity(applied to both CephFS and RBD provisioner).
# (Optional) CephCSI provisioner NodeAffinity (applied to both CephFS and RBD provisioner).
# CSI_PROVISIONER_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
# (Optional) CephCSI provisioner tolerations list (applied to both CephFS and RBD provisioner).
# Put here list of taints you want to tolerate in YAML format.
Expand All @@ -119,7 +119,7 @@ data:
# - effect: NoExecute
# key: node-role.kubernetes.io/etcd
# operator: Exists
# (Optional) CephCSI plugin NodeAffinity(applied to both CephFS and RBD plugin).
# (Optional) CephCSI plugin NodeAffinity (applied to both CephFS and RBD plugin).
# CSI_PLUGIN_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
# (Optional) CephCSI plugin tolerations list (applied to both CephFS and RBD plugin).
# Put here list of taints you want to tolerate in YAML format.
Expand All @@ -132,15 +132,15 @@ data:
# key: node-role.kubernetes.io/etcd
# operator: Exists

# (Optional) CephCSI RBD provisioner NodeAffinity(if specified, overrides CSI_PROVISIONER_NODE_AFFINITY).
# (Optional) CephCSI RBD provisioner NodeAffinity (if specified, overrides CSI_PROVISIONER_NODE_AFFINITY).
# CSI_RBD_PROVISIONER_NODE_AFFINITY: "role=rbd-node"
# (Optional) CephCSI RBD provisioner tolerations list (if specified, overrides CSI_PROVISIONER_TOLERATIONS).
# Put here list of taints you want to tolerate in YAML format.
# CSI provisioner would be best to start on the same nodes as other ceph daemons.
# CSI_RBD_PROVISIONER_TOLERATIONS: |
# - key: node.rook.io/rbd
# operator: Exists
# (Optional) CephCSI RBD plugin NodeAffinity(if specified, overrides CSI_PLUGIN_NODE_AFFINITY).
# (Optional) CephCSI RBD plugin NodeAffinity (if specified, overrides CSI_PLUGIN_NODE_AFFINITY).
# CSI_RBD_PLUGIN_NODE_AFFINITY: "role=rbd-node"
# (Optional) CephCSI RBD plugin tolerations list (if specified, overrides CSI_PLUGIN_TOLERATIONS).
# Put here list of taints you want to tolerate in YAML format.
Expand All @@ -149,16 +149,24 @@ data:
# - key: node.rook.io/rbd
# operator: Exists

# (Optional) CephCSI CephFS provisioner NodeAffinity(if specified, overrides CSI_PROVISIONER_NODE_AFFINITY).
# (Optional) CephCSI CephFS provisioner NodeAffinity (if specified, overrides CSI_PROVISIONER_NODE_AFFINITY).
# CSI_CEPHFS_PROVISIONER_NODE_AFFINITY: "role=cephfs-node"
# (Optional) CephCSI CephFS provisioner tolerations list (if specified, overrides CSI_PROVISIONER_TOLERATIONS).
# Put here list of taints you want to tolerate in YAML format.
# CSI provisioner would be best to start on the same nodes as other ceph daemons.
# CSI_CEPHFS_PROVISIONER_TOLERATIONS: |
# - key: node.rook.io/cephfs
# operator: Exists
# (Optional) CephCSI CephFS plugin NodeAffinity(if specified, overrides CSI_PLUGIN_NODE_AFFINITY).
# (Optional) CephCSI CephFS plugin NodeAffinity (if specified, overrides CSI_PLUGIN_NODE_AFFINITY).
# CSI_CEPHFS_PLUGIN_NODE_AFFINITY: "role=cephfs-node"
# NOTE: Support for defining NodeAffinity for operators other than "In" and "Exists" requires the user to input a
# valid v1.NodeAffinity JSON or YAML string. For example, the following is valid YAML v1.NodeAffinity:
# CSI_CEPHFS_PLUGIN_NODE_AFFINITY: |
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: myKey
# operator: DoesNotExist
# (Optional) CephCSI CephFS plugin tolerations list (if specified, overrides CSI_PLUGIN_TOLERATIONS).
# Put here list of taints you want to tolerate in YAML format.
# CSI plugins need to be started on all the nodes where the clients need to mount the storage.
Expand Down
1 change: 1 addition & 0 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ require (
k8s.io/cloud-provider v0.21.1
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a
sigs.k8s.io/controller-runtime v0.10.2
sigs.k8s.io/yaml v1.2.0
)

replace (
Expand Down
24 changes: 24 additions & 0 deletions pkg/operator/k8sutil/node.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,10 @@ package k8sutil

import (
"context"
"encoding/json"
"errors"
"fmt"
"sigs.k8s.io/yaml"
"strings"

cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
Expand Down Expand Up @@ -321,6 +323,11 @@ func RookNodesMatchingKubernetesNodes(rookStorage cephv1.StorageScopeSpec, kuber

// GenerateNodeAffinity will return v1.NodeAffinity or error
func GenerateNodeAffinity(nodeAffinity string) (*v1.NodeAffinity, error) {
affinity, err := evaluateJSONOrYAMLInput(nodeAffinity)
if err == nil {
return affinity, nil
}
logger.Debugf("input not a valid JSON or YAML: %s, continuing", err)
newNodeAffinity := &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
Expand Down Expand Up @@ -378,3 +385,20 @@ func GenerateNodeAffinity(nodeAffinity string) (*v1.NodeAffinity, error) {
}
return newNodeAffinity, nil
}

// evaluateJSONOrYAMLInput tries to interpret the node affinity string as a
// JSON or YAML encoding of v1.NodeAffinity. YAML input is converted to JSON
// before unmarshalling. A non-nil error is returned when the input is neither
// valid JSON nor valid YAML, or when it decodes to an empty (nil) affinity,
// so that the caller can fall back to the legacy "key=value" parser.
func evaluateJSONOrYAMLInput(nodeAffinity string) (*v1.NodeAffinity, error) {
	raw := []byte(nodeAffinity)
	// Only convert YAML->JSON when the input is not already valid JSON.
	if !json.Valid(raw) {
		converted, err := yaml.YAMLToJSON(raw)
		if err != nil {
			return nil, fmt.Errorf("failed to process YAML node affinity input: %w", err)
		}
		raw = converted
	}
	var affinity *v1.NodeAffinity
	if err := json.Unmarshal(raw, &affinity); err != nil {
		return nil, fmt.Errorf("cannot unmarshal affinity: %w", err)
	}
	// json.Unmarshal leaves affinity nil for inputs such as "null"; treat that
	// as invalid here so callers never receive a (nil, nil) result.
	if affinity == nil {
		return nil, errors.New("node affinity input decoded to an empty affinity")
	}
	return affinity, nil
}
108 changes: 108 additions & 0 deletions pkg/operator/k8sutil/node_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -389,6 +389,114 @@ func TestGenerateNodeAffinity(t *testing.T) {
},
wantErr: false,
},
{
name: "GenerateNodeAffinityWithJSONInputUsingDoesNotExistOperator",
args: args{
nodeAffinity: `{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchExpressions":[{"key":"myKey","operator":"DoesNotExist"}]}]}}`,
},
want: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "myKey",
Operator: v1.NodeSelectorOpDoesNotExist,
},
},
},
},
},
},
wantErr: false,
},
{
name: "GenerateNodeAffinityWithJSONInputUsingNotInOperator",
args: args{
nodeAffinity: `{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchExpressions":[{"key":"myKey","operator":"NotIn","values":["myValue"]}]}]}}`,
},
want: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "myKey",
Operator: v1.NodeSelectorOpNotIn,
Values: []string{
"myValue",
},
},
},
},
},
},
},
wantErr: false,
},
{
name: "GenerateNodeAffinityWithYAMLInputUsingDoesNotExistOperator",
args: args{
nodeAffinity: `
---
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
-
matchExpressions:
-
key: myKey
operator: DoesNotExist`,
},
want: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "myKey",
Operator: v1.NodeSelectorOpDoesNotExist,
},
},
},
},
},
},
wantErr: false,
},
{
name: "GenerateNodeAffinityWithYAMLInputUsingNotInOperator",
args: args{
nodeAffinity: `
---
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
-
matchExpressions:
-
key: myKey
operator: NotIn
values:
- myValue`,
},
want: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "myKey",
Operator: v1.NodeSelectorOpNotIn,
Values: []string{
"myValue",
},
},
},
},
},
},
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
Expand Down

0 comments on commit 38dc2fa

Please sign in to comment.