Update e2e for system tags
GouthamML authored and YashwantGohokar committed Jul 1, 2024
1 parent 3e461b0 commit a35c754
Showing 8 changed files with 90 additions and 24 deletions.
3 changes: 2 additions & 1 deletion hack/run_e2e_test.sh
@@ -61,7 +61,8 @@ function run_e2e_tests_existing_cluster() {
--volume-handle=${FSS_VOLUME_HANDLE} \
--static-snapshot-compartment-id=${STATIC_SNAPSHOT_COMPARTMENT_ID} \
--enable-parallel-run=${ENABLE_PARALLEL_RUN} \
--run-uhp-e2e=${RUN_UHP_E2E}
--run-uhp-e2e=${RUN_UHP_E2E} \
--add-oke-system-tags="false"
retval=$?
return $retval
}
@@ -683,13 +683,13 @@ func healthCheckPortInUse(serviceLister listersv1.ServiceLister, port int32) (bo
if service.DeletionTimestamp != nil || service.Spec.Type != api.ServiceTypeLoadBalancer {
continue
}
if service.Spec.ExternalTrafficPolicy == api.ServiceExternalTrafficPolicyCluster {
if service.Spec.ExternalTrafficPolicy == api.ServiceExternalTrafficPolicyTypeCluster {
// This service is using the default healthcheck port, so we must check if
// any other service is also using this default healthcheck port.
if port == lbNodesHealthCheckPort {
return true, nil
}
} else if service.Spec.ExternalTrafficPolicy == api.ServiceExternalTrafficPolicyLocal {
} else if service.Spec.ExternalTrafficPolicy == api.ServiceExternalTrafficPolicyTypeLocal {
// This service is using a custom healthcheck port (enabled through setting
// externalTrafficPolicy=Local on the service). As this port is unique
// per service, we know no other service will be using this port too.
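For context, the hunk above distinguishes the two externalTrafficPolicy modes when deciding which node port a load balancer health check occupies. Below is a minimal standalone sketch of that rule, using the Type-suffixed v1 constants the rest of this commit switches to; the helper name is illustrative and the literal 10256 is an assumption for the provider's lbNodesHealthCheckPort (the kube-proxy healthz port), not code from the repository.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// lbNodesHealthCheckPort stands in for the provider constant of the same
// name; it is assumed here to equal the kube-proxy healthz port, 10256.
const lbNodesHealthCheckPort = int32(10256)

// healthCheckPortFor is an illustrative helper, not repository code.
func healthCheckPortFor(svc *v1.Service) int32 {
	if svc.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyTypeLocal {
		// Local policy: kube-proxy allocates a per-service health-check
		// node port, so no other service can be using it.
		return svc.Spec.HealthCheckNodePort
	}
	// Cluster policy: all such services share the default healthz port.
	return lbNodesHealthCheckPort
}

func main() {
	local := &v1.Service{Spec: v1.ServiceSpec{
		Type:                  v1.ServiceTypeLoadBalancer,
		ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeLocal,
		HealthCheckNodePort:   32000,
	}}
	cluster := &v1.Service{Spec: v1.ServiceSpec{
		Type:                  v1.ServiceTypeLoadBalancer,
		ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeCluster,
	}}
	fmt.Println(healthCheckPortFor(local), healthCheckPortFor(cluster)) // 32000 10256
}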
@@ -26,7 +26,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
api "k8s.io/kubernetes/pkg/apis/core"
k8sports "k8s.io/kubernetes/pkg/cluster/ports"
)

@@ -171,7 +170,7 @@ func TestGetNodeIngressRules(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Namespace: "namespace", Name: "using-default-health-check-port"},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeLoadBalancer,
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicy(api.ServiceExternalTrafficPolicyCluster),
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeCluster,
Ports: []v1.ServicePort{{Port: 443}},
},
},
@@ -201,15 +200,15 @@ func TestGetNodeIngressRules(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Namespace: "namespace", Name: "using-default-health-check-port"},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeLoadBalancer,
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicy(api.ServiceExternalTrafficPolicyCluster),
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeCluster,
Ports: []v1.ServicePort{{Port: 443}},
},
},
{
ObjectMeta: metav1.ObjectMeta{Namespace: "namespace", Name: "using-NodePort-health-check-port"},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeLoadBalancer,
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicy(api.ServiceExternalTrafficPolicyLocal),
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeCluster,
Ports: []v1.ServicePort{{Port: 8081}},
HealthCheckNodePort: 32000,
},
@@ -316,7 +315,7 @@ func TestGetNodeIngressRules(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Namespace: "namespace", Name: "using-non-default-health-check-port"},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeLoadBalancer,
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicy(api.ServiceExternalTrafficPolicyLocal),
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeLocal,
Ports: []v1.ServicePort{{Port: 8081}},
},
},
@@ -495,7 +494,7 @@ func TestGetNodeIngressRules_NLB(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Namespace: "namespace", Name: "using-default-health-check-port"},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeLoadBalancer,
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicy(api.ServiceExternalTrafficPolicyCluster),
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeCluster,
Ports: []v1.ServicePort{{Port: 443}},
},
},
@@ -996,7 +995,7 @@ func TestGetLoadBalancerEgressRules(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Namespace: "namespace", Name: "using-default-health-check-port"},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeLoadBalancer,
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicy(api.ServiceExternalTrafficPolicyCluster),
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeCluster,
Ports: []v1.ServicePort{{Port: 80}},
},
},
@@ -1020,7 +1019,7 @@ func TestGetLoadBalancerEgressRules(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Namespace: "namespace", Name: "using-default-health-check-port"},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeLoadBalancer,
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicy(api.ServiceExternalTrafficPolicyLocal),
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeLocal,
HealthCheckNodePort: 30000,
},
},
@@ -1045,7 +1044,7 @@ func TestGetLoadBalancerEgressRules(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Namespace: "namespace", Name: "using-Nodeport-health-check-port"},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeLoadBalancer,
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicy(api.ServiceExternalTrafficPolicyLocal),
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeLocal,
Ports: []v1.ServicePort{{Port: 80}},
HealthCheckNodePort: 30000,
},
@@ -1055,7 +1054,7 @@ func TestGetLoadBalancerEgressRules(t *testing.T) {
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeLoadBalancer,
Ports: []v1.ServicePort{{Port: 8080}},
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicy(api.ServiceExternalTrafficPolicyCluster),
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeCluster,
},
},
},
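The test fixtures above replace the conversion v1.ServiceExternalTrafficPolicy(api.ServiceExternalTrafficPolicyCluster) with the Type-suffixed constants that k8s.io/api/core/v1 already defines with the field's own type, which is what lets the k8s.io/kubernetes/pkg/apis/core import be dropped in the earlier hunk. A small self-contained sketch of the resulting fixture style (illustrative only, assuming the pinned API version exposes the Type-suffixed names):

package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// The typed v1 constant is assigned directly; no cast through the
	// internal k8s.io/kubernetes/pkg/apis/core package is needed.
	svc := v1.Service{
		ObjectMeta: metav1.ObjectMeta{Namespace: "namespace", Name: "example"},
		Spec: v1.ServiceSpec{
			Type:                  v1.ServiceTypeLoadBalancer,
			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeLocal,
			Ports:                 []v1.ServicePort{{Port: 8081}},
			HealthCheckNodePort:   32000,
		},
	}
	_ = svc
}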
18 changes: 16 additions & 2 deletions test/e2e/cloud-provider-oci/csi_volume_creation.go
@@ -28,14 +28,28 @@ import (

var _ = Describe("CSI Volume Creation", func() {
f := framework.NewDefaultFramework("csi-basic")
Context("[cloudprovider][storage][csi]", func() {
Context("[cloudprovider][storage][csi][system-tags]", func() {
It("Create PVC and POD for CSI.", func() {
pvcJig := framework.NewPVCTestJig(f.ClientSet, "csi-provisioner-e2e-tests")

ctx := context.TODO()
scName := f.CreateStorageClassOrFail(f.Namespace.Name, "blockvolume.csi.oraclecloud.com", nil, pvcJig.Labels, "WaitForFirstConsumer", false, "Delete", nil)
pvc := pvcJig.CreateAndAwaitPVCOrFailCSI(f.Namespace.Name, framework.MinVolumeBlock, scName, nil, v1.PersistentVolumeFilesystem, v1.ReadWriteOnce, v1.ClaimPending)
f.VolumeIds = append(f.VolumeIds, pvc.Spec.VolumeName)
pvcJig.NewPodForCSI("app1", f.Namespace.Name, pvc.Name, setupF.AdLabel)
volumeName := pvcJig.GetVolumeNameFromPVC(pvc.GetName(), f.Namespace.Name)
compartmentId := f.GetCompartmentId(*setupF)
// read created BV
volumes, err := f.Client.BlockStorage().GetVolumesByName(ctx, volumeName, compartmentId)
framework.ExpectNoError(err)
// volume name duplicate should not exist
for _, volume := range volumes {
framework.Logf("volume details %v :", volume)
framework.Logf("cluster ocid from setup is %s", setupF.ClusterOcid)
if setupF.AddOkeSystemTags && !framework.HasOkeSystemTags(volume.SystemTags) {
framework.Failf("the resource %s is expected to have oke system tags", *volume.Id)
}
}

})

It("Create PVC with VolumeSize 1Gi but should use default 50Gi", func() {
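The new CSI assertions above fetch the provisioned block volume by name and, when the suite runs with --add-oke-system-tags, require OKE system tags on it. A sketch of the expected tag shape and the gate, written as a fragment in the style of the test above (it reuses setupF and the framework helpers from the diff; the "orcl-containerengine" namespace literal is an assumption standing in for cloudprovider.OkeSystemTagNamesapce, only the "Cluster" key is fixed by the helper added later in this commit):

// Hypothetical system tags as they might appear on a tagged block volume.
systemTags := map[string]map[string]interface{}{
	"orcl-containerengine": {"Cluster": "ocid1.cluster.oc1..exampleuniqueID"},
}
if setupF.AddOkeSystemTags && !framework.HasOkeSystemTags(systemTags) {
	framework.Failf("the resource is expected to have oke system tags")
}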
38 changes: 32 additions & 6 deletions test/e2e/cloud-provider-oci/load_balancer.go
@@ -59,7 +59,7 @@ var _ = Describe("Service [Slow]", func() {
},
},
}
Context("[cloudprovider][ccm][lb][SL]", func() {
Context("[cloudprovider][ccm][lb][SL][system-tags]", func() {
It("should be possible to create and mutate a Service type:LoadBalancer (change nodeport) [Canary]", func() {
for _, test := range basicTestArray {
By("Running test for: " + test.lbType)
@@ -99,6 +99,32 @@ var _ = Describe("Service [Slow]", func() {
tcpService = jig.WaitForLoadBalancerOrFail(ns, tcpService.Name, loadBalancerCreateTimeout)
jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer)

By("validating system tags on the loadbalancer")
lbName := cloudprovider.GetLoadBalancerName(tcpService)
sharedfw.Logf("LB Name is %s", lbName)
ctx := context.TODO()
compartmentId := ""
if setupF.Compartment1 != "" {
compartmentId = setupF.Compartment1
} else if f.CloudProviderConfig.CompartmentID != "" {
compartmentId = f.CloudProviderConfig.CompartmentID
} else if f.CloudProviderConfig.Auth.CompartmentID != "" {
compartmentId = f.CloudProviderConfig.Auth.CompartmentID
} else {
sharedfw.Failf("Compartment Id undefined.")
}
lbType := test.lbType
if strings.HasSuffix(test.lbType, "-wris") {
lbType = strings.TrimSuffix(test.lbType, "-wris")
}
loadBalancer, err := f.Client.LoadBalancer(zap.L().Sugar(), lbType, "", nil).GetLoadBalancerByName(ctx, compartmentId, lbName)
sharedfw.ExpectNoError(err)
sharedfw.Logf("Loadbalancer details %v:", loadBalancer)
sharedfw.Logf("cluster ocid from setup is %s", setupF.ClusterOcid)
if setupF.AddOkeSystemTags && !sharedfw.HasOkeSystemTags(loadBalancer.SystemTags) {
sharedfw.Failf("Loadbalancer is expected to have the system tags")
}

tcpNodePort := int(tcpService.Spec.Ports[0].NodePort)
sharedfw.Logf("TCP node port: %d", tcpNodePort)

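The compartment OCID used for the load-balancer lookup above is resolved with a simple fallback chain: the test setup's Compartment1, then the cloud-provider config's top-level CompartmentID, then Auth.CompartmentID. A standalone sketch of that order (the helper itself is illustrative, not repository code):

package main

import (
	"errors"
	"fmt"
)

// resolveCompartmentID mirrors the fallback order used in the test above
// (test compartment, then cloud-provider config, then auth config).
func resolveCompartmentID(candidates ...string) (string, error) {
	for _, id := range candidates {
		if id != "" {
			return id, nil
		}
	}
	return "", errors.New("compartment id undefined")
}

func main() {
	id, err := resolveCompartmentID("", "ocid1.compartment.oc1..fromconfig", "")
	fmt.Println(id, err) // ocid1.compartment.oc1..fromconfig <nil>
}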
@@ -1347,8 +1373,8 @@ var _ = Describe("LB Properties", func() {
{
"lb",
map[string]string{
cloudprovider.ServiceAnnotationLoadBalancerInternal: "true",
cloudprovider.ServiceAnnotationLoadBalancerShape: "flexible",
cloudprovider.ServiceAnnotationLoadBalancerInternal: "true",
cloudprovider.ServiceAnnotationLoadBalancerShape: "flexible",
cloudprovider.ServiceAnnotationLoadBalancerShapeFlexMin: "10",
cloudprovider.ServiceAnnotationLoadBalancerShapeFlexMax: "10",
},
@@ -1478,10 +1504,10 @@ var _ = Describe("LB Properties", func() {
{
"lb",
map[string]string{
cloudprovider.ServiceAnnotationLoadBalancerShape: "flexible",
cloudprovider.ServiceAnnotationLoadBalancerShape: "flexible",
cloudprovider.ServiceAnnotationLoadBalancerShapeFlexMin: "10",
cloudprovider.ServiceAnnotationLoadBalancerShapeFlexMax: "10",
cloudprovider.ServiceAnnotationLoadBalancerPolicy: cloudprovider.IPHashLoadBalancerPolicy,
cloudprovider.ServiceAnnotationLoadBalancerPolicy: cloudprovider.IPHashLoadBalancerPolicy,
},
map[string]string{
cloudprovider.ServiceAnnotationLoadBalancerPolicy: cloudprovider.LeastConnectionsLoadBalancerPolicy,
@@ -1590,7 +1616,7 @@ var _ = Describe("LB Properties", func() {
{
"lb",
map[string]string{
cloudprovider.ServiceAnnotationLoadBalancerShape: "flexible",
cloudprovider.ServiceAnnotationLoadBalancerShape: "flexible",
cloudprovider.ServiceAnnotationLoadBalancerShapeFlexMin: "10",
cloudprovider.ServiceAnnotationLoadBalancerShapeFlexMax: "10",
},
6 changes: 3 additions & 3 deletions test/e2e/cloud-provider-oci/setup.go
@@ -18,9 +18,9 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
sharedfw.Logf("CloudProviderFramework Setup")
sharedfw.Logf("Running tests with existing cluster.")
return nil
}, func(data []byte) {
setupF = sharedfw.New()
},
}, func(data []byte) {
setupF = sharedfw.New()
},
)

var _ = ginkgo.SynchronizedAfterSuite(func() {}, func() {
4 changes: 4 additions & 0 deletions test/e2e/framework/framework.go
@@ -103,6 +103,7 @@ var (
staticSnapshotCompartmentOCID string // Compartment ID for cross compartment snapshot test
runUhpE2E bool // Whether to run UHP E2Es, requires Volume Management Plugin enabled on the node and 16+ cores (check blockvolumeperformance public doc for the exact requirements)
enableParallelRun bool
addOkeSystemTags bool
)

func init() {
@@ -134,6 +135,7 @@ func init() {
flag.StringVar(&staticSnapshotCompartmentOCID, "static-snapshot-compartment-id", "", "Compartment ID for cross compartment snapshot test")
flag.BoolVar(&runUhpE2E, "run-uhp-e2e", false, "Run UHP E2Es as well")
flag.BoolVar(&enableParallelRun, "enable-parallel-run", true, "Enables parallel running of test suite")
flag.BoolVar(&addOkeSystemTags, "add-oke-system-tags", false, "Adds oke system tags to new and existing loadbalancers and storage resources")
}

// Framework is the context of the text execution.
@@ -167,6 +169,7 @@ type Framework struct {
// Compartment ID for cross compartment snapshot test
StaticSnapshotCompartmentOcid string
RunUhpE2E bool
AddOkeSystemTags bool
}

// New creates a new a framework that holds the context of the test
@@ -191,6 +194,7 @@ func NewWithConfig() *Framework {
VolumeHandle: volumeHandle,
StaticSnapshotCompartmentOcid: staticSnapshotCompartmentOCID,
RunUhpE2E: runUhpE2E,
AddOkeSystemTags: addOkeSystemTags,
}

f.CloudConfigPath = cloudConfigFile
22 changes: 22 additions & 0 deletions test/e2e/framework/system_tags_util.go
@@ -0,0 +1,22 @@
package framework

import (
cloudprovider "github.com/oracle/oci-cloud-controller-manager/pkg/cloudprovider/providers/oci"
)

const (
okeSystemTagKey = "Cluster"
)

func HasOkeSystemTags(systemTags map[string]map[string]interface{}) bool {
Logf("actual system tags on the resource: %v", systemTags)
if systemTags != nil {
if okeSystemTag, okeSystemTagNsExists := systemTags[cloudprovider.OkeSystemTagNamesapce]; okeSystemTagNsExists {
if _, okeSystemTagKeyExists := okeSystemTag[okeSystemTagKey]; okeSystemTagKeyExists {
return true
}
}
return false
}
return false
}
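A hedged sketch of how HasOkeSystemTags could be exercised in a unit test for the new helper above; the "orcl-containerengine" literal is an assumption standing in for the value of cloudprovider.OkeSystemTagNamesapce, so the positive cases only pass if that assumption holds.

package framework

import "testing"

func TestHasOkeSystemTags(t *testing.T) {
	cases := map[string]struct {
		tags map[string]map[string]interface{}
		want bool
	}{
		"no tags":               {nil, false},
		"namespace without key": {map[string]map[string]interface{}{"orcl-containerengine": {}}, false},
		"namespace and key":     {map[string]map[string]interface{}{"orcl-containerengine": {"Cluster": "ocid1.cluster.oc1..x"}}, true},
	}
	for name, tc := range cases {
		if got := HasOkeSystemTags(tc.tags); got != tc.want {
			t.Errorf("%s: HasOkeSystemTags() = %v, want %v", name, got, tc.want)
		}
	}
}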
