From 36360db0bcac815471a829c1f423e746fe7b81a8 Mon Sep 17 00:00:00 2001 From: Martin Matyas Date: Wed, 28 Feb 2024 16:23:40 +0100 Subject: [PATCH] Workload tests adapted to centralized result logging Signed-off-by: Martin Matyas --- embedded_files/points.yml | 69 ++++++- src/tasks/utils/points.cr | 2 +- src/tasks/workload/5g_validator.cr | 36 ++-- src/tasks/workload/compatibility.cr | 139 +++++--------- src/tasks/workload/configuration.cr | 279 +++++++++------------------- src/tasks/workload/microservice.cr | 224 ++++++++-------------- src/tasks/workload/ran.cr | 17 +- src/tasks/workload/reliability.cr | 201 ++++++++------------ src/tasks/workload/security.cr | 277 ++++++++------------------- src/tasks/workload/state.cr | 102 ++++------ 10 files changed, 481 insertions(+), 865 deletions(-) diff --git a/embedded_files/points.yml b/embedded_files/points.yml index 4d9c2c0d1..a570c6d93 100644 --- a/embedded_files/points.yml +++ b/embedded_files/points.yml @@ -8,24 +8,34 @@ neutral: 0 - name: reasonable_image_size + emoji: "βš–πŸ‘€" tags: [microservice, dynamic, workload, cert, normal] - name: specialized_init_system + emoji: "πŸš€" tags: [microservice, dynamic, workload] - name: reasonable_startup_time tags: [microservice, dynamic, workload, cert, normal] -- name: single_process_type +- name: single_process_type + emoji: "βš–πŸ‘€" tags: [microservice, dynamic, workload, essential, cert] pass: 100 +- name: zombie_handled + emoji: "βš–πŸ‘€" + tags: [microservice, dynamic, workload, normal] - name: service_discovery + emoji: "βš–πŸ‘€" tags: [microservice, dynamic, workload, cert, bonus] pass: 1 fail: 0 - name: shared_database + emoji: "πŸ’Ύ" tags: [microservice, dynamic, workload, cert, normal] -- name: sig_term_handled +- name: sig_term_handled + emoji: "βš–πŸ‘€" tags: [microservice, dynamic, workload, normal] - name: cni_compatible + emoji: "πŸ”“πŸ”‘" tags: [compatibility, dynamic, workload, cert, normal] # - name: cni_spec # tags: compatibility, dynamic @@ -41,23 +51,29 @@ #- name: check_reaped # tags: state, dynamic, configuration -- name: privileged +- name: privileged + emoji: "πŸ”“πŸ”‘" tags: [security, dynamic, workload] # required: true -- name: privilege_escalation +- name: privilege_escalation + emoji: "πŸ”“πŸ”‘" tags: [security, dynamic, workload, cert, normal] - name: symlink_file_system + emoji: "πŸ”“πŸ”‘" tags: [security, dynamic, workload, cert, normal] - name: application_credentials + emoji: "πŸ”“πŸ”‘" tags: [security, dynamic, workload, cert, normal] - name: host_network + emoji: "πŸ”“πŸ”‘" tags: [security, dynamic, workload, cert, normal] #- name: shells # tags: security, dynamic #- name: protected_access # tags: security, dynamic -- name: increase_decrease_capacity +- name: increase_decrease_capacity + emoji: "πŸ“¦πŸ“ˆπŸ“‰" tags: [compatibility, dynamic, workload, essential, cert] pass: 100 #- name: small_autoscaling # tags: increase_decrease_capacity, dynamic @@ -67,28 +83,36 @@ # - name: network_chaos # tags: resilience, dynamic, workload - name: pod_network_latency + emoji: "πŸ—‘οΈπŸ’€β™»" tags: [resilience, dynamic, workload, cert, bonus] pass: 1 fail: 0 - name: pod_network_corruption + emoji: "πŸ—‘οΈπŸ’€β™»" tags: [resilience, dynamic, workload, cert, bonus] pass: 1 fail: 0 - name: pod_network_duplication + emoji: "πŸ—‘οΈπŸ’€β™»" tags: [resilience, dynamic, workload, cert, bonus] pass: 1 fail: 0 - name: pod_delete + emoji: "πŸ—‘οΈπŸ’€β™»" tags: [resilience, dynamic, workload, cert, normal] - name: pod_io_stress + emoji: "πŸ—‘οΈπŸ’€β™»" tags: [resilience, dynamic, workload, cert, bonus] pass: 1 fail: 0 - name: 
pod_memory_hog + emoji: "πŸ—‘οΈπŸ’€β™»" tags: [resilience, dynamic, workload, cert, normal] - name: disk_fill + emoji: "πŸ—‘οΈπŸ’€β™»" tags: [resilience, dynamic, workload, cert, normal] - name: pod_dns_error + emoji: "πŸ—‘οΈπŸ’€β™»" tags: [resilience, dynamic, workload, cert, bonus] pass: 1 fail: 0 @@ -98,17 +122,22 @@ #- name: versioned_helm_chart # tags: configuration, dynamic, workload - name: versioned_tag + emoji: "🏷️" tags: [configuration, dynamic, workload] - name: ip_addresses + emoji: "πŸ“ΆπŸƒβ²οΈ" pass: 0 fail: -1 tags: [configuration, static, workload] - name: operator_installed + emoji: "βš–οΈπŸ‘€" tags: [configuration, dynamic, workload, cert, bonus] - name: liveness + emoji: "⎈🧫" tags: [resilience, dynamic, workload, essential, cert] pass: 100 - name: readiness + emoji: "⎈🧫" tags: [resilience, dynamic, workload, essential, cert] pass: 100 #- name: no_volume_with_configuration @@ -130,10 +159,12 @@ tags: [configuration, dynamic, workload, essential, cert] pass: 100 - name: secrets_used + emoji: "🧫" tags: [configuration, dynamic, workload, cert, bonus] pass: 1 fail: 0 - name: immutable_configmap + emoji: "βš–οΈ" tags: [configuration, dynamic, workload, cert, bonus] pass: 1 fail: 0 @@ -150,10 +181,13 @@ # tags: observability, dynamic, workload - name: helm_deploy + emoji: "βš™πŸ› οΈβ¬†β˜" tags: [compatibility, dynamic, workload, cert, normal] - name: helm_chart_valid + emoji: "βŽˆπŸ“β˜‘" tags: [compatibility, dynamic, workload, cert, normal] - name: helm_chart_published + emoji: "βŽˆπŸ“¦πŸŒ" tags: [compatibility, dynamic, workload, cert, normal] # - name: chaos_network_loss @@ -164,21 +198,26 @@ # tags: resilience, dynamic, workload - name: volume_hostpath_not_found + emoji: "πŸ’Ύ" tags: [state, dynamic, workload] - name: no_local_volume_configuration + emoji: "πŸ’Ύ" tags: [state, dynamic, workload, cert, bonus] pass: 1 fail: 0 -- name: elastic_volumes +- name: elastic_volumes + emoji: "🧫" tags: [state, dynamic, workload, cert, bonus] pass: 1 fail: 0 - name: database_persistence + emoji: "🧫" tags: [state, dynamic, workload] pass5: 5 pass3: 3 fail: -1 - name: node_drain + emoji: "πŸ—‘οΈπŸ’€β™»" tags: [state, dynamic, workload, essential, cert] pass: 100 @@ -207,46 +246,57 @@ tags: ["platform", "platform:security", "dynamic"] - name: service_account_mapping + emoji: "πŸ”“πŸ”‘" tags: [security, dynamic, workload, cert, normal] - name: privileged_containers + emoji: "πŸ”“πŸ”‘" tags: [security, dynamic, workload, essential, cert] pass: 100 - name: non_root_containers + emoji: "πŸ”“πŸ”‘" tags: [security, dynamic, workload, essential, cert] pass: 100 - name: host_pid_ipc_privileges + emoji: "πŸ”“πŸ”‘" tags: [security, dynamic, workload, cert, normal] - name: linux_hardening + emoji: "πŸ”“πŸ”‘" tags: [security, dynamic, workload, cert, bonus] pass: 1 fail: 0 - name: resource_policies + emoji: "πŸ”“πŸ”‘" tags: [security, dynamic, workload, cert, essential] pass: 100 - name: immutable_file_systems + emoji: "πŸ”“πŸ”‘" tags: [security, dynamic, workload, cert, bonus] pass: 1 fail: 0 - name: hostpath_mounts + emoji: "πŸ”“πŸ”‘" tags: [security, dynamic, workload, essential, cert] pass: 100 - name: ingress_egress_blocked + emoji: "πŸ”“πŸ”‘" tags: [security, dynamic, workload, cert, bonus] pass: 1 fail: 0 - name: insecure_capabilities + emoji: "πŸ”“πŸ”‘" tags: [security, dynamic, workload, cert, normal] - name: sysctls + emoji: "πŸ”“πŸ”‘" tags: [security, dynamic, workload, cert, normal] - name: log_output @@ -274,29 +324,36 @@ pass: 1 fail: 0 - name: alpha_k8s_apis + emoji: 
"β­•πŸ”" tags: [configuration, dynamic, workload] - name: container_sock_mounts + emoji: "πŸ”“πŸ”‘" tags: [security, dynamic, workload, essential, cert] pass: 100 - name: require_labels + emoji: "🏷️" tags: [configuration, dynamic, workload, cert, normal] - name: helm_tiller tags: ["platform", "platform:security", "dynamic"] - name: external_ips + emoji: "πŸ”“πŸ”‘" tags: [security, dynamic, workload, cert, normal] - name: selinux_options + emoji: "πŸ”“πŸ”‘" tags: [security, dynamic, workload, essential, cert] pass: 100 - name: default_namespace + emoji: "🏷️" tags: [configuration, dynamic, workload, cert, normal] - name: latest_tag + emoji: "🏷️" tags: [configuration, dynamic, workload, essential, cert] pass: 100 diff --git a/src/tasks/utils/points.cr b/src/tasks/utils/points.cr index de9d715d1..43a6bfb2f 100644 --- a/src/tasks/utils/points.cr +++ b/src/tasks/utils/points.cr @@ -556,7 +556,7 @@ module CNFManager Log.debug { "task #{task} emoji: #{md["emoji"]?}" } resp = md["emoji"] else - resp = [] of String + resp = "" end end diff --git a/src/tasks/workload/5g_validator.cr b/src/tasks/workload/5g_validator.cr index b9d325fc8..ae8f52bf4 100644 --- a/src/tasks/workload/5g_validator.cr +++ b/src/tasks/workload/5g_validator.cr @@ -17,10 +17,7 @@ end desc "Test if a 5G core is valid" task "smf_upf_core_validator" do |t, args| #todo change to 5g_core_validator - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "smf_upf_core_validator" - Log.for(testsuite_task).info { "Starting test" } + CNFManager::Task.task_runner(args, task: t) do |args, config| # todo add other resilience and compatiblity tests @@ -33,11 +30,8 @@ end desc "Test if a 5G core has SMF/UPF heartbeat" task "smf_upf_heartbeat" do |t, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "smf_upf_heartbeat" - Log.for(testsuite_task).info { "Starting test" } - Log.for(testsuite_task).info { "named args: #{args.named}" } + CNFManager::Task.task_runner(args, task: t) do |args, config| + Log.for(t.name).info { "named args: #{args.named}" } baseline_count : Int32 | Float64 | String | Nil if args.named["baseline_count"]? baseline_count = args.named["baseline_count"].to_i @@ -45,7 +39,6 @@ task "smf_upf_heartbeat" do |t, args| baseline_count = nil end - Log.debug { "cnf_config: #{config}" } suci_found : Bool | Nil smf = config.cnf_config[:smf_label]? upf = config.cnf_config[:upf_label]? 
@@ -133,24 +126,18 @@ task "smf_upf_heartbeat" do |t, args| end #todo move this to validator code code - if heartbeat_found - resp = upsert_passed_task(testsuite_task,"βœ”οΈ PASSED: Chaos service degradation is less than 50%.", task_start_time) + if heartbeat_found + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Chaos service degradation is less than 50%") else - resp = upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Chaos service degradation is more than 50%.", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Chaos service degradation is more than 50%") end - resp end end #todo move to 5g test files desc "Test if a 5G core supports SUCI Concealment" -task "suci_enabled" do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "suci_enabled" - Log.for(testsuite_task).info { "Starting test" } - - Log.debug { "cnf_config: #{config}" } +task "suci_enabled" do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| suci_found : Bool | Nil core = config.cnf_config[:amf_label]? Log.info { "core: #{core}" } @@ -192,12 +179,11 @@ task "suci_enabled" do |_, args| end - if suci_found - resp = upsert_passed_task(testsuite_task,"βœ”οΈ PASSED: Core uses SUCI 5g authentication", task_start_time) + if suci_found + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Core uses SUCI 5g authentication") else - resp = upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Core does not use SUCI 5g authentication", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Core does not use SUCI 5g authentication") end - resp ensure Helm.delete("ueransim") ClusterTools.uninstall diff --git a/src/tasks/workload/compatibility.cr b/src/tasks/workload/compatibility.cr index 2c1e7d5e3..cb01d3601 100644 --- a/src/tasks/workload/compatibility.cr +++ b/src/tasks/workload/compatibility.cr @@ -22,15 +22,10 @@ rolling_version_change_test_names.each do |tn| pretty_test_name_capitalized = tn.split(/:|_/).map(&.capitalize).join(" ") desc "Test if the CNF containers are loosely coupled by performing a #{pretty_test_name}" - task "#{tn}" do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = tn - Log.for(testsuite_task).info { "Starting test" } - - Log.for(testsuite_task).debug { "cnf_config: #{config}" } + task "#{tn}" do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| container_names = config.cnf_config[:container_names] - Log.for(testsuite_task).debug { "container_names: #{container_names}" } + Log.for(t.name).debug { "container_names: #{container_names}" } update_applied = true unless container_names puts "Please add a container names set of entries into your cnf-testsuite.yml".colorize(:red) @@ -46,8 +41,8 @@ rolling_version_change_test_names.each do |tn| namespace = resource["namespace"] || config.cnf_config[:helm_install_namespace] test_passed = true valid_cnf_testsuite_yml = true - Log.for(testsuite_task).debug { "container: #{container}" } - Log.for(testsuite_task).debug { "container_names: #{container_names}" } + Log.for(t.name).debug { "container: #{container}" } + Log.for(t.name).debug { "container_names: #{container_names}" } #todo use skopeo to get the next and previous versions of the cnf image dynamically config_container = container_names.find{|x| x["name"]==container.as_h["name"]} if container_names LOGGING.debug "config_container: #{config_container}" 
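The hunks above establish the pattern this patch applies across every workload test: each task block now receives the task object (`|t, args|`), hands it to `CNFManager::Task.task_runner(args, task: t)`, and returns a `CNFManager::TestcaseResult` as its value instead of calling `upsert_passed_task`/`upsert_failed_task` with a hand-tracked name and start time. Status icons and emoji also leave the message strings; the emoji presumably now come from the per-test `emoji:` entries added to `points.yml`. A minimal before/after sketch of the convention, where `check_passed?` is a hypothetical stand-in for a task's real test logic:

```crystal
# Before: every task tracked its own name, start time, and result emoji.
task "example_test" do |_, args|
  CNFManager::Task.task_runner(args) do |args, config|
    task_start_time = Time.utc
    testsuite_task = "example_test"
    if check_passed?(config) # hypothetical test logic
      upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: example check passed", task_start_time)
    else
      upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: example check failed", task_start_time)
    end
  end
end

# After: the block only decides the outcome; task_runner is handed the task
# object and (per this patch) derives the name, timing, status icon, and
# emoji centrally, so the message carries only the test-specific detail.
task "example_test" do |t, args|
  CNFManager::Task.task_runner(args, task: t) do |args, config|
    if check_passed?(config) # hypothetical test logic
      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "example check passed")
    else
      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "example check failed")
    end
  end
end
```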
@@ -86,11 +81,10 @@ rolling_version_change_test_names.each do |tn| end VERBOSE_LOGGING.debug "#{tn}: task_response=#{task_response}" if check_verbose(args) if task_response - resp = upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: CNF for #{pretty_test_name_capitalized} Passed", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "CNF for #{pretty_test_name_capitalized} Passed") else - resp = upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: CNF for #{pretty_test_name_capitalized} Failed", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "CNF for #{pretty_test_name_capitalized} Failed") end - resp # TODO should we roll the image back to original version in an ensure? # TODO Use the kubectl rollback to history command end @@ -98,16 +92,10 @@ rolling_version_change_test_names.each do |tn| end desc "Test if the CNF can perform a rollback" -task "rollback" do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "rollback" - Log.for(testsuite_task).info { "Starting test" } - - Log.for(testsuite_task).debug { "cnf_config: #{config}" } - +task "rollback" do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| container_names = config.cnf_config[:container_names] - Log.for(testsuite_task).debug { "container_names: #{container_names}" } + Log.for(t.name).debug { "container_names: #{container_names}" } update_applied = true rollout_status = true @@ -128,7 +116,7 @@ task "rollback" do |_, args| image_name = full_image_name_tag[0] image_tag = full_image_name_tag[2] - Log.for(testsuite_task).debug { + Log.for(t.name).debug { "Rollback: setting new version; resource=#{resource_kind}/#{resource_name}; container_name=#{container_name}; image_name=#{image_name}; image_tag: #{image_tag}" } #do_update = `kubectl set image deployment/coredns-coredns coredns=coredns/coredns:latest --record` @@ -144,11 +132,11 @@ task "rollback" do |_, args| rollback_from_tag = config_container["rollback_from_tag"] if rollback_from_tag == image_tag - stdout_failure("βœ–οΈ FAILED: please specify a different version than the helm chart default image.tag for 'rollback_from_tag' ") + stdout_failure("Rollback not possible. Please specify a different version than the helm chart default image.tag for 'rollback_from_tag' ") version_change_applied=false end - Log.for(testsuite_task).debug { + Log.for(t.name).debug { "rollback: update #{resource_kind}/#{resource_name}, container: #{container_name}, image: #{image_name}, tag: #{rollback_from_tag}" } # set a temporary image/tag, so that we can rollback to the current (original) tag later @@ -162,25 +150,25 @@ task "rollback" do |_, args| ) end - Log.for(testsuite_task).info { "rollback version change successful? #{version_change_applied}" } + Log.for(t.name).info { "rollback version change successful? 
#{version_change_applied}" } - Log.for(testsuite_task).debug { "rollback: checking status new version" } + Log.for(t.name).debug { "rollback: checking status new version" } rollout_status = KubectlClient::Rollout.status(resource_kind, resource_name, namespace: namespace, timeout: "180s") if rollout_status == false stdout_failure("Rollback failed on resource: #{resource_kind}/#{resource_name} and container: #{container_name}") end # https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-back-to-a-previous-revision - Log.for(testsuite_task).debug { "rollback: rolling back to old version" } + Log.for(t.name).debug { "rollback: rolling back to old version" } rollback_status = KubectlClient::Rollout.undo(resource_kind, resource_name, namespace: namespace) end if task_response && version_change_applied && rollout_status && rollback_status - upsert_passed_task(testsuite_task,"βœ”οΈ PASSED: CNF Rollback Passed", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "CNF Rollback Passed") else - upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: CNF Rollback Failed", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "CNF Rollback Failed") end end end @@ -188,13 +176,7 @@ end desc "Test increasing/decreasing capacity" task "increase_decrease_capacity" do |t, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "increase_decrease_capacity" - Log.for(testsuite_task).info { "Starting test" } - - Log.for(testsuite_task).info { "increase_decrease_capacity" } - + CNFManager::Task.task_runner(args, task: t) do |args, config| increase_test_base_replicas = "1" increase_test_target_replicas = "3" @@ -231,21 +213,16 @@ task "increase_decrease_capacity" do |t, args| end decrease_task_successful = !decrease_task_response.nil? && decrease_task_response.none?(false) - emoji_capacity = "πŸ“¦πŸ“ˆπŸ“‰" - pass_msg = "βœ”οΈ πŸ† PASSED: Replicas increased to #{increase_test_target_replicas} and decreased to #{decrease_test_target_replicas} #{emoji_capacity}" - fail_msg = "βœ–οΈ FAILURE: Capacity change failed #{emoji_capacity}" - - if increase_task_successful && decrease_task_successful - upsert_passed_task(testsuite_task, pass_msg, task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Replicas increased to #{increase_test_target_replicas} and decreased to #{decrease_test_target_replicas}") else - upsert_failed_task(testsuite_task, fail_msg, task_start_time) stdout_failure(increase_decrease_remedy_msg()) unless increase_task_successful stdout_failure("Failed to increase replicas from #{increase_test_base_replicas} to #{increase_test_target_replicas}") else stdout_failure("Failed to decrease replicas from #{decrease_test_base_replicas} to #{decrease_test_target_replicas}") end + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Capacity change failed") end end end @@ -427,17 +404,11 @@ def wait_for_scaling(resource, target_replica_count, args) end desc "Will the CNF install using helm with helm_deploy?" -task "helm_deploy" do |_, args| - task_start_time = Time.utc - testsuite_task = "helm_deploy" - Log.for(testsuite_task).info { "Running #{testsuite_task}" } - Log.for(testsuite_task).info { "helm_deploy args: #{args.inspect}" } if check_verbose(args) +task "helm_deploy" do |t, args| + Log.for(t.name).info { "helm_deploy args: #{args.inspect}" } if check_verbose(args) if check_cnf_config(args) || CNFManager.destination_cnfs_exist? 
- CNFManager::Task.task_runner(args) do |args, config| - Log.for(testsuite_task).info { "Starting test" } - - emoji_helm_deploy="βš™οΈπŸ› οΈβ¬†β˜οΈ" + CNFManager::Task.task_runner(args, task: t) do |args, config| helm_chart = config.cnf_config[:helm_chart] helm_directory = config.cnf_config[:helm_directory] release_name = config.cnf_config[:release_name] @@ -447,22 +418,18 @@ task "helm_deploy" do |_, args| helm_used = configmap["data"].as_h["helm_used"].as_s if helm_used == "true" - upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: Helm deploy successful #{emoji_helm_deploy}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Helm deploy successful") else - upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Helm deploy failed #{emoji_helm_deploy}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Helm deploy failed") end end else - upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: No cnf_testsuite.yml found! Did you run the setup task?", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "No cnf_testsuite.yml found! Did you run the setup task?") end end -task "helm_chart_published", ["helm_local_install"] do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "helm_chart_published" - Log.for(testsuite_task).info { "Starting test" } - +task "helm_chart_published", ["helm_local_install"] do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| if check_verbose(args) Log.for("verbose").debug { "helm_chart_published args.raw: #{args.raw}" } Log.for("verbose").debug { "helm_chart_published args.named: #{args.named}" } @@ -472,7 +439,6 @@ task "helm_chart_published", ["helm_local_install"] do |_, args| # config = CNFManager.parsed_config_file(CNFManager.ensure_cnf_testsuite_yml_path(args.named["cnf-config"].as(String))) # helm_chart = "#{config.get("helm_chart").as_s?}" helm_chart = config.cnf_config[:helm_chart] - emoji_published_helm_chart="βŽˆπŸ“¦πŸŒ" current_dir = FileUtils.pwd helm = Helm::BinarySingleton.helm Log.for("verbose").debug { helm } if check_verbose(args) @@ -480,7 +446,7 @@ task "helm_chart_published", ["helm_local_install"] do |_, args| if CNFManager.helm_repo_add(args: args) unless helm_chart.empty? 
helm_search_cmd = "#{helm} search repo #{helm_chart}" - Log.for(testsuite_task).info { "helm search command: #{helm_search_cmd}" } + Log.for(t.name).info { "helm search command: #{helm_search_cmd}" } Process.run( helm_search_cmd, shell: true, @@ -490,25 +456,21 @@ task "helm_chart_published", ["helm_local_install"] do |_, args| helm_search = helm_search_stdout.to_s Log.for("verbose").debug { "#{helm_search}" } if check_verbose(args) unless helm_search =~ /No results found/ - upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: Published Helm Chart Found #{emoji_published_helm_chart}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Published Helm Chart Found") else - upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Published Helm Chart Not Found #{emoji_published_helm_chart}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Published Helm Chart Not Found") end else - upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Published Helm Chart Not Found #{emoji_published_helm_chart}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Published Helm Chart Not Found") end else - upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Published Helm Chart Not Found #{emoji_published_helm_chart}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Published Helm Chart Not Found") end end end -task "helm_chart_valid", ["helm_local_install"] do |_, args| - CNFManager::Task.task_runner(args) do |args| - task_start_time = Time.utc - testsuite_task = "helm_chart_valid" - Log.for(testsuite_task).info { "Starting test" } - +task "helm_chart_valid", ["helm_local_install"] do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args| if check_verbose(args) Log.for("verbose").debug { "helm_chart_valid args.raw: #{args.raw}" } Log.for("verbose").debug { "helm_chart_valid args.named: #{args.named}" } @@ -532,9 +494,8 @@ task "helm_chart_valid", ["helm_local_install"] do |_, args| Log.for("verbose").debug { "working_chart_directory: #{working_chart_directory}" } if check_verbose(args) current_dir = FileUtils.pwd - Log.for(testsuite_task).debug { "current dir: #{current_dir}" } + Log.for(t.name).debug { "current dir: #{current_dir}" } helm = Helm::BinarySingleton.helm - emoji_helm_lint="βŽˆπŸ“β˜‘οΈ" destination_cnf_dir = CNFManager.cnf_destination_dir(CNFManager.ensure_cnf_testsuite_dir(args.named["cnf-config"].as(String))) @@ -546,12 +507,12 @@ task "helm_chart_valid", ["helm_local_install"] do |_, args| error: helm_link_stderr = IO::Memory.new ) helm_lint = helm_lint_stdout.to_s - Log.for(testsuite_task).debug { "helm_lint: #{helm_lint}" } if check_verbose(args) + Log.for(t.name).debug { "helm_lint: #{helm_lint}" } if check_verbose(args) if helm_lint_status.success? 
- upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: Helm Chart #{working_chart_directory} Lint Passed #{emoji_helm_lint}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Helm Chart #{working_chart_directory} Lint Passed") else - upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Helm Chart #{working_chart_directory} Lint Failed #{emoji_helm_lint}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Helm Chart #{working_chart_directory} Lint Failed") end end end @@ -561,9 +522,9 @@ task "validate_config" do |_, args| valid, warning_output = CNFManager.validate_cnf_testsuite_yml(yml) emoji_config="πŸ“‹" if valid - stdout_success "βœ”οΈ PASSED: CNF configuration validated #{emoji_config}" + stdout_success "CNF configuration validated #{emoji_config}" else - stdout_failure "❌ FAILED: Critical Error with CNF Configuration. Please review USAGE.md for steps to set up a valid CNF configuration file #{emoji_config}" + stdout_failure "Critical Error with CNF Configuration. Please review USAGE.md for steps to set up a valid CNF configuration file #{emoji_config}" end end @@ -646,14 +607,8 @@ def setup_cilium_cluster(cluster_name : String, offline : Bool) : KindManager::C end desc "CNFs should work with any Certified Kubernetes product and any CNI-compatible network that meet their functionality requirements." -task "cni_compatible" do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "cni_compatible" - Log.for(testsuite_task).info { "Starting test" } - - emoji_security="πŸ”“πŸ”‘" - +task "cni_compatible" do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| docker_version = DockerClient.version_info() if docker_version.installed? ensure_kubeconfig! @@ -676,9 +631,9 @@ task "cni_compatible" do |_, args| puts "CNF failed to install on Cilium CNI cluster".colorize(:red) unless cilium_cnf_passed if calico_cnf_passed && cilium_cnf_passed - upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: CNF compatible with both Calico and Cilium #{emoji_security}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "CNF compatible with both Calico and Cilium") else - upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: CNF not compatible with either Calico or Cillium #{emoji_security}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "CNF not compatible with either Calico or Cilium") end ensure kind_manager = KindManager.new @@ -687,7 +642,7 @@ task "cni_compatible" do |_, args| ENV["KUBECONFIG"]="#{kubeconfig_orig}" end else - upsert_skipped_task(testsuite_task, "⏭️ SKIPPED: Docker not installed #{emoji_security}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Skipped, "Docker not installed") end end end diff --git a/src/tasks/workload/configuration.cr b/src/tasks/workload/configuration.cr index eb24b6836..26e15f4cd 100644 --- a/src/tasks/workload/configuration.cr +++ b/src/tasks/workload/configuration.cr @@ -32,15 +32,9 @@ task "configuration", [ end desc "Check if the CNF is running containers with labels configured?" 
-task "require_labels" do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "require_labels" - Log.for(testsuite_task).info { "Starting test" } - +task "require_labels" do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| Kyverno.install - emoji_passed = "πŸ·οΈβœ”οΈ" - emoji_failed = "🏷️❌" policy_path = Kyverno.best_practice_policy("require_labels/require_labels.yaml") failures = Kyverno::PolicyAudit.run(policy_path, EXCLUDE_NAMESPACES) @@ -48,28 +42,22 @@ task "require_labels" do |_, args| failures = Kyverno.filter_failures_for_cnf_resources(resource_keys, failures) if failures.size == 0 - resp = upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: Pods have the app.kubernetes.io/name label #{emoji_passed}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Pods have the app.kubernetes.io/name label") else - resp = upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Pods should have the app.kubernetes.io/name label. #{emoji_failed}", task_start_time) failures.each do |failure| failure.resources.each do |resource| puts "#{resource.kind} #{resource.name} in #{resource.namespace} namespace failed. #{failure.message}".colorize(:red) end end + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Pods should have the app.kubernetes.io/name label.") end end end desc "Check if the CNF installs resources in the default namespace" -task "default_namespace" do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "default_namespace" - Log.for(testsuite_task).info { "Starting test" } - +task "default_namespace" do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| Kyverno.install - emoji_passed = "πŸ·οΈβœ”οΈ" - emoji_failed = "🏷️❌" policy_path = Kyverno.best_practice_policy("disallow_default_namespace/disallow_default_namespace.yaml") failures = Kyverno::PolicyAudit.run(policy_path, EXCLUDE_NAMESPACES) @@ -77,29 +65,23 @@ task "default_namespace" do |_, args| failures = Kyverno.filter_failures_for_cnf_resources(resource_keys, failures) if failures.size == 0 - resp = upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: default namespace is not being used #{emoji_passed}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "default namespace is not being used") else - resp = upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Resources are created in the default namespace #{emoji_failed}", task_start_time) failures.each do |failure| failure.resources.each do |resource| puts "#{resource.kind} #{resource.name} in #{resource.namespace} namespace failed. 
#{failure.message}".colorize(:red) end end + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Resources are created in the default namespace") end end end desc "Check if the CNF uses container images with the latest tag" -task "latest_tag" do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "latest_tag" - Log.for(testsuite_task).info { "Starting test" } - +task "latest_tag" do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| Kyverno.install - emoji_passed = "πŸ·οΈβœ”οΈ" - emoji_failed = "🏷️❌" policy_path = Kyverno.best_practice_policy("disallow_latest_tag/disallow_latest_tag.yaml") failures = Kyverno::PolicyAudit.run(policy_path, EXCLUDE_NAMESPACES) @@ -107,28 +89,23 @@ task "latest_tag" do |_, args| failures = Kyverno.filter_failures_for_cnf_resources(resource_keys, failures) if failures.size == 0 - resp = upsert_passed_task(testsuite_task, "βœ”οΈ πŸ† PASSED: Container images are not using the latest tag #{emoji_passed}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Container images are not using the latest tag") else - resp = upsert_failed_task(testsuite_task, "βœ–οΈ πŸ† FAILED: Container images are using the latest tag #{emoji_failed}", task_start_time) failures.each do |failure| failure.resources.each do |resource| puts "#{resource.kind} #{resource.name} in #{resource.namespace} namespace failed. #{failure.message}".colorize(:red) end end + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Container images are using the latest tag") end end end desc "Does a search for IP addresses or subnets come back as negative?" -task "ip_addresses" do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "ip_addresses" - Log.for(testsuite_task).info { "Starting test" } - +task "ip_addresses" do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| cdir = FileUtils.pwd() response = String::Builder.new - emoji_network_runtime = "πŸ“ΆπŸƒβ²οΈ" helm_directory = config.cnf_config[:helm_directory] helm_chart_path = config.cnf_config[:helm_chart_path] Log.info { "Path: #{helm_chart_path}" } @@ -136,7 +113,7 @@ task "ip_addresses" do |_, args| # Switch to the helm chart directory Dir.cd(helm_chart_path) # Look for all ip addresses that are not comments - Log.for(testsuite_task).info { "current directory: #{ FileUtils.pwd()}" } + Log.for(t.name).info { "current directory: #{ FileUtils.pwd()}" } # should catch comments (# // or /*) and ignore 0.0.0.0 # note: grep wants * escaped twice Process.run("grep -r -P '^(?!.+0\.0\.0\.0)(?![[:space:]]*0\.0\.0\.0)(?!#)(?![[:space:]]*#)(?!\/\/)(?![[:space:]]*\/\/)(?!\/\\*)(?![[:space:]]*\/\\*)(.+([0-9]{1,3}[\.]){3}[0-9]{1,3})' --exclude=*.txt", shell: true) do |proc| @@ -156,22 +133,21 @@ task "ip_addresses" do |_, args| matching_line = line_parts.join(":").strip() stdout_failure(" * In file #{file_name}: #{matching_line}") end - resp = upsert_failed_task(testsuite_task,"βœ–οΈ FAILED: IP addresses found #{emoji_network_runtime}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "IP addresses found") else - resp = upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: No IP addresses found #{emoji_network_runtime}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "No IP addresses found") end - resp else # TODO If no helm chart directory, exit with 0 points # 
ADD SKIPPED tag for points.yml to allow for 0 points Dir.cd(cdir) - resp = upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: No IP addresses found #{emoji_network_runtime}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "No IP addresses found") end end end desc "Do all cnf images have versioned tags?" -task "versioned_tag", ["install_opa"] do |_, args| +task "versioned_tag", ["install_opa"] do |t, args| # todo wait for opa # unless KubectlClient::Get.resource_wait_for_install("Daemonset", "falco") # LOGGING.info "Falco Failed to Start" @@ -185,12 +161,7 @@ task "versioned_tag", ["install_opa"] do |_, args| # next # end # - CNFManager::Task.task_runner(args) do |args,config| - task_start_time = Time.utc - testsuite_task = "versioned_tag" - Log.for(testsuite_task).info { "Starting test" } - - Log.for(testsuite_task).debug { "cnf_config: #{config}" } + CNFManager::Task.task_runner(args, task: t) do |args,config| fail_msgs = [] of String task_response = CNFManager.workload_resource_test(args, config) do |resource, container, initialized| test_passed = true @@ -216,41 +187,33 @@ task "versioned_tag", ["install_opa"] do |_, args| end test_passed end - emoji_versioned_tag="πŸ·οΈβœ”οΈ" - emoji_non_versioned_tag="🏷️❌" if task_response - upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: Container images use versioned tags #{emoji_versioned_tag}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Container images use versioned tags") else - upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Container images do not use versioned tags #{emoji_non_versioned_tag}", task_start_time) fail_msgs.each do |msg| stdout_failure(msg) end + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Container images do not use versioned tags") end end end desc "Does the CNF use NodePort" -task "nodeport_not_used" do |_, args| +task "nodeport_not_used" do |t, args| # TODO rename task_runner to multi_cnf_task_runner - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "nodeport_not_used" - Log.for(testsuite_task).info { "Starting test" } - - Log.for(testsuite_task).debug { "cnf_config: #{config}" } - + CNFManager::Task.task_runner(args, task: t) do |args, config| release_name = config.cnf_config[:release_name] service_name = config.cnf_config[:service_name] destination_cnf_dir = config.cnf_config[:destination_cnf_dir] task_response = CNFManager.workload_resource_test(args, config, check_containers: false, check_service: true) do |resource, container, initialized| - Log.for(testsuite_task).info { "nodeport_not_used resource: #{resource}" } + Log.for(t.name).info { "nodeport_not_used resource: #{resource}" } if resource["kind"].downcase == "service" - Log.for(testsuite_task).info { "resource kind: #{resource}" } + Log.for(t.name).info { "resource kind: #{resource}" } service = KubectlClient::Get.resource(resource[:kind], resource[:name], resource[:namespace]) - Log.for(testsuite_task).debug { "service: #{service}" } + Log.for(t.name).debug { "service: #{service}" } service_type = service.dig?("spec", "type") - Log.for(testsuite_task).info { "service_type: #{service_type}" } + Log.for(t.name).info { "service_type: #{service_type}" } if service_type == "NodePort" #TODO make a service selector and display the related resources # that are tied to this service @@ -261,45 +224,40 @@ task "nodeport_not_used" do |_, args| end end if task_response - upsert_passed_task(testsuite_task, "βœ”οΈ 
PASSED: NodePort is not used", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "NodePort is not used") else - upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: NodePort is being used", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "NodePort is being used") end end end desc "Does the CNF use HostPort" -task "hostport_not_used" do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "hostport_not_used" - Log.for(testsuite_task).info { "Starting test" } - - Log.for(testsuite_task).debug { "cnf_config: #{config}" } +task "hostport_not_used" do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| release_name = config.cnf_config[:release_name] service_name = config.cnf_config[:service_name] destination_cnf_dir = config.cnf_config[:destination_cnf_dir] task_response = CNFManager.workload_resource_test(args, config, check_containers: false, check_service: true) do |resource, container, initialized| - Log.for(testsuite_task).info { "hostport_not_used resource: #{resource}" } + Log.for(t.name).info { "hostport_not_used resource: #{resource}" } test_passed=true - Log.for(testsuite_task).info { "resource kind: #{resource}" } + Log.for(t.name).info { "resource kind: #{resource}" } k8s_resource = KubectlClient::Get.resource(resource[:kind], resource[:name], resource[:namespace]) - Log.for(testsuite_task).debug { "resource: #{k8s_resource}" } + Log.for(t.name).debug { "resource: #{k8s_resource}" } # per examaple https://github.com/cnti-testcatalog/testsuite/issues/164#issuecomment-904890977 containers = k8s_resource.dig?("spec", "template", "spec", "containers") - Log.for(testsuite_task).debug { "containers: #{containers}" } + Log.for(t.name).debug { "containers: #{containers}" } containers && containers.as_a.each do |single_container| ports = single_container.dig?("ports") ports && ports.as_a.each do |single_port| - Log.for(testsuite_task).debug { "single_port: #{single_port}" } + Log.for(t.name).debug { "single_port: #{single_port}" } hostport = single_port.dig?("hostPort") - Log.for(testsuite_task).debug { "DAS hostPort: #{hostport}" } + Log.for(t.name).debug { "DAS hostPort: #{hostport}" } if hostport stdout_failure("Resource #{resource[:kind]}/#{resource[:name]} in #{resource[:namespace]} namespace is using a HostPort") @@ -311,20 +269,16 @@ task "hostport_not_used" do |_, args| test_passed end if task_response - upsert_passed_task(testsuite_task, "βœ”οΈ πŸ† PASSED: HostPort is not used", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "HostPort is not used") else - upsert_failed_task(testsuite_task, "βœ–οΈ πŸ† FAILED: HostPort is being used", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "HostPort is being used") end end end desc "Does the CNF have hardcoded IPs in the K8s resource configuration" -task "hardcoded_ip_addresses_in_k8s_runtime_configuration" do |_, args| - task_response = CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "hardcoded_ip_addresses_in_k8s_runtime_configuration" - Log.for(testsuite_task).info { "Starting test" } - +task "hardcoded_ip_addresses_in_k8s_runtime_configuration" do |t, args| + task_response = CNFManager::Task.task_runner(args, task: t) do |args, config| helm_chart = config.cnf_config[:helm_chart] helm_directory = config.cnf_config[:helm_directory] release_name = 
config.cnf_config[:release_name] @@ -337,7 +291,7 @@ task "hardcoded_ip_addresses_in_k8s_runtime_configuration" do |_, args| unless helm_chart.empty? if args.named["offline"]? info = AirGap.tar_info_by_config_src(helm_chart) - Log.for(testsuite_task).info { "airgapped mode info: #{info}" } + Log.for(t.name).info { "airgapped mode info: #{info}" } helm_chart = info[:tar_name] end helm_install = Helm.install("--namespace hardcoded-ip-test hardcoded-ip-test #{helm_chart} --dry-run --debug > #{destination_cnf_dir}/helm_chart.yml") @@ -355,47 +309,40 @@ task "hardcoded_ip_addresses_in_k8s_runtime_configuration" do |_, args| VERBOSE_LOGGING.info "IPs: #{ip_search}" if check_verbose(args) if ip_search.empty? - upsert_passed_task(testsuite_task, "βœ”οΈ πŸ† PASSED: No hard-coded IP addresses found in the runtime K8s configuration", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "No hard-coded IP addresses found in the runtime K8s configuration") else - upsert_failed_task(testsuite_task, "βœ–οΈ πŸ† FAILED: Hard-coded IP addresses found in the runtime K8s configuration", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Hard-coded IP addresses found in the runtime K8s configuration") end rescue - upsert_skipped_task(testsuite_task, "⏭️ πŸ† SKIPPED: unknown exception", Time.utc) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Skipped, "unknown exception") ensure KubectlClient::Delete.command("namespace hardcoded-ip-test --force --grace-period 0") end end desc "Does the CNF use K8s Secrets?" -task "secrets_used" do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "secrets_used" - Log.for(testsuite_task).info { "Starting test" } - - Log.for(testsuite_task).debug { "cnf_config: #{config}" } - +task "secrets_used" do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| # Parse the cnf-testsuite.yml resp = "" - emoji_probe="🧫" task_response = CNFManager.workload_resource_test(args, config, check_containers: false) do |resource, containers, volumes, initialized| - Log.for(testsuite_task).info { "resource: #{resource}" } - Log.for(testsuite_task).info { "volumes: #{volumes}" } + Log.for(t.name).info { "resource: #{resource}" } + Log.for(t.name).info { "volumes: #{volumes}" } volume_test_passed = false container_secret_mounted = false # Check to see any volume secrets are actually used volumes.as_a.each do |secret_volume| if secret_volume["secret"]? - Log.for(testsuite_task).info { "secret_volume: #{secret_volume["name"]}" } + Log.for(t.name).info { "secret_volume: #{secret_volume["name"]}" } container_secret_mounted = false containers.as_a.each do |container| if container["volumeMounts"]? vmount = container["volumeMounts"].as_a - Log.for(testsuite_task).info { "vmount: #{vmount}" } - Log.for(testsuite_task).debug { "container[env]: #{container["env"]}" } + Log.for(t.name).info { "vmount: #{vmount}" } + Log.for(t.name).debug { "container[env]: #{container["env"]}" } if (vmount.find { |x| x["name"] == secret_volume["name"]? 
}) - Log.for(testsuite_task).debug { secret_volume["name"] } + Log.for(t.name).debug { secret_volume["name"] } container_secret_mounted = true volume_test_passed = true end @@ -420,26 +367,26 @@ task "secrets_used" do |_, args| s_name = s["metadata"]["name"] s_type = s["type"] s_namespace = s.dig("metadata", "namespace") - Log.for(testsuite_task).info {"secret name: #{s_name}, type: #{s_type}, namespace: #{s_namespace}"} if check_verbose(args) + Log.for(t.name).info {"secret name: #{s_name}, type: #{s_type}, namespace: #{s_namespace}"} if check_verbose(args) end secret_keyref_found_and_not_ignored = false containers.as_a.each do |container| c_name = container["name"] - Log.for(testsuite_task).info { "container: #{c_name} envs #{container["env"]?}" } if check_verbose(args) + Log.for(t.name).info { "container: #{c_name} envs #{container["env"]?}" } if check_verbose(args) if container["env"]? Log.for("container_info").info { container["env"] } container["env"].as_a.find do |env| - Log.for(testsuite_task).debug { "checking container: #{c_name}" } if check_verbose(args) + Log.for(t.name).debug { "checking container: #{c_name}" } if check_verbose(args) secret_keyref_found_and_not_ignored = secrets["items"].as_a.find do |s| s_name = s["metadata"]["name"] if IGNORED_SECRET_TYPES.includes?(s["type"]) Log.for("verbose").info { "container: #{c_name} ignored secret: #{s_name}" } if check_verbose(args) next end - Log.for(testsuite_task).info { "Checking secret: #{s_name}" } + Log.for(t.name).info { "Checking secret: #{s_name}" } found = (s_name == env.dig?("valueFrom", "secretKeyRef", "name")) if found - Log.for(testsuite_task).info { "secret_reference_found. container: #{c_name} found secret reference: #{s_name}" } + Log.for(t.name).info { "secret_reference_found. container: #{c_name} found secret reference: #{s_name}" } end found end @@ -461,11 +408,11 @@ task "secrets_used" do |_, args| test_passed end if task_response - resp = upsert_passed_task(testsuite_task, "βœ”οΈ ✨PASSED: Secrets defined and used #{emoji_probe}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Secrets defined and used") else - resp = upsert_skipped_task(testsuite_task, "⏭️ ✨#{secrets_used_skipped_msg(emoji_probe)}", task_start_time) + puts "Secrets not used. To address this issue please see the USAGE.md documentation".colorize(:yellow) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Skipped, "Secrets not used") end - resp end end @@ -569,17 +516,10 @@ def container_env_configmap_refs( end desc "Does the CNF use immutable configmaps?" 
-task "immutable_configmap" do |_, args| +task "immutable_configmap" do |t, args| resp = "" - emoji_probe="βš–οΈ" - - task_response = CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "immutable_configmap" - Log.for(testsuite_task).info { "Starting test" } - - Log.for(testsuite_task).debug { "cnf_config: #{config}" } + task_response = CNFManager::Task.task_runner(args, task: t) do |args, config| destination_cnf_dir = config.cnf_config[:destination_cnf_dir] # https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/ @@ -590,14 +530,14 @@ task "immutable_configmap" do |_, args| test_config_map_filename = "#{destination_cnf_dir}/config_maps/test_config_map.yml"; template = ImmutableConfigMapTemplate.new("doesnt_matter").to_s - Log.for(testsuite_task).debug { "test immutable_configmap template: #{template}" } + Log.for(t.name).debug { "test immutable_configmap template: #{template}" } File.write(test_config_map_filename, template) KubectlClient::Apply.file(test_config_map_filename) # now we change then apply again template = ImmutableConfigMapTemplate.new("doesnt_matter_again").to_s - Log.for(testsuite_task).debug { "test immutable_configmap change template: #{template}" } + Log.for(t.name).debug { "test immutable_configmap change template: #{template}" } File.write(test_config_map_filename, template) immutable_configmap_supported = true @@ -611,14 +551,12 @@ task "immutable_configmap" do |_, args| KubectlClient::Delete.file(test_config_map_filename) if apply_result[:status].success? - Log.for(testsuite_task).info { "kubectl apply on immutable configmap succeeded for: #{test_config_map_filename}" } + Log.for(t.name).info { "kubectl apply on immutable configmap succeeded for: #{test_config_map_filename}" } k8s_ver = KubectlClient.server_version if version_less_than(k8s_ver, "1.19.0") - resp = " ⏭️ SKIPPED: immmutable configmaps are not supported in this k8s cluster.".colorize(:yellow) - upsert_skipped_task(testsuite_task, resp, task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Skipped, "immmutable configmaps are not supported in this k8s cluster") else - resp = "βœ–οΈ FAILED: immmutable configmaps are not enabled in this k8s cluster.".colorize(:red) - upsert_failed_task(testsuite_task, resp, task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "immmutable configmaps are not enabled in this k8s cluster") end else @@ -626,8 +564,8 @@ task "immutable_configmap" do |_, args| envs_with_mutable_configmap = [] of MutableConfigMapsInEnvResult cnf_manager_workload_resource_task_response = CNFManager.workload_resource_test(args, config, check_containers: false, check_service: true) do |resource, containers, volumes, initialized| - Log.for(testsuite_task).info { "resource: #{resource}" } - Log.for(testsuite_task).info { "volumes: #{volumes}" } + Log.for(t.name).info { "resource: #{resource}" } + Log.for(t.name).info { "volumes: #{volumes}" } # If the install type is manifest, the namesapce would be in the manifest. 
# Else rely on config for helm-based install @@ -651,11 +589,8 @@ task "immutable_configmap" do |_, args| end if cnf_manager_workload_resource_task_response - resp = "βœ”οΈ ✨PASSED: All volume or container mounted configmaps immutable #{emoji_probe}".colorize(:green) - upsert_passed_task(testsuite_task, resp, task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "All volume or container mounted configmaps immutable") elsif immutable_configmap_supported - resp = "βœ–οΈ ✨FAILED: Found mutable configmap(s) #{emoji_probe}".colorize(:red) - upsert_failed_task(testsuite_task, resp, task_start_time) # Print out any mutable configmaps mounted as volumes volumes_test_results.each do |result| @@ -671,24 +606,17 @@ task "immutable_configmap" do |_, args| msg = "Mutable configmap #{result[:configmap]} used in env in #{result[:container]} part of #{result[:resource][:kind]}/#{result[:resource][:name]} in #{result[:resource][:namespace]}." stdout_failure(msg) end + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Found mutable configmap(s)") end - resp - end end end desc "Check if CNF uses Kubernetes alpha APIs" -task "alpha_k8s_apis" do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "alpha_k8s_apis" - emoji="β­•οΈπŸ”" - Log.for(testsuite_task).info { "Starting test" } - +task "alpha_k8s_apis" do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| unless check_poc(args) - upsert_skipped_task(testsuite_task, "⏭️ SKIPPED: alpha_k8s_apis not in poc mode #{emoji}", task_start_time) - next + next CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Skipped, "alpha_k8s_apis not in poc mode") end ensure_kubeconfig! @@ -696,8 +624,7 @@ task "alpha_k8s_apis" do |_, args| # No offline support for this task for now if args.named["offline"]? && args.named["offline"]? != "false" - upsert_skipped_task(testsuite_task, "⏭️ SKIPPED: alpha_k8s_apis chaos test skipped #{emoji}", task_start_time) - next + next CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Skipped, "alpha_k8s_apis chaos test skipped") end # Get kubernetes version of the current server. @@ -720,8 +647,7 @@ task "alpha_k8s_apis" do |_, args| # CNF setup failed on kind cluster. Inform in test output. unless cnf_setup_complete puts "CNF failed to install on apisnoop cluster".colorize(:red) - upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Could not check CNF for usage of Kubernetes alpha APIs #{emoji}", task_start_time) - next + next CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Could not check CNF for usage of Kubernetes alpha APIs") end # CNF setup was fine on kind cluster. Check for usage of alpha Kubernetes APIs. 
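One consequence of the block's return value becoming the recorded result, visible in the `alpha_k8s_apis` hunks above and in `secrets_used` and `hardcoded_ip_addresses_in_k8s_runtime_configuration` earlier: guard clauses can no longer call an `upsert_skipped_task` helper and fall through. They now exit with `next`, which in Crystal returns a value from the surrounding block. A short sketch of that guard shape (the guard and messages are illustrative):

```crystal
CNFManager::Task.task_runner(args, task: t) do |args, config|
  # `next <value>` ends the block immediately and hands the Skipped
  # result back to task_runner, replacing the old upsert_skipped_task call.
  unless check_poc(args) # illustrative guard, mirroring alpha_k8s_apis
    next CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Skipped, "not in poc mode")
  end

  # Otherwise the block's last expression is the Passed/Failed result.
  CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "check passed")
end
```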
@@ -737,37 +663,22 @@ task "alpha_k8s_apis" do |_, args| api_count = result[:output].split("\n")[2].to_i if api_count == 0 - upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: CNF does not use Kubernetes alpha APIs #{emoji}", task_start_time) + result = CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "CNF does not use Kubernetes alpha APIs") else - upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: CNF uses Kubernetes alpha APIs #{emoji}", task_start_time) + result = CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "CNF uses Kubernetes alpha APIs") end ensure if cluster_name != nil KindManager.new.delete_cluster(cluster_name) ENV["KUBECONFIG"]="#{kubeconfig_orig}" end + result end end - -def secrets_used_skipped_msg(emoji) -<<-TEMPLATE -SKIPPED: Secrets not used #{emoji} - -To address this issue please see the USAGE.md documentation - -TEMPLATE -end - desc "Does the CNF install an Operator with OLM?" -task "operator_installed" do |_, args| - CNFManager::Task.task_runner(args) do |args,config| - task_start_time = Time.utc - testsuite_task = "operator_installed" - Log.for(testsuite_task).info { "Starting test" } - - Log.for(testsuite_task).debug { "cnf_config: #{config}" } - +task "operator_installed" do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args,config| subscription_names = CNFManager.cnf_resources(args, config) do |resource| kind = resource.dig("kind").as_s if kind && kind.downcase == "subscription" @@ -775,7 +686,7 @@ task "operator_installed" do |_, args| end end.compact - Log.for(testsuite_task).info { "Subscription Names: #{subscription_names}" } + Log.for(t.name).info { "Subscription Names: #{subscription_names}" } #TODO Warn if csv is not found for a subscription. @@ -793,7 +704,7 @@ task "operator_installed" do |_, args| end end.compact - Log.for(testsuite_task).info { "CSV Names: #{csv_names}" } + Log.for(t.name).info { "CSV Names: #{csv_names}" } succeeded = csv_names.map do |csv| @@ -803,25 +714,13 @@ task "operator_installed" do |_, args| csv_succeeded end - Log.for(testsuite_task).info { "Succeeded CSV Names: #{succeeded}" } - - test_passed = false + Log.for(t.name).info { "Succeeded CSV Names: #{succeeded}" } if succeeded.size > 0 && succeeded.all?(true) - Log.for(testsuite_task).info { "Succeeded All True?" } - test_passed = true - end - - test_passed - - emoji_image_size="βš–οΈπŸ‘€" - emoji_small="🐜" - emoji_big="πŸ¦–" - - if test_passed - upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: Operator is installed: #{emoji_small} #{emoji_image_size}", task_start_time) + Log.for(t.name).info { "Succeeded All True?" 
} + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Operator is installed: 🐜") else - upsert_na_task(testsuite_task, "βœ–οΈ NA: No Operators Found #{emoji_big} #{emoji_image_size}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::NA, "No Operators Found πŸ¦–") end end end diff --git a/src/tasks/workload/microservice.cr b/src/tasks/workload/microservice.cr index 54d5ac833..a4ae86598 100644 --- a/src/tasks/workload/microservice.cr +++ b/src/tasks/workload/microservice.cr @@ -23,19 +23,13 @@ end REASONABLE_STARTUP_BUFFER = 10.0 desc "To check if the CNF has multiple microservices that share a database" -task "shared_database", ["install_cluster_tools"] do |_, args| - - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "shared_database" - Log.for(testsuite_task).info { "Starting test" } - +task "shared_database", ["install_cluster_tools"] do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| # todo loop through local resources and see if db match found db_match = Netstat::Mariadb.match if db_match[:found] == false - upsert_na_task(testsuite_task, "⏭️ N/A: [shared_database] No MariaDB containers were found", task_start_time) - next + next CNFManager::TestcaseResult.new(CNFManager::ResultStatus::NA, "[shared_database] No MariaDB containers were found") end resource_ymls = CNFManager.cnf_workload_resources(args, config) { |resource| resource } @@ -99,26 +93,17 @@ task "shared_database", ["install_cluster_tools"] do |_, args| integrated_database_found = true end - failed_emoji = "(ΰ¦­_ΰ¦­) ήƒ πŸ’Ύ" - passed_emoji = "πŸ–₯️ πŸ’Ύ" - if integrated_database_found - upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Found a shared database #{failed_emoji}", task_start_time) + if integrated_database_found + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Found a shared database (ΰ¦­_ΰ¦­) ήƒ") else - upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: No shared database found #{passed_emoji}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "No shared database found πŸ–₯️") end end end desc "Does the CNF have a reasonable startup time (< 30 seconds)?" 
-task "reasonable_startup_time" do |_, args| - Log.info { "Running reasonable_startup_time test" } - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "reasonable_startup_time" - Log.for(testsuite_task).info { "Starting test" } - - Log.for(testsuite_task).debug { "cnf_config: #{config.cnf_config}" } - +task "reasonable_startup_time" do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| yml_file_path = config.cnf_config[:yml_file_path] helm_chart = config.cnf_config[:helm_chart] helm_directory = config.cnf_config[:helm_directory] @@ -133,8 +118,6 @@ task "reasonable_startup_time" do |_, args| #TODO check if json is empty startup_time = configmap["data"].as_h["startup_time"].as_s - emoji_fast="πŸš€" - emoji_slow="🐒" # Correlation for a slow box vs a fast box # sysbench base fast machine (disk), time in ms 0.16 # sysbench base slow machine (disk), time in ms 6.55 @@ -188,11 +171,10 @@ task "reasonable_startup_time" do |_, args| Log.info { "startup_time: #{startup_time.to_i}" } if startup_time.to_i <= startup_time_limit - upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: CNF had a reasonable startup time #{emoji_fast}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "CNF had a reasonable startup time πŸš€") else - upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: CNF had a startup time of #{startup_time} seconds #{emoji_slow}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "CNF had a startup time of #{startup_time} seconds 🐒") end - end end @@ -204,22 +186,17 @@ end # CRYSTAL_ENV=TEST ./cnf-testsuite reasonable_image_size # desc "Does the CNF have a reasonable container image size (< 5GB)?" -task "reasonable_image_size" do |_, args| - CNFManager::Task.task_runner(args) do |args,config| - task_start_time = Time.utc - testsuite_task = "reasonable_image_size" - Log.for(testsuite_task).info { "Starting test" } - +task "reasonable_image_size" do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args,config| docker_insecure_registries = [] of String if config.cnf_config[:docker_insecure_registries]? && !config.cnf_config[:docker_insecure_registries].nil? docker_insecure_registries = config.cnf_config[:docker_insecure_registries].not_nil! 
end unless Dockerd.install(docker_insecure_registries) - upsert_skipped_task(testsuite_task, "⏭️ SKIPPED: Skipping reasonable_image_size: Dockerd tool failed to install", task_start_time) - next + next CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Skipped, "Skipping reasonable_image_size: Dockerd tool failed to install") end - Log.for(testsuite_task).debug { "cnf_config: #{config}" } + Log.for(t.name).debug { "cnf_config: #{config}" } task_response = CNFManager.workload_resource_test(args, config) do |resource, container, initialized| yml_file_path = config.cnf_config[:yml_file_path] @@ -295,7 +272,7 @@ task "reasonable_image_size" do |_, args| test_passed=false end rescue ex - Log.for(testsuite_task).error { "invalid compressed_size: #{fqdn_image} = '#{compressed_size.to_s}', #{ex.message}".colorize(:red) } + Log.for(t.name).error { "invalid compressed_size: #{fqdn_image} = '#{compressed_size.to_s}', #{ex.message}".colorize(:red) } test_passed = false end else @@ -304,14 +281,10 @@ task "reasonable_image_size" do |_, args| test_passed end - emoji_image_size="βš–οΈπŸ‘€" - emoji_small="🐜" - emoji_big="πŸ¦–" - if task_response - upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: Image size is good #{emoji_small} #{emoji_image_size}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Image size is good 🐜") else - upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Image size too large #{emoji_big} #{emoji_image_size}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Image size too large πŸ¦–") end end end @@ -326,13 +299,8 @@ task "process_search" do |_, args| end desc "Do the containers in a pod have only one process type?" -task "single_process_type" do |_, args| - CNFManager::Task.task_runner(args) do |args,config| - task_start_time = Time.utc - testsuite_task = "single_process_type" - Log.for(testsuite_task).info { "Starting test" } - - Log.for(testsuite_task).debug { "cnf_config: #{config}" } +task "single_process_type" do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args,config| fail_msgs = [] of String all_node_proc_statuses = [] of NamedTuple(node_name: String, proc_statuses: Array(String)) @@ -347,15 +315,15 @@ task "single_process_type" do |_, args| containers = KubectlClient::Get.resource_containers(kind, resource[:name], resource[:namespace]) pods.map do |pod| pod_name = pod.dig("metadata", "name") - Log.for(testsuite_task).info { "pod_name: #{pod_name}" } + Log.for(t.name).info { "pod_name: #{pod_name}" } status = pod["status"] if status["containerStatuses"]? 
container_statuses = status["containerStatuses"].as_a - Log.for(testsuite_task).info { "container_statuses: #{container_statuses}" } - Log.for(testsuite_task).info { "pod_name: #{pod_name}" } + Log.for(t.name).info { "container_statuses: #{container_statuses}" } + Log.for(t.name).info { "pod_name: #{pod_name}" } nodes = KubectlClient::Get.nodes_by_pod(pod) - Log.for(testsuite_task).info { "nodes_by_resource done" } + Log.for(t.name).info { "nodes_by_resource done" } node = nodes.first container_statuses.map do |container_status| container_name = container_status.dig("name") @@ -363,15 +331,15 @@ task "single_process_type" do |_, args| container_id = container_status.dig("containerID").as_s ready = container_status.dig("ready").as_bool next unless ready - Log.for(testsuite_task).info { "containerStatuses container_id #{container_id}" } + Log.for(t.name).info { "containerStatuses container_id #{container_id}" } pid = ClusterTools.node_pid_by_container_id(container_id, node) - Log.for(testsuite_task).info { "node pid (should never be pid 1): #{pid}" } + Log.for(t.name).info { "node pid (should never be pid 1): #{pid}" } next unless pid node_name = node.dig("metadata", "name").as_s - Log.for(testsuite_task).info { "node name : #{node_name}" } + Log.for(t.name).info { "node name : #{node_name}" } # filtered_proc_statuses = all_node_proc_statuses.find {|x| x[:node_name] == node_name} # proc_statuses = filtered_proc_statuses ? filtered_proc_statuses[:proc_statuses] : nil # Log.debug { "node statuses : #{proc_statuses}" } @@ -389,12 +357,12 @@ task "single_process_type" do |_, args| proc_statuses) statuses.map do |status| - Log.for(testsuite_task).debug { "status: #{status}" } - Log.for(testsuite_task).info { "status cmdline: #{status["cmdline"]}" } + Log.for(t.name).debug { "status: #{status}" } + Log.for(t.name).info { "status cmdline: #{status["cmdline"]}" } status_name = status["Name"].strip ppid = status["PPid"].strip - Log.for(testsuite_task).info { "status name: #{status_name}" } - Log.for(testsuite_task).info { "previous status name: #{previous_process_type}" } + Log.for(t.name).info { "status name: #{status_name}" } + Log.for(t.name).info { "previous status name: #{previous_process_type}" } # Fail if more than one process type #todo make work if processes out of order if status_name != previous_process_type && @@ -404,7 +372,7 @@ task "single_process_type" do |_, args| status_name, statuses) unless verified - Log.for(testsuite_task).info { "multiple proc types detected verified: #{verified}" } + Log.for(t.name).info { "multiple proc types detected verified: #{verified}" } fail_msg = "resource: #{resource}, pod #{pod_name} and container: #{container_name} has more than one process type (#{statuses.map{|x|x["cmdline"]?}.compact.uniq.join(", ")})" unless fail_msgs.find{|x| x== fail_msg} puts fail_msg.colorize(:red) @@ -421,33 +389,24 @@ task "single_process_type" do |_, args| test_passed end end - emoji_image_size="βš–οΈπŸ‘€" - emoji_small="🐜" - emoji_big="πŸ¦–" if task_response - upsert_passed_task(testsuite_task, "βœ”οΈ πŸ† PASSED: Only one process type used #{emoji_small} #{emoji_image_size}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Only one process type used") else - upsert_failed_task(testsuite_task, "βœ–οΈ πŸ† FAILED: More than one process type used #{emoji_big} #{emoji_image_size}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "More than one process type used") end end end desc "Are the SIGTERM 
signals handled?" -task "zombie_handled" do |_, args| - CNFManager::Task.task_runner(args) do |args,config| - task_start_time = Time.utc - testsuite_task = "zombie_handled" - Log.for(testsuite_task).info { "Starting test" } - - Log.for(testsuite_task).debug { "cnf_config: #{config}" } - +task "zombie_handled" do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args,config| task_response = CNFManager.workload_resource_test(args, config, check_containers:false ) do |resource, container, initialized| ClusterTools.all_containers_by_resource?(resource, resource[:namespace]) do | container_id, container_pid_on_node, node, container_proctree_statuses, container_status| resp = ClusterTools.exec_by_node("runc --root /run/containerd/runc/k8s.io/ state #{container_id}", node) - Log.for(testsuite_task).info { "resp[:output] #{resp[:output]}" } + Log.for(t.name).info { "resp[:output] #{resp[:output]}" } bundle_path = JSON.parse(resp[:output].to_s) - Log.for(testsuite_task).info { "bundle path: #{bundle_path["bundle"]} "} + Log.for(t.name).info { "bundle path: #{bundle_path["bundle"]} "} ClusterTools.exec_by_node("nerdctl --namespace=k8s.io cp /zombie #{container_id}:/zombie", node) ClusterTools.exec_by_node("nerdctl --namespace=k8s.io cp /sleep #{container_id}:/sleep", node) # ClusterTools.exec_by_node("ctools --bundle_path --container_id ") @@ -461,15 +420,15 @@ task "zombie_handled" do |_, args| ClusterTools.all_containers_by_resource?(resource, resource[:namespace]) do | container_id, container_pid_on_node, node, container_proctree_statuses, container_status| zombies = container_proctree_statuses.map do |status| - Log.for(testsuite_task).debug { "status: #{status}" } - Log.for(testsuite_task).info { "status cmdline: #{status["cmdline"]}" } + Log.for(t.name).debug { "status: #{status}" } + Log.for(t.name).info { "status cmdline: #{status["cmdline"]}" } status_name = status["Name"].strip current_pid = status["Pid"].strip state = status["State"].strip - Log.for(testsuite_task).info { "pid: #{current_pid}" } - Log.for(testsuite_task).info { "status name: #{status_name}" } - Log.for(testsuite_task).info { "state: #{state}" } - Log.for(testsuite_task).info { "(state =~ /zombie/): #{(state =~ /zombie/)}" } + Log.for(t.name).info { "pid: #{current_pid}" } + Log.for(t.name).info { "status name: #{status_name}" } + Log.for(t.name).info { "state: #{state}" } + Log.for(t.name).info { "(state =~ /zombie/): #{(state =~ /zombie/)}" } if (state =~ /zombie/) != nil puts "Process #{status_name} has a state of #{state}".colorize(:red) true @@ -477,35 +436,24 @@ task "zombie_handled" do |_, args| nil end end - Log.for(testsuite_task).info { "zombies.all?(nil): #{zombies.all?(nil)}" } + Log.for(t.name).info { "zombies.all?(nil): #{zombies.all?(nil)}" } zombies.all?(nil) end end - emoji_image_size="βš–οΈπŸ‘€" - emoji_small="🐜" - emoji_big="πŸ¦–" - if task_response - upsert_passed_task(testsuite_task, "βœ”οΈ πŸ† PASSED: Zombie handled #{emoji_small} #{emoji_image_size}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Zombie handled") else - upsert_failed_task(testsuite_task, "βœ–οΈ πŸ† FAILED: Zombie not handled #{emoji_big} #{emoji_image_size}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Zombie not handled") end end - end desc "Are the SIGTERM signals handled?" 
-task "sig_term_handled" do |_, args| - CNFManager::Task.task_runner(args) do |args,config| - task_start_time = Time.utc - testsuite_task = "sig_term_handled" - Log.for(testsuite_task).info { "Starting test" } - - Log.for(testsuite_task).debug { "cnf_config: #{config}" } - +task "sig_term_handled" do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args,config| # test_status can be "skipped" or "failed". # Only collecting containers that failed or were skipped. # @@ -545,10 +493,10 @@ task "sig_term_handled" do |_, args| status = pod["status"] if status["containerStatuses"]? container_statuses = status["containerStatuses"].as_a - Log.for(testsuite_task).info { "container_statuses: #{container_statuses}" } - Log.for(testsuite_task).info { "pod_name: #{pod_name}" } + Log.for(t.name).info { "container_statuses: #{container_statuses}" } + Log.for(t.name).info { "pod_name: #{pod_name}" } nodes = KubectlClient::Get.nodes_by_pod(pod) - Log.for(testsuite_task).info { "nodes_by_resource done" } + Log.for(t.name).info { "nodes_by_resource done" } node = nodes.first # there should only be one node returned for one pod sig_result = container_statuses.map do |container_status| container_name = container_status.dig("name") @@ -557,7 +505,7 @@ task "sig_term_handled" do |_, args| # Check if the container status is ready. # If this container is not ready, move on to next. container_name = container_status.dig("name").as_s - Log.for(testsuite_task).info { "before ready containerStatuses pod:#{pod_name} container:#{container_name}" } + Log.for(t.name).info { "before ready containerStatuses pod:#{pod_name} container:#{container_name}" } ready = container_status.dig("ready").as_bool if !ready Log.info { "container status: #{container_status} "} @@ -574,7 +522,7 @@ task "sig_term_handled" do |_, args| end container_id = container_status.dig("containerID").as_s - Log.for(testsuite_task).info { "containerStatuses container_id #{container_id}" } + Log.for(t.name).info { "containerStatuses container_id #{container_id}" } #get container id's pid on the node (different from inside the container) pid = "#{ClusterTools.node_pid_by_container_id(container_id, node)}" @@ -592,7 +540,7 @@ task "sig_term_handled" do |_, args| end # next if pid.empty? - Log.for(testsuite_task).info { "node pid (should never be pid 1): #{pid}" } + Log.for(t.name).info { "node pid (should never be pid 1): #{pid}" } # need to do the next line. how to kill the current cnf? 
# this was one of the reasons why we did stuff like this during the cnf install and saved it as a configmap
@@ -605,9 +553,9 @@ task "sig_term_handled" do |_, args|
           #todo 2.1 loop through all child processes that are not threads (only include processes where tgid = pid)
           #todo 2.1.1 ignore the parent pid (we are on the host so it won't be pid 1)
           node_name = node.dig("metadata", "name").as_s
-          Log.for(testsuite_task).info { "node name : #{node_name}" }
+          Log.for(t.name).info { "node name : #{node_name}" }
           pids = KernelIntrospection::K8s::Node.pids(node)
-          Log.for(testsuite_task).info { "proctree_by_pid pids: #{pids}" }
+          Log.for(t.name).info { "proctree_by_pid pids: #{pids}" }
           proc_statuses = KernelIntrospection::K8s::Node.all_statuses_by_pids(pids, node)
           statuses = KernelIntrospection::K8s::Node.proctree_by_pid(pid, node, proc_statuses)

@@ -625,16 +573,16 @@ task "sig_term_handled" do |_, args|
             end
           end
           non_thread_statuses.map do |status|
-            Log.for(testsuite_task).debug { "status: #{status}" }
-            Log.for(testsuite_task).info { "status cmdline: #{status["cmdline"]}" }
+            Log.for(t.name).debug { "status: #{status}" }
+            Log.for(t.name).info { "status cmdline: #{status["cmdline"]}" }
             status_name = status["Name"].strip
             ppid = status["PPid"].strip
             current_pid = status["Pid"].strip
             tgid = status["Tgid"].strip # check if 'g' is uppercase
-            Log.for(testsuite_task).info { "Pid: #{current_pid}" }
-            Log.for(testsuite_task).info { "Tgid: #{tgid}" }
-            Log.for(testsuite_task).info { "status name: #{status_name}" }
-            Log.for(testsuite_task).info { "previous status name: #{previous_process_type}" }
+            Log.for(t.name).info { "Pid: #{current_pid}" }
+            Log.for(t.name).info { "Tgid: #{tgid}" }
+            Log.for(t.name).info { "status name: #{status_name}" }
+            Log.for(t.name).info { "previous status name: #{previous_process_type}" }
             # do not count the top pid if there are children
             if non_thread_statuses.size > 1 && pid == current_pid
               next
@@ -657,7 +605,7 @@ task "sig_term_handled" do |_, args|
             #todo 2.2 wait for 30 seconds
           end
           ClusterTools.exec_by_node("bash -c 'sleep 10 && kill #{pid} && sleep 5 && kill -9 #{pid}'", node)
-          Log.for(testsuite_task).info { "pid_log_names: #{pid_log_names}" }
+          Log.for(t.name).info { "pid_log_names: #{pid_log_names}" }
           #todo 2.3 parse the logs
           #todo get the log
           sleep 5
@@ -676,7 +624,7 @@ task "sig_term_handled" do |_, args|
               false
             end
           end
-          Log.for(testsuite_task).info { "SigTerm Found: #{sig_term_found}" }
+          Log.for(t.name).info { "SigTerm Found: #{sig_term_found}" }
           # per all containers
           container_sig_term_check = sig_term_found.all?(true)
           if container_sig_term_check == false
@@ -705,14 +653,10 @@ task "sig_term_handled" do |_, args|
         true # non "deployment","statefulset","pod","replicaset", and "daemonset" don't need a sigterm check
       end
     end
-    emoji_image_size="βš–οΈπŸ‘€"
-    emoji_small="🐜"
-    emoji_big="πŸ¦–"
     if task_response
-      upsert_passed_task(testsuite_task, "βœ”οΈ πŸ† PASSED: Sig Term handled #{emoji_small} #{emoji_image_size}", task_start_time)
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Sig Term handled")
     else
-      upsert_failed_task(testsuite_task, "βœ–οΈ πŸ† FAILED: Sig Term not handled #{emoji_big} #{emoji_image_size}", task_start_time)
       failed_containers.map do |failure_info|
         resource_output = "Pod: #{failure_info["pod"]}, Container: #{failure_info["container"]}, Result: #{failure_info["test_status"]}"
         if failure_info["test_status"] == "skipped"
         end
         stdout_failure resource_output
       end
-      nil
+
CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Sig Term not handled") end end end desc "Are any of the containers exposed as a service?" -task "service_discovery" do |_, args| - CNFManager::Task.task_runner(args) do |args,config| - task_start_time = Time.utc - testsuite_task = "service_discovery" - Log.for(testsuite_task).info { "Starting test" } - +task "service_discovery" do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args,config| # Get all resources for the CNF resource_ymls = CNFManager.cnf_workload_resources(args, config) { |resource| resource } default_namespace = "default" @@ -772,35 +712,27 @@ task "service_discovery" do |_, args| end end - emoji_image_size="βš–οΈπŸ‘€" - emoji_small="🐜" - emoji_big="πŸ¦–" - if test_passed - upsert_passed_task(testsuite_task, "βœ”οΈ ✨PASSED: Some containers exposed as a service #{emoji_small} #{emoji_image_size}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Some containers exposed as a service") else - upsert_failed_task(testsuite_task, "βœ–οΈ ✨FAILED: No containers exposed as a service #{emoji_big} #{emoji_image_size}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "No containers exposed as a service") end end end desc "To check if the CNF uses a specialized init system" -task "specialized_init_system", ["install_cluster_tools"] do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "specialized_init_system" - Log.for(testsuite_task).info { "Starting test" } - +task "specialized_init_system", ["install_cluster_tools"] do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| failed_cnf_resources = [] of InitSystems::InitSystemInfo CNFManager.workload_resource_test(args, config) do |resource, container, initialized| kind = resource["kind"].downcase case kind when "deployment","statefulset","pod","replicaset", "daemonset" namespace = resource[:namespace] - Log.for(testsuite_task).info { "Checking resource #{resource[:kind]}/#{resource[:name]} in #{namespace}" } + Log.for(t.name).info { "Checking resource #{resource[:kind]}/#{resource[:name]} in #{namespace}" } resource_yaml = KubectlClient::Get.resource(resource[:kind], resource[:name], resource[:namespace]) pods = KubectlClient::Get.pods_by_resource(resource_yaml, namespace) - Log.for(testsuite_task).info { "Pod count for resource #{resource[:kind]}/#{resource[:name]} in #{namespace}: #{pods.size}" } + Log.for(t.name).info { "Pod count for resource #{resource[:kind]}/#{resource[:name]} in #{namespace}: #{pods.size}" } pods.each do |pod| results = InitSystems.scan(pod) failed_cnf_resources = failed_cnf_resources + results @@ -808,17 +740,13 @@ task "specialized_init_system", ["install_cluster_tools"] do |_, args| end end - failed_emoji = "(ΰ¦­_ΰ¦­) ήƒ πŸš€" - passed_emoji = "πŸ–₯️ πŸš€" - if failed_cnf_resources.size > 0 - upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Containers do not use specialized init systems #{failed_emoji}", task_start_time) failed_cnf_resources.each do |init_info| stdout_failure "#{init_info.kind}/#{init_info.name} has container '#{init_info.container}' with #{init_info.init_cmd} as init process" end + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Containers do not use specialized init systems (ΰ¦­_ΰ¦­) ήƒ") else - upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: Containers use specialized init systems #{passed_emoji}", task_start_time) + 
CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Containers use specialized init systems πŸ–₯️") end - end end diff --git a/src/tasks/workload/ran.cr b/src/tasks/workload/ran.cr index 0333507e9..a52a2950e 100644 --- a/src/tasks/workload/ran.cr +++ b/src/tasks/workload/ran.cr @@ -14,13 +14,8 @@ task "ran", ["oran_e2_connection"] do |_, args| end end desc "Test if RAN uses the ORAN e2 interface" -task "oran_e2_connection" do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "oran_e2_connection" - Log.for(testsuite_task).info { "Starting test" } - - Log.debug { "cnf_config: #{config}" } +task "oran_e2_connection" do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| release_name = config.cnf_config[:release_name] destination_cnf_dir = CNFManager.cnf_destination_dir(CNFManager.ensure_cnf_testsuite_dir(args.named["cnf-config"].as(String))) if ORANMonitor.isCNFaRIC?(config.cnf_config) @@ -29,14 +24,12 @@ task "oran_e2_connection" do |_, args| if e2_found == "true" - resp = upsert_passed_task(testsuite_task,"βœ”οΈ PASSED: RAN connects to a RIC using the e2 standard interface", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "RAN connects to a RIC using the e2 standard interface") else - resp = upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: RAN does not connect to a RIC using the e2 standard interface", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "RAN does not connect to a RIC using the e2 standard interface") end - resp else - upsert_na_task(testsuite_task, "⏭️ N/A: [oran_e2_connection] No ric designated in cnf_testsuite.yml for #{destination_cnf_dir}", task_start_time) - next + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::NA, "[oran_e2_connection] No ric designated in cnf_testsuite.yml for #{destination_cnf_dir}") end end diff --git a/src/tasks/workload/reliability.cr b/src/tasks/workload/reliability.cr index 5ae1f9b41..57715a9f2 100644 --- a/src/tasks/workload/reliability.cr +++ b/src/tasks/workload/reliability.cr @@ -28,84 +28,64 @@ desc "The CNF test suite checks to see if the CNFs are resilient to failures." end desc "Is there a liveness entry in the helm chart?" 
-task "liveness" do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "liveness" - Log.for(testsuite_task).info { "Starting test" } - Log.for(testsuite_task).debug { "cnf_config: #{config}" } - +task "liveness" do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| resp = "" - emoji_probe="⎈🧫" task_response = CNFManager.workload_resource_test(args, config) do |resource, container, initialized| test_passed = true resource_ref = "#{resource[:kind]}/#{resource[:name]}" begin - Log.for(testsuite_task).debug { container.as_h["name"].as_s } if check_verbose(args) + Log.for(t.name).debug { container.as_h["name"].as_s } if check_verbose(args) container.as_h["livenessProbe"].as_h rescue ex - Log.for(testsuite_task).error { ex.message } if check_verbose(args) + Log.for(t.name).error { ex.message } if check_verbose(args) test_passed = false stdout_failure("No livenessProbe found for container #{container.as_h["name"].as_s} part of #{resource_ref} in #{resource[:namespace]} namespace") end - Log.for(testsuite_task).info { "Resource #{resource_ref} passed liveness?: #{test_passed}" } + Log.for(t.name).info { "Resource #{resource_ref} passed liveness?: #{test_passed}" } test_passed end - Log.for(testsuite_task).info { "Workload resource task response: #{task_response}" } + Log.for(t.name).info { "Workload resource task response: #{task_response}" } if task_response - resp = upsert_passed_task(testsuite_task,"βœ”οΈ πŸ† PASSED: Helm liveness probe found #{emoji_probe}", task_start_time) - else - resp = upsert_failed_task(testsuite_task,"βœ–οΈ πŸ† FAILED: No livenessProbe found #{emoji_probe}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Helm liveness probe found") + else + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "No livenessProbe found") end - resp end end desc "Is there a readiness entry in the helm chart?" 
-task "readiness" do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "readiness" - Log.for(testsuite_task).info { "Starting test" } - - Log.for("readiness").info { "Starting test" } - Log.for("readiness").debug { "cnf_config: #{config}" } +task "readiness" do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| resp = "" - emoji_probe="⎈🧫" task_response = CNFManager.workload_resource_test(args, config) do |resource, container, initialized| test_passed = true resource_ref = "#{resource[:kind]}/#{resource[:name]}" begin - Log.for(testsuite_task).debug { container.as_h["name"].as_s } if check_verbose(args) + Log.for(t.name).debug { container.as_h["name"].as_s } if check_verbose(args) container.as_h["readinessProbe"].as_h rescue ex - Log.for(testsuite_task).error { ex.message } if check_verbose(args) + Log.for(t.name).error { ex.message } if check_verbose(args) test_passed = false stdout_failure("No readinessProbe found for container #{container.as_h["name"].as_s} part of #{resource_ref} in #{resource[:namespace]} namespace") end - Log.for(testsuite_task).info { "Resource #{resource_ref} passed liveness?: #{test_passed}" } + Log.for(t.name).info { "Resource #{resource_ref} passed liveness?: #{test_passed}" } test_passed end - Log.for(testsuite_task).info { "Workload resource task response: #{task_response}" } + Log.for(t.name).info { "Workload resource task response: #{task_response}" } if task_response - resp = upsert_passed_task(testsuite_task,"βœ”οΈ πŸ† PASSED: Helm readiness probe found #{emoji_probe}", task_start_time) - else - resp = upsert_failed_task(testsuite_task,"βœ–οΈ πŸ† FAILED: No readinessProbe found #{emoji_probe}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Helm readiness probe found") + else + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "No readinessProbe found") end - resp end end desc "Does the CNF crash when network latency occurs" -task "pod_network_latency", ["install_litmus"] do |_, args| - CNFManager::Task.task_runner(args) do |args, config| +task "pod_network_latency", ["install_litmus"] do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| #todo if args has list of labels to perform test on, go into pod specific mode - task_start_time = Time.utc - testsuite_task = "pod_network_latency" - Log.for(testsuite_task).info { "Starting test" } - - Log.debug { "cnf_config: #{config}" } #TODO tests should fail if cnf not installed destination_cnf_dir = config.cnf_config[:destination_cnf_dir] task_response = CNFManager.workload_resource_test(args, config) do |resource, container, initialized| @@ -158,10 +138,10 @@ task "pod_network_latency", ["install_litmus"] do |_, args| # https://raw.githubusercontent.com/litmuschaos/chaos-charts/v2.14.x/charts/generic/pod-network-latency/rbac.yaml rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/charts/generic/pod-network-latency/rbac.yaml" - experiment_path = LitmusManager.download_template(experiment_url, "#{testsuite_task}_experiment.yaml") + experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml") KubectlClient::Apply.file(experiment_path, namespace: app_namespace) - rbac_path = LitmusManager.download_template(rbac_url, "#{testsuite_task}_rbac.yaml") + rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml") rbac_yaml = File.read(rbac_path) rbac_yaml = 
rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}") File.write(rbac_path, rbac_yaml) @@ -205,9 +185,9 @@ task "pod_network_latency", ["install_litmus"] do |_, args| unless args.named["pod_labels"]? #todo if in pod specific mode, dont do upserts and resp = "" if task_response - resp = upsert_passed_task(testsuite_task,"βœ”οΈ ✨PASSED: pod_network_latency chaos test passed πŸ—‘οΈπŸ’€β™»οΈ", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "pod_network_latency chaos test passed") else - resp = upsert_failed_task(testsuite_task,"βœ–οΈ ✨FAILED: pod_network_latency chaos test failed πŸ—‘οΈπŸ’€β™»οΈ", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "pod_network_latency chaos test failed") end end @@ -215,13 +195,8 @@ task "pod_network_latency", ["install_litmus"] do |_, args| end desc "Does the CNF crash when network corruption occurs" -task "pod_network_corruption", ["install_litmus"] do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "pod_network_corruption" - Log.for(testsuite_task).info { "Starting test" } - - Log.for(testsuite_task).debug { "cnf_config: #{config}" } +task "pod_network_corruption", ["install_litmus"] do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| #TODO tests should fail if cnf not installed destination_cnf_dir = config.cnf_config[:destination_cnf_dir] task_response = CNFManager.workload_resource_test(args, config) do |resource, container, initialized| @@ -246,10 +221,10 @@ task "pod_network_corruption", ["install_litmus"] do |_, args| # rbac_url = "https://hub.litmuschaos.io/api/chaos/#{LitmusManager::Version}?file=charts/generic/pod-network-corruption/rbac.yaml" rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/charts/generic/pod-network-corruption/rbac.yaml" - experiment_path = LitmusManager.download_template(experiment_url, "#{testsuite_task}_experiment.yaml") + experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml") KubectlClient::Apply.file(experiment_path, namespace: app_namespace) - rbac_path = LitmusManager.download_template(rbac_url, "#{testsuite_task}_rbac.yaml") + rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml") rbac_yaml = File.read(rbac_path) rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}") File.write(rbac_path, rbac_yaml) @@ -278,21 +253,16 @@ task "pod_network_corruption", ["install_litmus"] do |_, args| end end if task_response - resp = upsert_passed_task(testsuite_task,"βœ”οΈ ✨PASSED: pod_network_corruption chaos test passed πŸ—‘οΈπŸ’€β™»οΈ", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "pod_network_corruption chaos test passed") else - resp = upsert_failed_task(testsuite_task,"βœ–οΈ ✨FAILED: pod_network_corruption chaos test failed πŸ—‘οΈπŸ’€β™»οΈ", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "pod_network_corruption chaos test failed") end end end desc "Does the CNF crash when network duplication occurs" -task "pod_network_duplication", ["install_litmus"] do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "pod_network_duplication" - Log.for(testsuite_task).info { "Starting test" } - - Log.for(testsuite_task).debug { "cnf_config: #{config}" } +task "pod_network_duplication", 
["install_litmus"] do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| #TODO tests should fail if cnf not installed destination_cnf_dir = config.cnf_config[:destination_cnf_dir] task_response = CNFManager.workload_resource_test(args, config) do |resource, container, initialized| @@ -317,10 +287,10 @@ task "pod_network_duplication", ["install_litmus"] do |_, args| # rbac_url = "https://hub.litmuschaos.io/api/chaos/#{LitmusManager::Version}?file=charts/generic/pod-network-duplication/rbac.yaml" rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/charts/generic/pod-network-duplication/rbac.yaml" - experiment_path = LitmusManager.download_template(experiment_url, "#{testsuite_task}_experiment.yaml") + experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml") KubectlClient::Apply.file(experiment_path, namespace: app_namespace) - rbac_path = LitmusManager.download_template(rbac_url, "#{testsuite_task}_rbac.yaml") + rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml") rbac_yaml = File.read(rbac_path) rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}") File.write(rbac_path, rbac_yaml) @@ -349,21 +319,16 @@ task "pod_network_duplication", ["install_litmus"] do |_, args| end end if task_response - resp = upsert_passed_task(testsuite_task,"βœ”οΈ ✨PASSED: pod_network_duplication chaos test passed πŸ—‘οΈπŸ’€β™»οΈ", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "pod_network_duplication chaos test passed") else - resp = upsert_failed_task(testsuite_task,"βœ–οΈ ✨FAILED: pod_network_duplication chaos test failed πŸ—‘οΈπŸ’€β™»οΈ", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "pod_network_duplication chaos test failed") end end end desc "Does the CNF crash when disk fill occurs" -task "disk_fill", ["install_litmus"] do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "disk_fill" - Log.for(testsuite_task).info { "Starting test" } - - Log.for(testsuite_task).debug { "cnf_config: #{config}" } +task "disk_fill", ["install_litmus"] do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| destination_cnf_dir = config.cnf_config[:destination_cnf_dir] task_response = CNFManager.workload_resource_test(args, config) do |resource, container, initialized| app_namespace = resource[:namespace] || config.cnf_config[:helm_install_namespace] @@ -371,7 +336,7 @@ task "disk_fill", ["install_litmus"] do |_, args| if spec_labels.as_h? 
&& spec_labels.as_h.size > 0 test_passed = true else - stdout_failure("No resource label found for #{testsuite_task} test for resource: #{resource["kind"]}/#{resource["name"]} in #{resource["namespace"]} namespace") + stdout_failure("No resource label found for #{t.name} test for resource: #{resource["kind"]}/#{resource["name"]} in #{resource["namespace"]} namespace") test_passed = false end if test_passed @@ -386,10 +351,10 @@ task "disk_fill", ["install_litmus"] do |_, args| # rbac_url = "https://hub.litmuschaos.io/api/chaos/#{LitmusManager::Version}?file=charts/generic/disk-fill/rbac.yaml" rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/charts/generic/disk-fill/rbac.yaml" - experiment_path = LitmusManager.download_template(experiment_url, "#{testsuite_task}_experiment.yaml") + experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml") KubectlClient::Apply.file(experiment_path, namespace: app_namespace) - rbac_path = LitmusManager.download_template(rbac_url, "#{testsuite_task}_rbac.yaml") + rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml") rbac_yaml = File.read(rbac_path) rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}") File.write(rbac_path, rbac_yaml) @@ -419,22 +384,17 @@ task "disk_fill", ["install_litmus"] do |_, args| end test_passed end - if task_response - resp = upsert_passed_task(testsuite_task,"βœ”οΈ PASSED: disk_fill chaos test passed πŸ—‘οΈπŸ’€β™»οΈ", task_start_time) + if task_response + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "disk_fill chaos test passed") else - resp = upsert_failed_task(testsuite_task,"βœ–οΈ FAILED: disk_fill chaos test failed πŸ—‘οΈπŸ’€β™»οΈ", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "disk_fill chaos test failed") end end end desc "Does the CNF crash when pod-delete occurs" -task "pod_delete", ["install_litmus"] do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "pod_delete" - Log.for(testsuite_task).info { "Starting test" } - - Log.debug { "cnf_config: #{config}" } +task "pod_delete", ["install_litmus"] do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| destination_cnf_dir = config.cnf_config[:destination_cnf_dir] #todo clear all annotations task_response = CNFManager.workload_resource_test(args, config) do |resource, container, initialized| @@ -443,7 +403,7 @@ task "pod_delete", ["install_litmus"] do |_, args| if spec_labels.as_h? 
&& spec_labels.as_h.size > 0 test_passed = true else - stdout_failure("No resource label found for #{testsuite_task} test for resource: #{resource["kind"]}/#{resource["name"]} in #{resource["namespace"]} namespace") + stdout_failure("No resource label found for #{t.name} test for resource: #{resource["kind"]}/#{resource["name"]} in #{resource["namespace"]} namespace") test_passed = false end @@ -483,11 +443,11 @@ task "pod_delete", ["install_litmus"] do |_, args| else # experiment_url = "https://hub.litmuschaos.io/api/chaos/#{LitmusManager::Version}?file=charts/generic/pod-delete/experiment.yaml" experiment_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/charts/generic/pod-delete/experiment.yaml" - experiment_path = LitmusManager.download_template(experiment_url, "#{testsuite_task}_experiment.yaml") + experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml") # rbac_url = "https://hub.litmuschaos.io/api/chaos/#{LitmusManager::Version}?file=charts/generic/pod-delete/rbac.yaml" rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/charts/generic/pod-delete/rbac.yaml" - rbac_path = LitmusManager.download_template(rbac_url, "#{testsuite_task}_rbac.yaml") + rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml") rbac_yaml = File.read(rbac_path) rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}") File.write(rbac_path, rbac_yaml) @@ -539,22 +499,17 @@ task "pod_delete", ["install_litmus"] do |_, args| end unless args.named["pod_labels"]? if task_response - resp = upsert_passed_task(testsuite_task,"βœ”οΈ PASSED: pod_delete chaos test passed πŸ—‘οΈπŸ’€β™»οΈ", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "pod_delete chaos test passed") else - resp = upsert_failed_task(testsuite_task,"βœ–οΈ FAILED: pod_delete chaos test failed πŸ—‘οΈπŸ’€β™»οΈ", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "pod_delete chaos test failed") end end end end desc "Does the CNF crash when pod-memory-hog occurs" -task "pod_memory_hog", ["install_litmus"] do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "pod_memory_hog" - Log.for(testsuite_task).info { "Starting test" } - - Log.debug { "cnf_config: #{config}" } +task "pod_memory_hog", ["install_litmus"] do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| destination_cnf_dir = config.cnf_config[:destination_cnf_dir] task_response = CNFManager.workload_resource_test(args, config) do |resource, container, initialized| app_namespace = resource[:namespace] || config.cnf_config[:helm_install_namespace] @@ -562,7 +517,7 @@ task "pod_memory_hog", ["install_litmus"] do |_, args| if spec_labels.as_h? 
&& spec_labels.as_h.size > 0 test_passed = true else - stdout_failure("No resource label found for #{testsuite_task} test for resource: #{resource["kind"]}/#{resource["name"]} in #{resource["namespace"]} namespace") + stdout_failure("No resource label found for #{t.name} test for resource: #{resource["kind"]}/#{resource["name"]} in #{resource["namespace"]} namespace") test_passed = false end if test_passed @@ -577,10 +532,10 @@ task "pod_memory_hog", ["install_litmus"] do |_, args| # rbac_url = "https://hub.litmuschaos.io/api/chaos/#{LitmusManager::Version}?file=charts/generic/pod-memory-hog/rbac.yaml" rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/charts/generic/pod-memory-hog/rbac.yaml" - experiment_path = LitmusManager.download_template(experiment_url, "#{testsuite_task}_experiment.yaml") + experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml") KubectlClient::Apply.file(experiment_path, namespace: app_namespace) - rbac_path = LitmusManager.download_template(rbac_url, "#{testsuite_task}_rbac.yaml") + rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml") rbac_yaml = File.read(rbac_path) rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}") File.write(rbac_path, rbac_yaml) @@ -613,21 +568,16 @@ task "pod_memory_hog", ["install_litmus"] do |_, args| test_passed end if task_response - resp = upsert_passed_task(testsuite_task,"βœ”οΈ PASSED: pod_memory_hog chaos test passed πŸ—‘οΈπŸ’€β™»οΈ", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "pod_memory_hog chaos test passed") else - resp = upsert_failed_task(testsuite_task,"βœ–οΈ FAILED: pod_memory_hog chaos test failed πŸ—‘οΈπŸ’€β™»οΈ", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "pod_memory_hog chaos test failed") end end end desc "Does the CNF crash when pod-io-stress occurs" -task "pod_io_stress", ["install_litmus"] do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "pod_io_stress" - Log.for(testsuite_task).info { "Starting test" } - - Log.for(testsuite_task).debug { "cnf_config: #{config}" } +task "pod_io_stress", ["install_litmus"] do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| destination_cnf_dir = config.cnf_config[:destination_cnf_dir] task_response = CNFManager.workload_resource_test(args, config) do |resource, container, initialized| app_namespace = resource[:namespace] || config.cnf_config[:helm_install_namespace] @@ -635,7 +585,7 @@ task "pod_io_stress", ["install_litmus"] do |_, args| if spec_labels.as_h? 
&& spec_labels.as_h.size > 0 test_passed = true else - stdout_failure("No resource label found for #{testsuite_task} test for resource: #{resource["name"]} in #{resource["namespace"]}") + stdout_failure("No resource label found for #{t.name} test for resource: #{resource["name"]} in #{resource["namespace"]}") test_passed = false end if test_passed @@ -650,10 +600,10 @@ task "pod_io_stress", ["install_litmus"] do |_, args| # rbac_url = "https://hub.litmuschaos.io/api/chaos/#{LitmusManager::Version}?file=charts/generic/pod-io-stress/rbac.yaml" rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/charts/generic/pod-io-stress/rbac.yaml" - experiment_path = LitmusManager.download_template(experiment_url, "#{testsuite_task}_experiment.yaml") + experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml") KubectlClient::Apply.file(experiment_path, namespace: app_namespace) - rbac_path = LitmusManager.download_template(rbac_url, "#{testsuite_task}_rbac.yaml") + rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml") rbac_yaml = File.read(rbac_path) rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}") File.write(rbac_path, rbac_yaml) @@ -685,9 +635,9 @@ task "pod_io_stress", ["install_litmus"] do |_, args| end end if task_response - resp = upsert_passed_task(testsuite_task,"βœ”οΈ ✨PASSED: #{testsuite_task} chaos test passed πŸ—‘οΈπŸ’€β™»οΈ", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "pod_io_stress chaos test passed") else - resp = upsert_failed_task(testsuite_task,"βœ–οΈ ✨FAILED: #{testsuite_task} chaos test failed πŸ—‘οΈπŸ’€β™»οΈ", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "pod_io_stress chaos test failed") end end ensure @@ -698,13 +648,8 @@ end desc "Does the CNF crash when pod-dns-error occurs" -task "pod_dns_error", ["install_litmus"] do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "pod_dns_error" - Log.for(testsuite_task).info { "Starting test" } - - Log.debug { "cnf_config: #{config}" } +task "pod_dns_error", ["install_litmus"] do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| destination_cnf_dir = config.cnf_config[:destination_cnf_dir] runtimes = KubectlClient::Get.container_runtimes Log.info { "pod_dns_error runtimes: #{runtimes}" } @@ -715,7 +660,7 @@ task "pod_dns_error", ["install_litmus"] do |_, args| if spec_labels.as_h? 
&& spec_labels.as_h.size > 0 test_passed = true else - stdout_failure("No resource label found for #{testsuite_task} test for resource: #{resource["kind"]}/#{resource["name"]} in #{resource["namespace"]} namespace") + stdout_failure("No resource label found for #{t.name} test for resource: #{resource["kind"]}/#{resource["name"]} in #{resource["namespace"]} namespace") test_passed = false end if test_passed @@ -730,10 +675,10 @@ task "pod_dns_error", ["install_litmus"] do |_, args| # rbac_url = "https://hub.litmuschaos.io/api/chaos/#{LitmusManager::Version}?file=charts/generic/pod-dns-error/rbac.yaml" rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/charts/generic/pod-dns-error/rbac.yaml" - experiment_path = LitmusManager.download_template(experiment_url, "#{testsuite_task}_experiment.yaml") + experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml") KubectlClient::Apply.file(experiment_path, namespace: app_namespace) - rbac_path = LitmusManager.download_template(rbac_url, "#{testsuite_task}_rbac.yaml") + rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml") rbac_yaml = File.read(rbac_path) rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}") File.write(rbac_path, rbac_yaml) @@ -764,12 +709,12 @@ task "pod_dns_error", ["install_litmus"] do |_, args| end end if task_response - resp = upsert_passed_task(testsuite_task,"βœ”οΈ ✨PASSED: pod_dns_error chaos test passed πŸ—‘οΈπŸ’€β™»οΈ", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "pod_dns_error chaos test passed") else - resp = upsert_failed_task(testsuite_task,"βœ–οΈ ✨FAILED: pod_dns_error chaos test failed πŸ—‘οΈπŸ’€β™»οΈ", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "pod_dns_error chaos test failed") end else - resp = upsert_skipped_task(testsuite_task,"⏭️ ✨SKIPPED: pod_dns_error docker runtime not found πŸ—‘οΈπŸ’€β™»οΈ", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Skipped, "pod_dns_error docker runtime not found") end end end diff --git a/src/tasks/workload/security.cr b/src/tasks/workload/security.cr index ba484c1cb..d157cd534 100644 --- a/src/tasks/workload/security.cr +++ b/src/tasks/workload/security.cr @@ -35,41 +35,31 @@ task "security", [ end desc "Check if pods in the CNF use sysctls with restricted values" -task "sysctls" do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "sysctls" - Log.for(testsuite_task).info { "Starting test" } +task "sysctls" do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| Kyverno.install - - emoji_security = "πŸ”“πŸ”‘" policy_path = Kyverno.policy_path("pod-security/baseline/restrict-sysctls/restrict-sysctls.yaml") failures = Kyverno::PolicyAudit.run(policy_path, EXCLUDE_NAMESPACES) resource_keys = CNFManager.workload_resource_keys(args, config) failures = Kyverno.filter_failures_for_cnf_resources(resource_keys, failures) if failures.size == 0 - resp = upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: No restricted values found for sysctls #{emoji_security}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "No restricted values found for sysctls") else - resp = upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Restricted values for are being used for sysctls #{emoji_security}", task_start_time) failures.each do |failure| 
failure.resources.each do |resource|
           puts "#{resource.kind} #{resource.name} in #{resource.namespace} namespace failed. #{failure.message}".colorize(:red)
         end
       end
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Restricted values are being used for sysctls")
     end
   end
 end

 desc "Check if the CNF has services with external IPs configured"
-task "external_ips" do |_, args|
-  CNFManager::Task.task_runner(args) do |args, config|
-    task_start_time = Time.utc
-    testsuite_task = "external_ips"
-    Log.for(testsuite_task).info { "Starting test" }
-
+task "external_ips" do |t, args|
+  CNFManager::Task.task_runner(args, task: t) do |args, config|
     Kyverno.install
-    emoji_security = "πŸ”“πŸ”‘"

     policy_path = Kyverno.best_practice_policy("restrict-service-external-ips/restrict-service-external-ips.yaml")
     failures = Kyverno::PolicyAudit.run(policy_path, EXCLUDE_NAMESPACES)

@@ -77,28 +67,22 @@ task "external_ips" do |_, args|
     failures = Kyverno.filter_failures_for_cnf_resources(resource_keys, failures)

     if failures.size == 0
-      resp = upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: Services are not using external IPs #{emoji_security}", task_start_time)
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Services are not using external IPs")
     else
-      resp = upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Services are using external IPs #{emoji_security}", task_start_time)
       failures.each do |failure|
         failure.resources.each do |resource|
           puts "#{resource.kind} #{resource.name} in #{resource.namespace} namespace failed. #{failure.message}".colorize(:red)
         end
       end
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Services are using external IPs")
     end
   end
 end

 desc "Check if the CNF or the cluster resources have custom SELinux options"
-task "selinux_options" do |_, args|
-  CNFManager::Task.task_runner(args) do |args, config|
-    task_start_time = Time.utc
-    testsuite_task = "selinux_options"
-    Log.for(testsuite_task).info { "Starting test" }
-
+task "selinux_options" do |t, args|
+  CNFManager::Task.task_runner(args, task: t) do |args, config|
     Kyverno.install
-
-    emoji_security = "πŸ”“πŸ”‘"

     check_policy_path = Kyverno::CustomPolicies::SELinuxEnabled.new.policy_path
     check_failures = Kyverno::PolicyAudit.run(check_policy_path, EXCLUDE_NAMESPACES)

@@ -113,59 +97,47 @@ task "selinux_options" do |_, args|
     check_failures = Kyverno.filter_failures_for_cnf_resources(resource_keys, check_failures)

     if check_failures.size == 0
-      # upsert_skipped_task("selinux_options", "⏭️ πŸ† SKIPPED: Pods are not using SELinux options #{emoji_security}", Time.utc)
-      upsert_na_task(testsuite_task, "⏭️ πŸ† N/A: Pods are not using SELinux #{emoji_security}", task_start_time)
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::NA, "Pods are not using SELinux")
     else
       failures = Kyverno.filter_failures_for_cnf_resources(resource_keys, disallow_failures)

       if failures.size == 0
-        resp = upsert_passed_task(testsuite_task, "βœ”οΈ πŸ† PASSED: Pods are not using custom SELinux options that can be used for privilege escalations #{emoji_security}", task_start_time)
+        CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Pods are not using custom SELinux options that can be used for privilege escalations")
       else
-        resp = upsert_failed_task(testsuite_task, "βœ–οΈ πŸ† FAILED: Pods are using custom SELinux options that can be used for privilege escalations #{emoji_security}", task_start_time)
         failures.each do |failure|
           failure.resources.each do |resource|
             puts "#{resource.kind} #{resource.name} in #{resource.namespace} namespace failed. #{failure.message}".colorize(:red)
           end
         end
+        CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Pods are using custom SELinux options that can be used for privilege escalations")
       end
-
     end
-
   end
 end

 desc "Check if the CNF is running containers with container sock mounts"
-task "container_sock_mounts" do |_, args|
-  CNFManager::Task.task_runner(args) do |args, config|
-    task_start_time = Time.utc
-    testsuite_task = "container_sock_mounts"
-    Log.for(testsuite_task).info { "Starting test" }
-
+task "container_sock_mounts" do |t, args|
+  CNFManager::Task.task_runner(args, task: t) do |args, config|
     Kyverno.install
-    emoji_security = "πŸ”“πŸ”‘"

     policy_path = Kyverno.best_practice_policy("disallow_cri_sock_mount/disallow_cri_sock_mount.yaml")
     failures = Kyverno::PolicyAudit.run(policy_path, EXCLUDE_NAMESPACES)

     if failures.size == 0
-      resp = upsert_passed_task(testsuite_task, "βœ”οΈ πŸ† PASSED: Container engine daemon sockets are not mounted as volumes #{emoji_security}", task_start_time)
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Container engine daemon sockets are not mounted as volumes")
     else
-      resp = upsert_failed_task(testsuite_task, "βœ–οΈ πŸ† FAILED: Container engine daemon sockets are mounted as volumes #{emoji_security}", task_start_time)
       failures.each do |failure|
         failure.resources.each do |resource|
           puts "#{resource.kind} #{resource.name} in #{resource.namespace} namespace failed. #{failure.message}".colorize(:red)
         end
       end
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Container engine daemon sockets are mounted as volumes")
     end
   end
 end

 desc "Check if any containers are running in privileged mode"
-task "privileged" do |_, args|
-  CNFManager::Task.task_runner(args) do |args, config|
-    task_start_time = Time.utc
-    testsuite_task = "privileged"
-    Log.for(testsuite_task).info { "Starting test" }
-
+task "privileged" do |t, args|
+  CNFManager::Task.task_runner(args, task: t) do |args, config|
     white_list_container_names = config.cnf_config[:white_list_container_names]
     VERBOSE_LOGGING.info "white_list_container_names #{white_list_container_names.inspect}" if check_verbose(args)
     violation_list = [] of NamedTuple(kind: String, name: String, container: String, namespace: String)
@@ -184,383 +156,298 @@ task "privileged" do |_, args|
       end
     end
     Log.debug { "violator list: #{violation_list.flatten}" }
-    emoji_security="πŸ”“πŸ”‘"
-    if task_response
-      upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: No privileged containers #{emoji_security}", task_start_time)
+    if task_response
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "No privileged containers")
     else
-      upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Found #{violation_list.size} privileged containers #{emoji_security}", task_start_time)
       violation_list.each do |violation|
         stdout_failure("Privileged container #{violation[:container]} in #{violation[:kind]}/#{violation[:name]} in the #{violation[:namespace]} namespace")
       end
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Found #{violation_list.size} privileged containers")
     end
   end
 end

 desc "Check if any containers allow privilege escalation"
-task "privilege_escalation", ["kubescape_scan"] do |_, args|
-  CNFManager::Task.task_runner(args) do |args, config|
-    task_start_time = Time.utc
-    testsuite_task = "privilege_escalation"
-    Log.for(testsuite_task).info { "Starting test" }
-
+task "privilege_escalation", ["kubescape_scan"]
+  CNFManager::Task.task_runner(args, task: t) do |args, config|
     results_json = Kubescape.parse
     test_json = Kubescape.test_by_test_name(results_json, "Allow privilege escalation")
     test_report = Kubescape.parse_test_report(test_json)
     resource_keys = CNFManager.workload_resource_keys(args, config)
     test_report = Kubescape.filter_cnf_resources(test_report, resource_keys)
-    emoji_security="πŸ”“πŸ”‘"
-    if test_report.failed_resources.size == 0
-      upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: No containers that allow privilege escalation were found #{emoji_security}", task_start_time)
+    if test_report.failed_resources.size == 0
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "No containers that allow privilege escalation were found")
     else
-      resp = upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Found containers that allow privilege escalation #{emoji_security}", task_start_time)
       test_report.failed_resources.map {|r| stdout_failure(r.alert_message) }
       stdout_failure("Remediation: #{test_report.remediation}")
-      resp
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Found containers that allow privilege escalation")
     end
   end
 end
 
 desc "Check if an attacker can use symlink for arbitrary host file system access."
-task "symlink_file_system", ["kubescape_scan"] do |_, args|
-  CNFManager::Task.task_runner(args) do |args, config|
-    task_start_time = Time.utc
-    testsuite_task = "symlink_file_system"
-    Log.for(testsuite_task).info { "Starting test" }
-
+task "symlink_file_system", ["kubescape_scan"] do |t, args|
+  CNFManager::Task.task_runner(args, task: t) do |args, config|
     results_json = Kubescape.parse
     test_json = Kubescape.test_by_test_name(results_json, "CVE-2021-25741 - Using symlink for arbitrary host file system access.")
     test_report = Kubescape.parse_test_report(test_json)
     resource_keys = CNFManager.workload_resource_keys(args, config)
     test_report = Kubescape.filter_cnf_resources(test_report, resource_keys)
-    emoji_security="πŸ”“πŸ”‘"
     if test_report.failed_resources.size == 0
-      upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: No containers allow a symlink attack #{emoji_security}", task_start_time)
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "No containers allow a symlink attack")
     else
-      resp = upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Found containers that allow a symlink attack #{emoji_security}", task_start_time)
       test_report.failed_resources.map {|r| stdout_failure(r.alert_message) }
       stdout_failure("Remediation: #{test_report.remediation}")
-      resp
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Found containers that allow a symlink attack")
     end
   end
 end
 
 desc "Check if applications credentials are in configuration files."
-task "application_credentials", ["kubescape_scan"] do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "application_credentials" - Log.for(testsuite_task).info { "Starting test" } - +task "application_credentials", ["kubescape_scan"] do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| results_json = Kubescape.parse test_json = Kubescape.test_by_test_name(results_json, "Applications credentials in configuration files") test_report = Kubescape.parse_test_report(test_json) resource_keys = CNFManager.workload_resource_keys(args, config) test_report = Kubescape.filter_cnf_resources(test_report, resource_keys) - emoji_security="πŸ”“πŸ”‘" if test_report.failed_resources.size == 0 - upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: No applications credentials in configuration files #{emoji_security}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "No applications credentials in configuration files") else - resp = upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Found applications credentials in configuration files #{emoji_security}", task_start_time) test_report.failed_resources.map {|r| stdout_failure(r.alert_message) } stdout_failure("Remediation: #{test_report.remediation}") - resp + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Found applications credentials in configuration files") end end end desc "Check if potential attackers may gain access to a POD and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC." -task "host_network", ["kubescape_scan"] do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "host_network" - Log.for(testsuite_task).info { "Starting test" } - +task "host_network", ["kubescape_scan"] do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| results_json = Kubescape.parse test_json = Kubescape.test_by_test_name(results_json, "HostNetwork access") test_report = Kubescape.parse_test_report(test_json) resource_keys = CNFManager.workload_resource_keys(args, config) test_report = Kubescape.filter_cnf_resources(test_report, resource_keys) - emoji_security="πŸ”“πŸ”‘" if test_report.failed_resources.size == 0 - upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: No host network attached to pod #{emoji_security}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "No host network attached to pod") else - resp = upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Found host network attached to pod #{emoji_security}", task_start_time) test_report.failed_resources.map {|r| stdout_failure(r.alert_message) } stdout_failure("Remediation: #{test_report.remediation}") - resp + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Found host network attached to pod") end end end desc "Potential attacker may gain access to a POD and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for PODs that need to use them." 
-task "service_account_mapping", ["kubescape_scan"] do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "service_account_mapping" - Log.for(testsuite_task).info { "Starting test" } - +task "service_account_mapping", ["kubescape_scan"] do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| results_json = Kubescape.parse test_json = Kubescape.test_by_test_name(results_json, "Automatic mapping of service account") test_report = Kubescape.parse_test_report(test_json) resource_keys = CNFManager.workload_resource_keys(args, config) test_report = Kubescape.filter_cnf_resources(test_report, resource_keys) - emoji_security="πŸ”“πŸ”‘" - if test_report.failed_resources.size == 0 - upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: No service accounts automatically mapped #{emoji_security}", task_start_time) + if test_report.failed_resources.size == 0 + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "No service accounts automatically mapped") else - resp = upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Service accounts automatically mapped #{emoji_security}", task_start_time) test_report.failed_resources.map {|r| stdout_failure(r.alert_message) } stdout_failure("Remediation: #{test_report.remediation}") - resp + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Service accounts automatically mapped") end end end desc "Check if security services are being used to harden the application" -task "linux_hardening", ["kubescape_scan"] do |_, args| +task "linux_hardening", ["kubescape_scan"] do |t, args| next if args.named["offline"]? - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "linux_hardening" - Log.for(testsuite_task).info { "Starting test" } - + CNFManager::Task.task_runner(args, task: t) do |args, config| results_json = Kubescape.parse test_json = Kubescape.test_by_test_name(results_json, "Linux hardening") test_report = Kubescape.parse_test_report(test_json) resource_keys = CNFManager.workload_resource_keys(args, config) test_report = Kubescape.filter_cnf_resources(test_report, resource_keys) - emoji_security = "πŸ”“πŸ”‘" if test_report.failed_resources.size == 0 - upsert_passed_task(testsuite_task, "βœ”οΈ ✨PASSED: Security services are being used to harden applications #{emoji_security}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Security services are being used to harden applications") else - resp = upsert_failed_task(testsuite_task, "βœ–οΈ ✨FAILED: Found resources that do not use security services #{emoji_security}", task_start_time) - test_report.failed_resources.map {|r| stdout_failure(r.alert_message) } - stdout_failure("Remediation: #{test_report.remediation}") - resp + test_report.failed_resources.map {|r| stdout_failure(r.alert_message) } + stdout_failure("Remediation: #{test_report.remediation}") + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Found resources that do not use security services") end end end desc "Check if the containers have insecure capabilities." -task "insecure_capabilities", ["kubescape_scan"] do |_, args| +task "insecure_capabilities", ["kubescape_scan"] do |t, args| next if args.named["offline"]? 
-  CNFManager::Task.task_runner(args) do |args, config|
-    task_start_time = Time.utc
-    testsuite_task = "insecure_capabilities"
-    Log.for(testsuite_task).info { "Starting test" }
-
+  CNFManager::Task.task_runner(args, task: t) do |args, config|
     results_json = Kubescape.parse
     test_json = Kubescape.test_by_test_name(results_json, "Insecure capabilities")
     test_report = Kubescape.parse_test_report(test_json)
     resource_keys = CNFManager.workload_resource_keys(args, config)
     test_report = Kubescape.filter_cnf_resources(test_report, resource_keys)
-    emoji_security = "πŸ”“πŸ”‘"
     if test_report.failed_resources.size == 0
-      upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: Containers with insecure capabilities were not found #{emoji_security}", task_start_time)
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Containers with insecure capabilities were not found")
     else
-      resp = upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Found containers with insecure capabilities #{emoji_security}", task_start_time)
       test_report.failed_resources.map {|r| stdout_failure(r.alert_message) }
       stdout_failure("Remediation: #{test_report.remediation}")
-      resp
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Found containers with insecure capabilities")
     end
   end
 end
 
 desc "Check if the containers have resource limits defined."
-task "resource_policies", ["kubescape_scan"] do |_, args|
+task "resource_policies", ["kubescape_scan"] do |t, args|
   next if args.named["offline"]?
-  CNFManager::Task.task_runner(args) do |args, config|
-    task_start_time = Time.utc
-    testsuite_task = "resource_policies"
-    Log.for(testsuite_task).info { "Starting test" }
-
+  CNFManager::Task.task_runner(args, task: t) do |args, config|
     results_json = Kubescape.parse
     test_json = Kubescape.test_by_test_name(results_json, "Resource policies")
     test_report = Kubescape.parse_test_report(test_json)
     resource_keys = CNFManager.workload_resource_keys(args, config)
     test_report = Kubescape.filter_cnf_resources(test_report, resource_keys)
-    emoji_security = "πŸ”“πŸ”‘"
     if test_report.failed_resources.size == 0
-      upsert_passed_task(testsuite_task, "βœ”οΈ πŸ† PASSED: Containers have resource limits defined #{emoji_security}", task_start_time)
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Containers have resource limits defined")
     else
-      resp = upsert_failed_task(testsuite_task, "βœ–οΈ πŸ† FAILED: Found containers without resource limits defined #{emoji_security}", task_start_time)
       test_report.failed_resources.map {|r| stdout_failure(r.alert_message) }
       stdout_failure("Remediation: #{test_report.remediation}")
-      resp
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Found containers without resource limits defined")
     end
   end
 end
 
 desc "Check Ingress and Egress traffic policy"
-task "ingress_egress_blocked", ["kubescape_scan"] do |_, args|
+task "ingress_egress_blocked", ["kubescape_scan"] do |t, args|
   next if args.named["offline"]?
-  CNFManager::Task.task_runner(args) do |args, config|
-    task_start_time = Time.utc
-    testsuite_task = "ingress_egress_blocked"
-    Log.for(testsuite_task).info { "Starting test" }
-
+  CNFManager::Task.task_runner(args, task: t) do |args, config|
     results_json = Kubescape.parse
     test_json = Kubescape.test_by_test_name(results_json, "Ingress and Egress blocked")
     test_report = Kubescape.parse_test_report(test_json)
     resource_keys = CNFManager.workload_resource_keys(args, config)
     test_report = Kubescape.filter_cnf_resources(test_report, resource_keys)
-    emoji_security = "πŸ”“πŸ”‘"
     if test_report.failed_resources.size == 0
-      upsert_passed_task(testsuite_task, "βœ”οΈ ✨PASSED: Ingress and Egress traffic blocked on pods #{emoji_security}", task_start_time)
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Ingress and Egress traffic blocked on pods")
     else
-      resp = upsert_failed_task(testsuite_task, "βœ–οΈ ✨FAILED: Ingress and Egress traffic not blocked on pods #{emoji_security}", task_start_time)
       test_report.failed_resources.map {|r| stdout_failure(r.alert_message) }
       stdout_failure("Remediation: #{test_report.remediation}")
-      resp
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Ingress and Egress traffic not blocked on pods")
     end
   end
 end
 
 desc "Check the Host PID/IPC privileges of the containers"
-task "host_pid_ipc_privileges", ["kubescape_scan"] do |_, args|
+task "host_pid_ipc_privileges", ["kubescape_scan"] do |t, args|
   next if args.named["offline"]?
-  CNFManager::Task.task_runner(args) do |args, config|
-    task_start_time = Time.utc
-    testsuite_task = "host_pid_ipc_privileges"
-    Log.for(testsuite_task).info { "Starting test" }
-
+  CNFManager::Task.task_runner(args, task: t) do |args, config|
     results_json = Kubescape.parse
     test_json = Kubescape.test_by_test_name(results_json, "Host PID/IPC privileges")
     test_report = Kubescape.parse_test_report(test_json)
     resource_keys = CNFManager.workload_resource_keys(args, config)
     test_report = Kubescape.filter_cnf_resources(test_report, resource_keys)
-    emoji_security = "πŸ”“πŸ”‘"
     if test_report.failed_resources.size == 0
-      upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: No containers with hostPID and hostIPC privileges #{emoji_security}", task_start_time)
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "No containers with hostPID and hostIPC privileges")
     else
-      resp = upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Found containers with hostPID and hostIPC privileges #{emoji_security}", task_start_time)
       test_report.failed_resources.map {|r| stdout_failure(r.alert_message) }
       stdout_failure("Remediation: #{test_report.remediation}")
-      resp
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Found containers with hostPID and hostIPC privileges")
     end
   end
 end
 
 desc "Check if the containers are running with non-root user with non-root group membership"
-task "non_root_containers", ["kubescape_scan"] do |_, args|
+task "non_root_containers", ["kubescape_scan"] do |t, args|
   next if args.named["offline"]?
-  CNFManager::Task.task_runner(args) do |args, config|
-    task_start_time = Time.utc
-    testsuite_task = "non_root_containers"
-    Log.for(testsuite_task).info { "Starting test" }
-
+  CNFManager::Task.task_runner(args, task: t) do |args, config|
     results_json = Kubescape.parse
     test_json = Kubescape.test_by_test_name(results_json, "Non-root containers")
     test_report = Kubescape.parse_test_report(test_json)
     resource_keys = CNFManager.workload_resource_keys(args, config)
     test_report = Kubescape.filter_cnf_resources(test_report, resource_keys)
-    emoji_security = "πŸ”“πŸ”‘"
     if test_report.failed_resources.size == 0
-      upsert_passed_task(testsuite_task, "βœ”οΈ πŸ† PASSED: Containers are running with non-root user with non-root group membership #{emoji_security}", task_start_time)
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Containers are running with non-root user with non-root group membership")
     else
-      resp = upsert_failed_task(testsuite_task, "βœ–οΈ πŸ† FAILED: Found containers running with root user or user with root group membership #{emoji_security}", task_start_time)
       test_report.failed_resources.map {|r| stdout_failure(r.alert_message) }
       stdout_failure("Remediation: #{test_report.remediation}")
-      resp
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Found containers running with root user or user with root group membership")
    end
  end
end
 
 desc "Check that privileged containers are not used"
-task "privileged_containers", ["kubescape_scan" ] do |_, args|
+task "privileged_containers", ["kubescape_scan" ] do |t, args|
   next if args.named["offline"]?
-  CNFManager::Task.task_runner(args) do |args, config|
-    task_start_time = Time.utc
-    testsuite_task = "privileged_containers"
-    Log.for(testsuite_task).info { "Starting test" }
-
+  CNFManager::Task.task_runner(args, task: t) do |args, config|
     results_json = Kubescape.parse
     test_json = Kubescape.test_by_test_name(results_json, "Privileged container")
     test_report = Kubescape.parse_test_report(test_json)
     resource_keys = CNFManager.workload_resource_keys(args, config)
     test_report = Kubescape.filter_cnf_resources(test_report, resource_keys)
-    emoji_security = "πŸ”“πŸ”‘"
     #todo whitelist
     if test_report.failed_resources.size == 0
-      upsert_passed_task(testsuite_task, "βœ”οΈ πŸ† PASSED: No privileged containers were found #{emoji_security}", task_start_time)
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "No privileged containers were found")
     else
-      resp = upsert_failed_task(testsuite_task, "βœ–οΈ πŸ† FAILED: Found privileged containers #{emoji_security}", task_start_time)
       test_report.failed_resources.map {|r| stdout_failure(r.alert_message) }
       stdout_failure("Remediation: #{test_report.remediation}")
-      resp
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Found privileged containers")
     end
   end
 end
 
 desc "Check if containers have immutable file systems"
-task "immutable_file_systems", ["kubescape_scan"] do |_, args|
+task "immutable_file_systems", ["kubescape_scan"] do |t, args|
   next if args.named["offline"]?
-  CNFManager::Task.task_runner(args) do |args, config|
-    task_start_time = Time.utc
-    testsuite_task = "immutable_file_systems"
-    Log.for(testsuite_task).info { "Starting test" }
-
+  CNFManager::Task.task_runner(args, task: t) do |args, config|
     results_json = Kubescape.parse
     test_json = Kubescape.test_by_test_name(results_json, "Immutable container filesystem")
     test_report = Kubescape.parse_test_report(test_json)
     resource_keys = CNFManager.workload_resource_keys(args, config)
     test_report = Kubescape.filter_cnf_resources(test_report, resource_keys)
-    emoji_security = "πŸ”“πŸ”‘"
     if test_report.failed_resources.size == 0
-      upsert_passed_task(testsuite_task, "βœ”οΈ ✨PASSED: Containers have immutable file systems #{emoji_security}", task_start_time)
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Containers have immutable file systems")
     else
-      resp = upsert_failed_task(testsuite_task, "βœ–οΈ ✨FAILED: Found containers with mutable file systems #{emoji_security}", task_start_time)
       test_report.failed_resources.map {|r| stdout_failure(r.alert_message) }
       stdout_failure("Remediation: #{test_report.remediation}")
-      resp
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Found containers with mutable file systems")
     end
   end
 end
 
 desc "Check if containers have hostPath mounts"
-task "hostpath_mounts", ["kubescape_scan"] do |_, args|
+task "hostpath_mounts", ["kubescape_scan"] do |t, args|
   next if args.named["offline"]?
-  CNFManager::Task.task_runner(args) do |args, config|
-    task_start_time = Time.utc
-    testsuite_task = "hostpath_mounts"
-    Log.for(testsuite_task).info { "Starting test" }
-
+  CNFManager::Task.task_runner(args, task: t) do |args, config|
     results_json = Kubescape.parse
     test_json = Kubescape.test_by_test_name(results_json, "Allowed hostPath")
     test_report = Kubescape.parse_test_report(test_json)
     resource_keys = CNFManager.workload_resource_keys(args, config)
     test_report = Kubescape.filter_cnf_resources(test_report, resource_keys)
-    emoji_security = "πŸ”“πŸ”‘"
     if test_report.failed_resources.size == 0
-      upsert_passed_task(testsuite_task, "βœ”οΈ PASSED: Containers do not have hostPath mounts #{emoji_security}", task_start_time)
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Containers do not have hostPath mounts")
     else
-      resp = upsert_failed_task(testsuite_task, "βœ–οΈ FAILED: Found containers with hostPath mounts #{emoji_security}", task_start_time)
       test_report.failed_resources.map {|r| stdout_failure(r.alert_message) }
       stdout_failure("Remediation: #{test_report.remediation}")
-      resp
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Found containers with hostPath mounts")
     end
   end
 end
diff --git a/src/tasks/workload/state.cr b/src/tasks/workload/state.cr
index ff3c91702..0f00b28f2 100644
--- a/src/tasks/workload/state.cr
+++ b/src/tasks/workload/state.cr
@@ -217,13 +217,8 @@ end
 
 desc "Does the CNF crash when node-drain occurs"
 task "node_drain", ["install_litmus"] do |t, args|
-  CNFManager::Task.task_runner(args) do |args, config|
-    task_start_time = Time.utc
-    testsuite_task = "node_drain"
-    Log.for(testsuite_task).info { "Starting test" }
-
+  CNFManager::Task.task_runner(args, task: t) do |args, config|
     skipped = false
-    Log.debug { "cnf_config: #{config}" }
     destination_cnf_dir = config.cnf_config[:destination_cnf_dir]
     task_response = CNFManager.workload_resource_test(args, config) do |resource, container, initialized|
       app_namespace = resource[:namespace] || config.cnf_config[:helm_install_namespace]
@@ -261,7 +256,7 @@ task "node_drain", ["install_litmus"] do |t, args|
       if spec_labels.as_h.size > 0
         test_passed = true
       else
-        stdout_failure("No resource label found for #{testsuite_task} test for resource: #{resource["kind"]}/#{resource["name"]} in #{resource["namespace"]} namespace")
+        stdout_failure("No resource label found for #{t.name} test for resource: #{resource["kind"]}/#{resource["name"]} in #{resource["namespace"]} namespace")
         test_passed = false
       end
       if test_passed
@@ -319,10 +314,10 @@ task "node_drain", ["install_litmus"] do |t, args|
 
         # rbac_url = "https://hub.litmuschaos.io/api/chaos/#{LitmusManager::Version}?file=charts/generic/node-drain/rbac.yaml"
         rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/charts/generic/node-drain/rbac.yaml"
 
-        experiment_path = LitmusManager.download_template(experiment_url, "#{testsuite_task}_experiment.yaml")
+        experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml")
         KubectlClient::Apply.file(experiment_path, namespace: app_namespace)
 
-        rbac_path = LitmusManager.download_template(rbac_url, "#{testsuite_task}_rbac.yaml")
+        rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml")
         rbac_yaml = File.read(rbac_path)
         rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}")
         File.write(rbac_path, rbac_yaml)
@@ -367,26 +362,19 @@ task "node_drain", ["install_litmus"] do |t, args|
       test_passed
     end
     if skipped
-      Log.for(testsuite_task).warn{"The node_drain test needs minimum 2 schedulable nodes, current number of nodes: #{KubectlClient::Get.schedulable_nodes_list.size}"}
-      resp = upsert_skipped_task(testsuite_task,"⏭️ πŸ† SKIPPED: node_drain chaos test requires the cluster to have atleast two schedulable nodes πŸ—‘οΈπŸ’€β™»οΈ", task_start_time)
+      Log.for(t.name).warn{"The node_drain test needs a minimum of 2 schedulable nodes, current number of nodes: #{KubectlClient::Get.schedulable_nodes_list.size}"}
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Skipped, "node_drain chaos test requires the cluster to have at least two schedulable nodes")
    elsif task_response
-      resp = upsert_passed_task(testsuite_task,"βœ”οΈ πŸ† PASSED: node_drain chaos test passed πŸ—‘οΈπŸ’€β™»οΈ", task_start_time)
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "node_drain chaos test passed")
    else
-      resp = upsert_failed_task(testsuite_task,"βœ–οΈ πŸ† FAILED: node_drain chaos test failed πŸ—‘οΈπŸ’€β™»οΈ", task_start_time)
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "node_drain chaos test failed")
     end
   end
 end
 
 desc "Does the CNF use an elastic persistent volume"
-task "elastic_volumes" do |_, args|
-  CNFManager::Task.task_runner(args) do |args, config|
-    task_start_time = Time.utc
-    testsuite_task = "elastic_volumes"
-    Log.for(testsuite_task).info { "Starting test" }
-
-    Log.info {"cnf_config: #{config}"}
-
-    emoji_probe="🧫"
+task "elastic_volumes" do |t, args|
+  CNFManager::Task.task_runner(args, task: t) do |args, config|
     all_volumes_elastic = true
     volumes_used = false
     task_response = CNFManager.workload_resource_test(args, config, check_containers: false) do |resource, containers, volumes, initialized|
@@ -402,7 +390,7 @@ task "elastic_volumes" do |_, args|
 
       full_resource = KubectlClient::Get.resource(resource["kind"], resource["name"], namespace)
       elastic_result = WorkloadResource.elastic?(full_resource, volumes.as_a, namespace)
-      Log.for("#{testsuite_task}:elastic_result").info {elastic_result}
+      Log.for("#{t.name}:elastic_result").info {elastic_result}
Log.for("#{t.name}:elastic_result").info {elastic_result} unless elastic_result all_volumes_elastic = false end @@ -410,13 +398,12 @@ task "elastic_volumes" do |_, args| Log.for("elastic_volumes:result").info { "Volumes used: #{volumes_used}; Elastic?: #{all_volumes_elastic}" } if volumes_used == false - resp = upsert_skipped_task(testsuite_task,"⏭️ ✨SKIPPED: No volumes are used #{emoji_probe}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Skipped, "No volumes are used") elsif all_volumes_elastic - resp = upsert_passed_task(testsuite_task,"βœ”οΈ ✨PASSED: All used volumes are elastic #{emoji_probe}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "All used volumes are elastic") else - resp = upsert_failed_task(testsuite_task,"βœ–οΈ ✨FAILED: Some of the used volumes are not elastic #{emoji_probe}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "Some of the used volumes are not elastic") end - resp end # TODO When using a default StorageClass, the storageclass name will be populated in the persistent volumes claim post-creation. @@ -430,16 +417,10 @@ task "elastic_volumes" do |_, args| end desc "Does the CNF use a database which uses perisistence in a cloud native way" -task "database_persistence" do |_, args| - CNFManager::Task.task_runner(args) do |args, config| - task_start_time = Time.utc - testsuite_task = "database_persistence" - Log.for(testsuite_task).info { "Starting test" } - - Log.info {"cnf_config: #{config}"} +task "database_persistence" do |t, args| + CNFManager::Task.task_runner(args, task: t) do |args, config| # VERBOSE_LOGGING.info "database_persistence" if check_verbose(args) # todo K8s Database persistence test: if a mysql (or any popular database) image is installed: - emoji_probe="🧫" all_mysql_elastic_statefulset = true match = Mysql.match Log.info {"database_persistence mysql: #{match}"} @@ -476,16 +457,14 @@ task "database_persistence" do |_, args| end end end - failed_emoji = "(ΰ¦­_ΰ¦­) ήƒ πŸ’Ύ" if all_mysql_elastic_statefulset - resp = upsert_dynamic_task(testsuite_task,CNFManager::ResultStatus::Pass5, "βœ”οΈ PASSED: CNF uses database with cloud-native persistence #{emoji_probe}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Pass5, "CNF uses database with cloud-native persistence") else - resp = upsert_failed_task(testsuite_task,"βœ–οΈ FAILED: CNF uses database without cloud-native persistence #{failed_emoji}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "CNF uses database without cloud-native persistence (ΰ¦­_ΰ¦­) ήƒ πŸ’Ύ") end else - resp = upsert_skipped_task(testsuite_task, "⏭️ SKIPPED: CNF does not use database #{emoji_probe}", task_start_time) + CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Skipped, "CNF does not use database") end - resp end # TODO When using a default StorageClass, the storageclass name will be populated in the persistent volumes claim post-creation. 
@@ -499,15 +478,8 @@ task "database_persistence" do |_, args|
 end
 
 desc "Does the CNF use a non-cloud native data store: hostPath volume"
-task "volume_hostpath_not_found" do |_, args|
-  CNFManager::Task.task_runner(args) do |args, config|
-    task_start_time = Time.utc
-    testsuite_task = "volume_hostpath_not_found"
-    Log.for(testsuite_task).info { "Starting test" }
-
-    failed_emoji = "(ΰ¦­_ΰ¦­) ήƒ πŸ’Ύ"
-    passed_emoji = "πŸ–₯️ πŸ’Ύ"
-    LOGGING.debug "cnf_config: #{config}"
+task "volume_hostpath_not_found" do |t, args|
+  CNFManager::Task.task_runner(args, task: t) do |args, config|
     destination_cnf_dir = config.cnf_config[:destination_cnf_dir]
     task_response = CNFManager.cnf_workload_resources(args, config) do | resource|
       hostPath_found = nil
@@ -525,29 +497,23 @@ task "volume_hostpath_not_found" do |_, args|
         end
      rescue ex
        VERBOSE_LOGGING.error ex.message if check_verbose(args)
-        puts "Rescued: On resource #{resource["metadata"]["name"]?} of kind #{resource["kind"]}, volumes not found. #{passed_emoji}".colorize(:yellow)
+        puts "Rescued: On resource #{resource["metadata"]["name"]?} of kind #{resource["kind"]}, volumes not found.".colorize(:yellow)
         hostPath_not_found = true
       end
       hostPath_not_found
     end
 
     if task_response.any?(false)
-      upsert_failed_task(testsuite_task,"βœ–οΈ FAILED: hostPath volumes found #{failed_emoji}", task_start_time)
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "hostPath volumes found (ΰ¦­_ΰ¦­) ήƒ")
     else
-      upsert_passed_task(testsuite_task,"βœ”οΈ PASSED: hostPath volumes not found #{passed_emoji}", task_start_time)
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "hostPath volumes not found πŸ–₯️")
     end
   end
 end
 
 desc "Does the CNF use a non-cloud native data store: local volumes on the node?"
-task "no_local_volume_configuration" do |_, args|
-  failed_emoji = "(ΰ¦­_ΰ¦­) ήƒ πŸ’Ύ"
-  passed_emoji = "πŸ–₯️ πŸ’Ύ"
-  CNFManager::Task.task_runner(args) do |args, config|
-    task_start_time = Time.utc
-    testsuite_task = "no_local_volume_configuration"
-    Log.for(testsuite_task).info { "Starting test" }
-
+task "no_local_volume_configuration" do |t, args|
+  CNFManager::Task.task_runner(args, task: t) do |args, config|
     destination_cnf_dir = config.cnf_config[:destination_cnf_dir]
     task_response = CNFManager.cnf_workload_resources(args, config) do | resource|
       hostPath_found = nil
@@ -560,7 +526,7 @@ task "no_local_volume_configuration" do |_, args|
         if resource["spec"].as_h["template"].as_h["spec"].as_h["volumes"]?
           volumes = resource["spec"].as_h["template"].as_h["spec"].as_h["volumes"].as_a
         end
-        Log.for(testsuite_task).debug { "volumes: #{volumes}" }
+        Log.for(t.name).debug { "volumes: #{volumes}" }
         persistent_volume_claim_names = volumes.map do |volume|
           # get persistent volume claim that matches persistent volume claim name
          if volume.as_h["persistentVolumeClaim"]? && volume.as_h["persistentVolumeClaim"].as_h["claimName"]?
@@ -569,7 +535,7 @@ task "no_local_volume_configuration" do |_, args|
             nil
           end
         end.compact
-        Log.for(testsuite_task).debug { "persistent volume claim names: #{persistent_volume_claim_names}" }
+        Log.for(t.name).debug { "persistent volume claim names: #{persistent_volume_claim_names}" }
 
         # TODO (optional) check storage class of persistent volume claim
         # loop through all pvc names
@@ -584,23 +550,23 @@ task "no_local_volume_configuration" do |_, args|
               local_storage_not_found = false
             end
          rescue ex
-            Log.for(testsuite_task).info { ex.message }
+            Log.for(t.name).info { ex.message }
             local_storage_not_found = true
           end
         end
      rescue ex
-        Log.for(testsuite_task).error { ex.message } if check_verbose(args)
-        puts "Rescued: On resource #{resource["metadata"]["name"]?} of kind #{resource["kind"]}, local storage configuration volumes not found #{passed_emoji}".colorize(:yellow)
+        Log.for(t.name).error { ex.message } if check_verbose(args)
+        puts "Rescued: On resource #{resource["metadata"]["name"]?} of kind #{resource["kind"]}, local storage configuration volumes not found".colorize(:yellow)
         local_storage_not_found = true
       end
       local_storage_not_found
     end
-    if task_response.any?(false)
-      upsert_failed_task(testsuite_task,"βœ–οΈ ✨FAILED: local storage configuration volumes found #{failed_emoji}", task_start_time)
+    if task_response.any?(false)
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "local storage configuration volumes found (ΰ¦­_ΰ¦­) ήƒ")
     else
-      upsert_passed_task(testsuite_task,"βœ”οΈ ✨PASSED: local storage configuration volumes not found #{passed_emoji}", task_start_time)
+      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "local storage configuration volumes not found πŸ–₯️")
     end
   end
 end
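
For reviewers tracking the refactor, the minimal sketch below (not part of the patch) illustrates the centralized result-logging pattern that every hunk above applies: the task receives itself as `t`, passes `task: t` to `CNFManager::Task.task_runner`, and returns a `CNFManager::TestcaseResult` instead of calling the old `upsert_*_task` helpers with hand-built emoji strings. The example task name and its check are hypothetical; only `task_runner`, `TestcaseResult`, and `ResultStatus` come from this patch, and start-time capture, logging, and result persistence are assumed to happen inside `task_runner`.

desc "Hypothetical example: fails when the CNF defines no workload resources"
task "example_check" do |t, args|
  CNFManager::Task.task_runner(args, task: t) do |args, config|
    # task_runner is assumed to log the start/end of the test and to record the
    # returned TestcaseResult centrally, so the block only decides the outcome.
    resource_keys = CNFManager.workload_resource_keys(args, config)
    if resource_keys.size > 0
      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Passed, "Workload resources found")
    else
      CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Failed, "No workload resources found")
    end
  end
end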