Skip to content

Commit

Permalink
Merge branch 'pr-1862-1893' into pr/1899
Browse files Browse the repository at this point in the history
  • Loading branch information
HashNuke committed Feb 20, 2024
2 parents 3095507 + 0483561 commit c0f2fef
Show file tree
Hide file tree
Showing 12 changed files with 43 additions and 153 deletions.
8 changes: 0 additions & 8 deletions RATIONALE.md
Original file line number Diff line number Diff line change
Expand Up @@ -222,14 +222,6 @@ Binnie, Chris; McCune, Rory (2021-06-17T23:58:59). Cloud Native Security . Wiley

> Service externalIPs can be used for a MITM attack (CVE-2020-8554). Restrict externalIPs or limit to a known set of addresses. See: https://github.com/kyverno/kyverno/issues/1367
#### *To check if any containers are running as a root user (checks the user outside the container that is running dockerd)*: [non_root_user](docs/LIST_OF_TESTS.md#root-user)

> *"Even with other security controls used within a Linux system running containers,
such as namespaces that segregate access between pods in Kubernetes and OpenShift or
containers within a runtime, it is highly advisable never to run a container as the
root user."* Binnie, Chris; McCune, Rory (2021-06-17T23:58:59). Cloud Native Security .
Wiley. Kindle Edition.

#### *To check if any containers allow for privilege escalation*: [privilege_escalation](docs/LIST_OF_TESTS.md#privilege-escalation)

> *When [privilege escalation](https://kubernetes.io/docs/concepts/policy/pod-security-policy/#privilege-escalation) is [enabled for a container](https://hub.armo.cloud/docs/c-0016), it will allow setuid binaries to change the effective user ID, allowing processes to turn on extra capabilities.
Expand Down
2 changes: 1 addition & 1 deletion config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -15,5 +15,5 @@ toggles:
# kind) will be changed, rebooted, chaos tested, etc
- name: destructive
toggle_on: false
loglevel: error
loglevel: info

2 changes: 0 additions & 2 deletions embedded_files/points.yml
Original file line number Diff line number Diff line change
Expand Up @@ -44,8 +44,6 @@
- name: privileged
tags: security, dynamic, workload
# required: true
- name: non_root_user
tags: security, dynamic, workload
- name: privilege_escalation
tags: security, dynamic, workload, cert, normal
- name: symlink_file_system
Expand Down
2 changes: 1 addition & 1 deletion spec/cnf_testsuite_all/cnf_testsuite_spec.cr
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ describe CnfTestSuite do
# begin
# LOGGING.info `./cnf-testsuite cnf_setup cnf-config=./sample-cnfs/sample_privileged_cnf/cnf-testsuite.yml verbose wait_count=0`
# $?.success?.should be_true
# response_s = `./cnf-testsuite workload ~automatic_cnf_install ~ensure_cnf_installed ~configuration_file_setup ~compatibility ~state ~scalability ~configuration_lifecycle ~observability ~installability ~hardware_and_scheduling ~microservice ~resilience ~non_root_user`
# response_s = `./cnf-testsuite workload ~automatic_cnf_install ~ensure_cnf_installed ~configuration_file_setup ~compatibility ~state ~scalability ~configuration_lifecycle ~observability ~installability ~hardware_and_scheduling ~microservice ~resilience`
# LOGGING.info response_s
# $?.success?.should be_false
# (/Found.*privileged containers.*/ =~ response_s).should_not be_nil
Expand Down
3 changes: 2 additions & 1 deletion spec/platform/observability_spec.cr
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,8 @@ describe "Platform Observability" do

LOGGING.info "Installing prometheus-node-exporter"
helm = Helm::BinarySingleton.helm
resp = `#{helm} install node-exporter stable/prometheus-node-exporter`
Helm.helm_repo_add("prometheus-community","https://prometheus-community.github.io/helm-charts")
resp = `#{helm} install node-exporter prometheus-community/prometheus-node-exporter`
LOGGING.info resp

pod_ready = ""
Expand Down
2 changes: 1 addition & 1 deletion spec/utils/cnf_manager_spec.cr
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,7 @@ describe "SampleUtils" do

it "'CNFManager::Points.all_task_test_names' should return all tasks names", tags: ["points"] do
CNFManager::Points.clean_results_yml
tags = ["alpha_k8s_apis", "application_credentials", "cni_compatible", "container_sock_mounts", "database_persistence", "default_namespace", "disk_fill", "elastic_volumes", "external_ips", "hardcoded_ip_addresses_in_k8s_runtime_configuration", "helm_chart_published", "helm_chart_valid", "helm_deploy", "host_network", "host_pid_ipc_privileges", "hostpath_mounts", "hostport_not_used", "immutable_configmap", "immutable_file_systems", "increase_decrease_capacity", "ingress_egress_blocked", "insecure_capabilities", "ip_addresses", "latest_tag", "linux_hardening", "liveness", "log_output", "no_local_volume_configuration", "node_drain", "nodeport_not_used", "non_root_containers", "non_root_user", "open_metrics", "operator_installed", "oran_e2_connection", "pod_delete", "pod_dns_error", "pod_io_stress", "pod_memory_hog", "pod_network_corruption", "pod_network_duplication", "pod_network_latency", "privilege_escalation", "privileged", "privileged_containers", "prometheus_traffic", "readiness", "reasonable_image_size", "reasonable_startup_time", "require_labels", "resource_policies", "rollback", "rolling_downgrade", "rolling_update", "rolling_version_change", "routed_logs", "secrets_used", "selinux_options", "service_account_mapping", "service_discovery", "shared_database", "sig_term_handled", "single_process_type", "smf_upf_heartbeat", "specialized_init_system", "suci_enabled", "symlink_file_system", "sysctls", "tracing", "versioned_tag", "volume_hostpath_not_found"]
tags = ["alpha_k8s_apis", "application_credentials", "cni_compatible", "container_sock_mounts", "database_persistence", "default_namespace", "disk_fill", "elastic_volumes", "external_ips", "hardcoded_ip_addresses_in_k8s_runtime_configuration", "helm_chart_published", "helm_chart_valid", "helm_deploy", "host_network", "host_pid_ipc_privileges", "hostpath_mounts", "hostport_not_used", "immutable_configmap", "immutable_file_systems", "increase_decrease_capacity", "ingress_egress_blocked", "insecure_capabilities", "ip_addresses", "latest_tag", "linux_hardening", "liveness", "log_output", "no_local_volume_configuration", "node_drain", "nodeport_not_used", "non_root_containers", "open_metrics", "operator_installed", "oran_e2_connection", "pod_delete", "pod_dns_error", "pod_io_stress", "pod_memory_hog", "pod_network_corruption", "pod_network_duplication", "pod_network_latency", "privilege_escalation", "privileged", "privileged_containers", "prometheus_traffic", "readiness", "reasonable_image_size", "reasonable_startup_time", "require_labels", "resource_policies", "rollback", "rolling_downgrade", "rolling_update", "rolling_version_change", "routed_logs", "secrets_used", "selinux_options", "service_account_mapping", "service_discovery", "shared_database", "sig_term_handled", "single_process_type", "smf_upf_heartbeat", "specialized_init_system", "suci_enabled", "symlink_file_system", "sysctls", "tracing", "versioned_tag", "volume_hostpath_not_found"]
(CNFManager::Points.all_task_test_names()).sort.should eq(tags.sort)
end

Expand Down
9 changes: 5 additions & 4 deletions spec/utils/utils_spec.cr
Original file line number Diff line number Diff line change
Expand Up @@ -191,12 +191,13 @@ describe "Utils" do
end

it "'task_runner' should run a test against a single cnf if passed a cnf-config argument even if there are multiple cnfs installed", tags: ["task_runner"] do
Log.info {`./cnf-testsuite cnf_setup cnf-config=sample-cnfs/sample-generic-cnf/cnf-testsuite.yml`}
Log.info {`./cnf-testsuite cnf_setup cnf-config=sample-cnfs/sample_privileged_cnf/cnf-testsuite.yml`}

response_s = `./cnf-testsuite cnf_setup cnf-config=sample-cnfs/sample-generic-cnf/cnf-testsuite.yml`
Log.info {response_s}
response_s = `./cnf-testsuite cnf_setup cnf-config=sample-cnfs/sample_privileged_cnf/cnf-testsuite.yml`
Log.info {response_s}
resp = `./cnf-testsuite privileged`
Log.info { resp }
(resp).includes?("✖️ FAILED: Found 1 privileged containers").should be_true
(resp).includes?("FAILED: Found 1 privileged containers").should be_true
ensure
response_s = `./cnf-testsuite cnf_cleanup cnf-config=sample-cnfs/sample-generic-cnf/cnf-testsuite.yml`
Log.info { response_s }
Expand Down
42 changes: 0 additions & 42 deletions spec/workload/security_spec.cr
Original file line number Diff line number Diff line change
Expand Up @@ -4,48 +4,6 @@ require "../../src/tasks/utils/utils.cr"

describe "Security" do

# Happy-path spec for the `non_root_user` workload test: installs a sample CNF
# built to run without a root user, invokes the task, and expects the
# "Root user not found" pass message in its output.
it "'non_root_user' should pass with a non-root cnf", tags: ["security"] do
begin
LOGGING.info `./cnf-testsuite cnf_setup cnf-config=sample-cnfs/sample_nonroot/cnf-testsuite.yml`
response_s = `./cnf-testsuite non_root_user verbose`
LOGGING.info response_s
# The CLI should exit 0 regardless of pass/fail; the verdict is in the output text.
$?.success?.should be_true
(/Root user not found/ =~ response_s).should_not be_nil
ensure
# Tear down the sample CNF and the Falco daemonset the task depends on,
# then block until the daemonset is actually gone so later specs start clean.
LOGGING.info `./cnf-testsuite cnf_cleanup cnf-config=sample-cnfs/sample_nonroot/cnf-testsuite.yml`
LOGGING.debug `./cnf-testsuite uninstall_falco`
KubectlClient::Get.resource_wait_for_uninstall("DaemonSet", "falco")
end
end

# Negative spec for `non_root_user`: installs a sample CNF whose containers run
# as root (non-helm manifests) and expects the "Root user found" failure message.
it "'non_root_user' should fail with a root cnf", tags: ["security"] do
begin
LOGGING.info `./cnf-testsuite cnf_setup cnf-config=sample-cnfs/k8s-non-helm/cnf-testsuite.yml`
response_s = `./cnf-testsuite non_root_user verbose`
LOGGING.info response_s
# Exit status stays 0 even on a failed check; only the output text carries the verdict.
$?.success?.should be_true
(/Root user found/ =~ response_s).should_not be_nil
ensure
# Remove the sample CNF and Falco, waiting for the daemonset to disappear
# so subsequent specs are not affected by leftover cluster state.
LOGGING.info `./cnf-testsuite cnf_cleanup cnf-config=sample-cnfs/k8s-non-helm/cnf-testsuite.yml`
LOGGING.debug `./cnf-testsuite uninstall_falco`
KubectlClient::Get.resource_wait_for_uninstall("DaemonSet", "falco")
end
end

# Same negative scenario as above, but the sample CNF deploys into a
# non-default namespace ("ndn") to prove detection is not namespace-bound.
it "'non_root_user' should fail with a root cnf using a non-default namespace", tags: ["security"] do
begin
LOGGING.info `./cnf-testsuite cnf_setup cnf-config=sample-cnfs/ndn-non-root-user/cnf-testsuite.yml`
response_s = `./cnf-testsuite non_root_user verbose`
LOGGING.info response_s
# CLI exits 0 either way; assert on the printed verdict instead.
$?.success?.should be_true
(/Root user found/ =~ response_s).should_not be_nil
ensure
# Clean up the CNF and the Falco daemonset, blocking until uninstall completes.
LOGGING.info `./cnf-testsuite cnf_cleanup cnf-config=sample-cnfs/ndn-non-root-user/cnf-testsuite.yml`
LOGGING.debug `./cnf-testsuite uninstall_falco`
KubectlClient::Get.resource_wait_for_uninstall("DaemonSet", "falco")
end
end

it "'privileged' should pass with a non-privileged cnf", tags: ["privileged"] do
begin
LOGGING.debug `./cnf-testsuite cnf_setup cnf-config=sample-cnfs/sample-statefulset-cnf/cnf-testsuite.yml`
Expand Down
2 changes: 1 addition & 1 deletion src/cnf-testsuite.cr
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ task "upsert_release" do |_, args|
# LOGGING.info "upserting release on: #{ReleaseManager::VERSION}"
LOGGING.info "upserting release on: #{ReleaseManager::VERSION}"

ghrm = ReleaseManager::GithubReleaseManager.new("cncf/cnf-testsuite")
ghrm = ReleaseManager::GithubReleaseManager.new("cnti-testcatalog/testsuite")

release, asset = ghrm.upsert_release(version=ReleaseManager::VERSION)
if release
Expand Down
60 changes: 31 additions & 29 deletions src/tasks/workload/compatibility.cr
Original file line number Diff line number Diff line change
Expand Up @@ -193,10 +193,14 @@ task "increase_decrease_capacity" do |t, args|
testsuite_task = "increase_decrease_capacity"
Log.for(testsuite_task).info { "Starting test" }

Log.for(testsuite_task).info { "increase_capacity" }
Log.for(testsuite_task).info { "increase_decrease_capacity" }

increase_test_base_replicas = "1"
increase_test_target_replicas = "3"

decrease_test_base_replicas = "3"
decrease_test_target_replicas = "1"

# TODO scale replicasets separately
# https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#scaling-a-replicaset
# resource["kind"].as_s.downcase == "replicaset"
Expand All @@ -209,41 +213,39 @@ task "increase_decrease_capacity" do |t, args|
true
end
end

decrease_test_base_replicas = "3"
decrease_test_target_replicas = "1"
decrease_task_response = CNFManager.cnf_workload_resources(args, config) do | resource|
# TODO scale replicasets separately
# https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#scaling-a-replicaset
# resource["kind"].as_s.downcase == "replicaset"
if resource["kind"].as_s.downcase == "deployment" ||
resource["kind"].as_s.downcase == "statefulset"
final_count = change_capacity(decrease_test_base_replicas, decrease_test_target_replicas, args, config, resource)
decrease_test_target_replicas == final_count
else
true
increase_task_successful = increase_task_response.none?(false)

if increase_task_successful
decrease_task_response = CNFManager.cnf_workload_resources(args, config) do | resource|
# TODO scale replicasets separately
# https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#scaling-a-replicaset
# resource["kind"].as_s.downcase == "replicaset"
if resource["kind"].as_s.downcase == "deployment" ||
resource["kind"].as_s.downcase == "statefulset"
final_count = change_capacity(decrease_test_base_replicas, decrease_test_target_replicas, args, config, resource)
decrease_test_target_replicas == final_count
else
true
end
end
end
decrease_task_successful = !decrease_task_response.nil? && decrease_task_response.none?(false)

emoji_capacity = "📦📈📉"

if increase_task_response.none?(false) && decrease_task_response.none?(false)
pass_msg = "✔️ 🏆 PASSED: Replicas increased to #{increase_test_target_replicas} and decreased to #{decrease_test_target_replicas} #{emoji_capacity}"
pass_msg = "✔️ 🏆 PASSED: Replicas increased to #{increase_test_target_replicas} and decreased to #{decrease_test_target_replicas} #{emoji_capacity}"
fail_msg = "✖️ FAILURE: Capacity change failed #{emoji_capacity}"


if increase_task_successful && decrease_task_successful
upsert_passed_task(testsuite_task, pass_msg, task_start_time)
else
upsert_failed_task(testsuite_task, "✖️ FAILURE: Capacity change failed #{emoji_capacity}", task_start_time)

# If increased capacity failed
if increase_task_response.any?(false)
upsert_failed_task(testsuite_task, fail_msg, task_start_time)
stdout_failure(increase_decrease_remedy_msg())
unless increase_task_successful
stdout_failure("Failed to increase replicas from #{increase_test_base_replicas} to #{increase_test_target_replicas}")
end

# If decrease capacity failed
if decrease_task_response.any?(false)
else
stdout_failure("Failed to decrease replicas from #{decrease_test_base_replicas} to #{decrease_test_target_replicas}")
end

stdout_failure(increase_decrease_remedy_msg())
end
end
end
end
Expand Down Expand Up @@ -376,7 +378,7 @@ def wait_for_scaling(resource, target_replica_count, args)
if args.named.keys.includes? "wait_count"
wait_count_value = args.named["wait_count"]
else
wait_count_value = "30"
wait_count_value = "45"
end
wait_count = wait_count_value.to_i
second_count = 0
Expand Down
1 change: 1 addition & 0 deletions src/tasks/workload/configuration.cr
Original file line number Diff line number Diff line change
Expand Up @@ -131,6 +131,7 @@ task "ip_addresses" do |_, args|
emoji_network_runtime = "📶🏃⏲️"
helm_directory = config.cnf_config[:helm_directory]
helm_chart_path = config.cnf_config[:helm_chart_path]
Log.info { "Path: #{helm_chart_path}" }
if File.directory?(helm_chart_path)
# Switch to the helm chart directory
Dir.cd(helm_chart_path)
Expand Down
63 changes: 0 additions & 63 deletions src/tasks/workload/security.cr
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@ require "../utils/utils.cr"
desc "CNF containers should be isolated from one another and the host. The CNF Test suite uses tools like Falco, Sysdig Inspect and gVisor"
task "security", [
"privileged",
"non_root_user",
"symlink_file_system",
"privilege_escalation",
"insecure_capabilities",
Expand Down Expand Up @@ -160,68 +159,6 @@ task "container_sock_mounts" do |_, args|
end
end

# Workload security task: detects containers running as root by querying Falco
# (installed via the "install_falco" prerequisite task) per pod of each CNF
# workload resource. Skips — rather than fails — when Falco never becomes ready.
desc "Check if any containers are running in as root"
task "non_root_user", ["install_falco"] do |_, args|
CNFManager::Task.task_runner(args) do |args,config|
task_start_time = Time.utc
testsuite_task = "non_root_user"
Log.for(testsuite_task).info { "Starting test" }

# If the Falco daemonset never reports ready, record a SKIPPED result and
# dump Falco's pod logs (when a pod exists) to aid debugging, then bail out.
unless KubectlClient::Get.resource_wait_for_install("Daemonset", "falco", namespace: TESTSUITE_NAMESPACE)
Log.info { "Falco Failed to Start" }
upsert_skipped_task(testsuite_task, "⏭️  SKIPPED: Skipping non_root_user: Falco failed to install. Check Kernel Headers are installed on the Host Systems(K8s).", task_start_time)
node_pods = KubectlClient::Get.pods_by_nodes(KubectlClient::Get.schedulable_nodes_list)
pods = KubectlClient::Get.pods_by_label(node_pods, "app", "falco")

# Handle scenario when pod is not available when Falco is not installed.
if pods.size > 0
falco_pod_name = pods[0].dig("metadata", "name").as_s
Log.info { "Falco Pod Name: #{falco_pod_name}" }
KubectlClient.logs(falco_pod_name, namespace: TESTSUITE_NAMESPACE)
end
next
end

Log.for("verbose").info { "non_root_user" } if check_verbose(args)
Log.debug { "cnf_config: #{config}" }
fail_msgs = [] of String
# For each workload resource of the CNF, resolve its pods and ask Falco
# whether any pod runs as root. The block's return value (test_passed)
# feeds the overall pass/fail decision.
task_response = CNFManager.workload_resource_test(args, config) do |resource, container, initialized|
test_passed = true
Log.info { "Falco is Running" }
kind = resource["kind"].downcase
case kind
when "deployment","statefulset","pod","replicaset", "daemonset"
resource_yaml = KubectlClient::Get.resource(resource[:kind], resource[:name], resource[:namespace])
pods = KubectlClient::Get.pods_by_resource(resource_yaml)
# containers = KubectlClient::Get.resource_containers(kind, resource[:name])
pods.map do |pod|
# containers.as_a.map do |container|
#   container_name = container.dig("name")
pod_name = pod.dig("metadata", "name").as_s
# if Falco.find_root_pod(pod_name, container_name)
if Falco.find_root_pod(pod_name)
fail_msg = "resource: #{resource} and pod #{pod_name} uses a root user"
# De-duplicate: only print/record each distinct failure message once.
unless fail_msgs.find{|x| x== fail_msg}
puts fail_msg.colorize(:red)
fail_msgs << fail_msg
end
test_passed=false
end
end
end
test_passed
end
emoji_no_root="🚫√"
emoji_root="√"
# Record the final verdict in the results file.
if task_response
upsert_passed_task(testsuite_task, "✔️  PASSED: Root user not found #{emoji_no_root}", task_start_time)
else
upsert_failed_task(testsuite_task, "✖️  FAILED: Root user found #{emoji_root}", task_start_time)
end
end
end

desc "Check if any containers are running in privileged mode"
task "privileged" do |_, args|
CNFManager::Task.task_runner(args) do |args, config|
Expand Down

0 comments on commit c0f2fef

Please sign in to comment.